Updating plugins status for Rocky

For Rocky we are removing some plugin versions:

- CDH: removing 5.5.0
- MapR: removing 5.1.0
- Spark: removing 1.3.1
- Storm: removing 0.9.2

Also, we are marking some versions as deprecated:

- CDH: deprecating 5.7.0
- Spark: deprecating 1.6.0 and 2.1
- Storm: deprecating 1.0.1

Change-Id: I2dcec1344db4225263be179366eb408d62b9e108
This commit is contained in:
Telles Nobrega 2018-06-13 17:14:22 -03:00
parent c383738d59
commit 17509c3626
101 changed files with 30 additions and 26297 deletions

View File

@ -0,0 +1,11 @@
---
prelude: >
With every new release of Sahara we update our list of plugins. Some new
versions are added, some are removed, and others are marked as deprecated.
For Rocky we are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 as well as
Storm 1.0.1. We are also removing CDH 5.5.0, MapR 5.1.0, Spark 1.3.1 and
Storm 0.9.2.
deprecations:
- We are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 and Storm 1.0.1.
upgrade:
- We are removing some plugin versions. Those are CDH 5.5.0, MapR 5.1.0,
Spark 1.3.1 and Storm 0.9.2.

View File

@ -41,8 +41,7 @@ class CDHPluginProvider(p.ProvisioningPluginBase):
'5.13.0': copy.deepcopy(default),
'5.11.0': copy.deepcopy(default),
'5.9.0': copy.deepcopy(default),
'5.7.0': copy.deepcopy(default),
'5.5.0': copy.deepcopy(deprecated),
'5.7.0': copy.deepcopy(deprecated)
}
return result

View File

@ -1,27 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import cloudera_utils as cu
from sahara.plugins.cdh.v5_5_0 import config_helper
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.plugins.cdh.v5_5_0 import validation
class ClouderaUtilsV550(cu.ClouderaUtils):
    """Cloudera Manager utilities wired to the CDH 5.5.0 plugin helpers."""

    def __init__(self):
        # Initialize the version-agnostic base, then bind the 5.5.0-specific
        # plugin utils, validator class and config helper that the base
        # class methods rely on.
        super(ClouderaUtilsV550, self).__init__()
        self.pu = pu.PluginUtilsV550()
        self.validator = validation.ValidatorV550
        self.c_helper = config_helper.ConfigHelperV550()

View File

@ -1,101 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import config_helper as c_h
from sahara.plugins import provisioning as p
from sahara.utils import files as f
class ConfigHelperV550(c_h.ConfigHelper):
    """Configuration helper for the CDH 5.5.0 plugin.

    Pins the package repositories, artifact URLs and safety-valve
    snippets that are specific to the 5.5.0 release, and loads the
    version's bundled resource files on construction.
    """

    # Root of this version's bundled resource files (JSON/XML).
    path_to_config = 'plugins/cdh/v5_5_0/resources/'

    # Ubuntu (trusty) apt repository entries for CDH 5.5.0 packages.
    CDH5_UBUNTU_REPO = (
        'deb [arch=amd64] http://archive.cloudera.com/cdh5'
        '/ubuntu/trusty/amd64/cdh trusty-cdh5.5.0 contrib'
        '\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
        '/trusty/amd64/cdh trusty-cdh5.5.0 contrib')

    DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = (
        'http://archive.cloudera.com/cdh5/ubuntu'
        '/trusty/amd64/cdh/archive.key')

    # Ubuntu (trusty) apt repository entries for Cloudera Manager 5.5.0.
    CM5_UBUNTU_REPO = (
        'deb [arch=amd64] http://archive.cloudera.com/cm5'
        '/ubuntu/trusty/amd64/cm trusty-cm5.5.0 contrib'
        '\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
        '/trusty/amd64/cm trusty-cm5.5.0 contrib')

    DEFAULT_CM5_UBUNTU_REPO_KEY_URL = (
        'http://archive.cloudera.com/cm5/ubuntu'
        '/trusty/amd64/cm/archive.key')

    # CentOS/RHEL 6 yum repository definitions for CDH and CM 5.5.0.
    CDH5_CENTOS_REPO = (
        '[cloudera-cdh5]'
        '\nname=Cloudera\'s Distribution for Hadoop, Version 5'
        '\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
        '/x86_64/cdh/5.5.0/'
        '\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
        '/x86_64/cdh/RPM-GPG-KEY-cloudera'
        '\ngpgcheck = 1')

    CM5_CENTOS_REPO = (
        '[cloudera-manager]'
        '\nname=Cloudera Manager'
        '\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
        '/x86_64/cm/5.5.0/'
        '\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
        '/x86_64/cm/RPM-GPG-KEY-cloudera'
        '\ngpgcheck = 1')

    # Navigator Key Trustee repository locations (Ubuntu and CentOS).
    KEY_TRUSTEE_UBUNTU_REPO_URL = (
        'http://archive.cloudera.com/navigator-'
        'keytrustee5/ubuntu/trusty/amd64/navigator-'
        'keytrustee/cloudera.list')

    DEFAULT_KEY_TRUSTEE_UBUNTU_REPO_KEY_URL = (
        'http://archive.cloudera.com/navigator-'
        'keytrustee5/ubuntu/trusty/amd64/navigator-'
        'keytrustee/archive.key')

    KEY_TRUSTEE_CENTOS_REPO_URL = (
        'http://archive.cloudera.com/navigator-'
        'keytrustee5/redhat/6/x86_64/navigator-'
        'keytrustee/navigator-keytrustee5.repo')

    # Hadoop-openstack (Swift support) jar matching this CDH release.
    DEFAULT_SWIFT_LIB_URL = (
        'https://repository.cloudera.com/artifactory/repo/org'
        '/apache/hadoop/hadoop-openstack/2.6.0-cdh5.5.0'
        '/hadoop-openstack-2.6.0-cdh5.5.0.jar')

    SWIFT_LIB_URL = p.Config(
        'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
        default_value=DEFAULT_SWIFT_LIB_URL,
        description=("Library that adds Swift support to CDH. The file"
                     " will be downloaded by VMs."))

    # Sentry safety-valve XML snippets, read from the version's resources.
    HIVE_SERVER2_SENTRY_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'hive-server2-sentry-safety.xml')

    HIVE_METASTORE_SENTRY_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'hive-metastore-sentry-safety.xml')

    SENTRY_IMPALA_CLIENT_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'sentry-impala-client-safety.xml')

    def __init__(self):
        """Load the priority-one config list and build all plugin configs."""
        super(ConfigHelperV550, self).__init__()
        self.priority_one_confs = self._load_json(
            self.path_to_config + 'priority-one-confs.json')
        self._init_all_ng_plugin_configs()

View File

@ -1,168 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins.cdh import commands as cmd
from sahara.plugins.cdh import deploy as common_deploy
from sahara.plugins.cdh.v5_5_0 import cloudera_utils as cu
from sahara.plugins import utils as gu
from sahara.service.edp import hdfs_helper as h
from sahara.utils import cluster_progress_ops as cpo
# Version-specific Cloudera utilities shared by every function below.
CU = cu.ClouderaUtilsV550()
# Package set to install on cluster nodes (shared across CDH versions).
PACKAGES = common_deploy.PACKAGES
def configure_cluster(cluster):
    """Install CDH, start Cloudera Manager and push configs to all nodes."""
    instances = gu.get_instances(cluster)

    # Images with CDH pre-installed skip OS setup and package installation.
    if not cmd.is_pre_installed_cdh(CU.pu.get_manager(cluster).remote()):
        CU.pu.configure_os(instances)
        CU.pu.install_packages(instances, PACKAGES)

    CU.pu.start_cloudera_agents(instances)
    CU.pu.start_cloudera_manager(cluster)
    CU.update_cloudera_password(cluster)
    CU.configure_rack_awareness(cluster)
    CU.await_agents(cluster, instances)
    CU.create_mgmt_service(cluster)
    CU.create_services(cluster)
    CU.configure_services(cluster)
    CU.configure_instances(instances, cluster)
    CU.deploy_configs(cluster)
@cpo.event_wrapper(
    True, step=_("Start roles: NODEMANAGER, DATANODE"), param=('cluster', 0))
def _start_roles(cluster, instances):
    """Start the DATANODE and NODEMANAGER roles hosted on *instances*."""
    for instance in instances:
        if 'HDFS_DATANODE' in instance.node_group.node_processes:
            hdfs = CU.get_service_by_role('DATANODE', instance=instance)
            CU.start_roles(hdfs, CU.pu.get_role_name(instance, 'DATANODE'))

        if 'YARN_NODEMANAGER' in instance.node_group.node_processes:
            yarn = CU.get_service_by_role('NODEMANAGER', instance=instance)
            CU.start_roles(yarn, CU.pu.get_role_name(instance, 'NODEMANAGER'))
def scale_cluster(cluster, instances):
    """Provision *instances* added by a scale-up and start their roles."""
    if not instances:
        return

    # Fresh images need OS configuration and package installation first.
    if not cmd.is_pre_installed_cdh(instances[0].remote()):
        CU.pu.configure_os(instances)
        CU.pu.install_packages(instances, PACKAGES)

    CU.pu.start_cloudera_agents(instances)
    CU.await_agents(cluster, instances)
    CU.configure_rack_awareness(cluster)
    CU.configure_instances(instances, cluster)
    CU.update_configs(instances)

    common_deploy.prepare_scaling_kerberized_cluster(
        cluster, CU, instances)

    CU.pu.configure_swift(cluster, instances)
    _start_roles(cluster, instances)
    CU.refresh_datanodes(cluster)
    CU.refresh_yarn_nodes(cluster)
    CU.restart_stale_services(cluster)
def decommission_cluster(cluster, instances):
    """Decommission worker roles on *instances*, then remove the instances."""
    dns = []
    dns_to_delete = []
    nms = []
    nms_to_delete = []
    for i in instances:
        # The matching gateway role on each node is deleted alongside the
        # worker role being decommissioned.
        if 'HDFS_DATANODE' in i.node_group.node_processes:
            dns.append(CU.pu.get_role_name(i, 'DATANODE'))
            dns_to_delete.append(
                CU.pu.get_role_name(i, 'HDFS_GATEWAY'))

        if 'YARN_NODEMANAGER' in i.node_group.node_processes:
            nms.append(CU.pu.get_role_name(i, 'NODEMANAGER'))
            nms_to_delete.append(
                CU.pu.get_role_name(i, 'YARN_GATEWAY'))

    if dns:
        CU.decommission_nodes(
            cluster, 'DATANODE', dns, dns_to_delete)

    if nms:
        CU.decommission_nodes(
            cluster, 'NODEMANAGER', nms, nms_to_delete)

    CU.delete_instances(cluster, instances)

    CU.refresh_datanodes(cluster)
    CU.refresh_yarn_nodes(cluster)
    CU.restart_stale_services(cluster)
@cpo.event_wrapper(True, step=_("Prepare cluster"), param=('cluster', 0))
def _prepare_cluster(cluster):
    """Run optional pre-start setup for Oozie, Hive and Sentry if deployed."""
    if CU.pu.get_oozie(cluster):
        CU.pu.install_extjs(cluster)

    if CU.pu.get_hive_metastore(cluster):
        CU.pu.configure_hive(cluster)

    if CU.pu.get_sentry(cluster):
        CU.pu.configure_sentry(cluster)
@cpo.event_wrapper(
    True, step=_("Finish cluster starting"), param=('cluster', 0))
def _finish_cluster_starting(cluster):
    """Post-start steps: Hive HDFS config, HBase common lib, Flume agents."""
    if CU.pu.get_hive_metastore(cluster):
        CU.pu.put_hive_hdfs_xml(cluster)

    server = CU.pu.get_hbase_master(cluster)
    if CU.pu.c_helper.is_hbase_common_lib_enabled(cluster) and server:
        with server.remote() as r:
            h.create_hbase_common_lib(r)

    if CU.pu.get_flumes(cluster):
        flume = CU.get_service_by_role('AGENT', cluster)
        CU.start_service(flume)
def start_cluster(cluster):
    """Run first start of the cluster, enabling HA and Kerberos if needed."""
    _prepare_cluster(cluster)

    CU.first_run(cluster)

    CU.pu.configure_swift(cluster)

    # Journal nodes present means NameNode HA was requested.
    if len(CU.pu.get_jns(cluster)) > 0:
        CU.enable_namenode_ha(cluster)
        # updating configs for NameNode role on needed nodes
        CU.update_role_config(CU.pu.get_secondarynamenode(cluster),
                              'HDFS_NAMENODE')

    # A standby ResourceManager means ResourceManager HA was requested.
    if CU.pu.get_stdb_rm(cluster):
        CU.enable_resourcemanager_ha(cluster)
        # updating configs for ResourceManager on needed nodes
        CU.update_role_config(CU.pu.get_stdb_rm(cluster), 'YARN_STANDBYRM')

    _finish_cluster_starting(cluster)

    common_deploy.setup_kerberos_for_cluster(cluster, CU)
def get_open_ports(node_group):
    """Return the firewall ports to open for *node_group*.

    Delegates entirely to the version-agnostic deploy helper.
    """
    return common_deploy.get_open_ports(node_group)

View File

@ -1,47 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import confighints_helper as ch_helper
from sahara.plugins.cdh import edp_engine
from sahara.plugins.cdh.v5_5_0 import cloudera_utils as cu
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.utils import edp
class EdpOozieEngine(edp_engine.EdpOozieEngine):
    """Oozie EDP engine bound to the CDH 5.5.0 Cloudera utilities."""

    def __init__(self, cluster):
        super(EdpOozieEngine, self).__init__(cluster)
        self.cloudera_utils = cu.ClouderaUtilsV550()

    @staticmethod
    def get_possible_job_config(job_type):
        """Return the possible job configs for *job_type*.

        Hive, MapReduce (incl. streaming) and Pig configs are derived
        from this version's bundled site XML files; any other job type
        falls back to the generic Oozie engine defaults.
        """
        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
            return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/cdh/v5_5_0/resources/hive-site.xml')}
        if edp.compare_job_type(job_type,
                                edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/cdh/v5_5_0/resources/mapred-site.xml')}
        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
            return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/cdh/v5_5_0/resources/mapred-site.xml')}
        return oozie_engine.OozieJobEngine.get_possible_job_config(job_type)
class EdpSparkEngine(edp_engine.EdpSparkEngine):
    """Spark EDP engine pinned to CDH 5.5.0 artifacts."""

    # CDH version used by the base class to locate Spark EDP resources.
    edp_base_version = "5.5.0"

View File

@ -1,23 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import plugin_utils as pu
from sahara.plugins.cdh.v5_5_0 import config_helper
class PluginUtilsV550(pu.AbstractPluginUtils):
    """Plugin utilities for the CDH 5.5.0 plugin version."""

    def __init__(self):
        # NOTE(review): the abstract base __init__ is not called here —
        # presumably it only sets c_helper; confirm before relying on any
        # other base-class instance state.
        self.c_helper = config_helper.ConfigHelperV550()

View File

@ -1,135 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
import six
from sahara.plugins.cdh.client import api_client
# -- cm config --
# Connection settings for the Cloudera Manager instance this helper
# script queries when regenerating the default-config JSON files.
cm_address = 'localhost'
cm_port = 7180
cm_username = 'admin'
cm_password = 'admin'

# Service names as registered in Cloudera Manager; each is dumped to
# '<prefix>-*.json' files by main().
hdfs_service_name = 'hdfs01'
yarn_service_name = 'yarn01'
oozie_service_name = 'oozie01'
hive_service_name = 'hive01'
hue_service_name = 'hue01'
spark_service_name = 'spark_on_yarn01'
zookeeper_service_name = 'zookeeper01'
hbase_service_name = 'hbase01'
flume_service_name = 'flume01'
sqoop_service_name = 'sqoop01'
solr_service_name = 'solr01'
ks_indexer_service_name = 'ks_indexer01'
impala_service_name = 'impala01'
sentry_service_name = 'sentry01'
def get_cm_api():
    """Return an API client connected to the configured Cloudera Manager."""
    return api_client.ApiResource(cm_address, server_port=cm_port,
                                  username=cm_username, password=cm_password)
def get_cluster(api):
    """Return the first (and assumed only) cluster known to *api*."""
    clusters = api.get_all_clusters()
    return clusters[0]
def process_service(service, service_name):
    """Dump the full config of *service* to JSON files.

    Writes one '<service_name>-<role>.json' file per role config group
    and a '<service_name>-service.json' file for the service-wide config.
    """
    for role_cfgs in service.get_all_role_config_groups():
        role_cm_cfg = role_cfgs.get_config(view='full')
        role_cfg = parse_config(role_cm_cfg)
        role_name = role_cfgs.roleType.lower()
        write_cfg(role_cfg, '%s-%s.json' % (service_name, role_name))

    service_cm_cfg = service.get_config(view='full')[0]
    service_cfg = parse_config(service_cm_cfg)
    write_cfg(service_cfg, '%s-service.json' % service_name)
def parse_config(config):
    """Flatten a CM config mapping into a list of plain dicts.

    :param config: mapping of config key to a CM config entry object
                   exposing name/default/displayName/description
    :returns: list of dicts with 'name', 'value', 'display_name' and
              'desc' keys, in the mapping's iteration order
    """
    # The mapping key duplicates each entry's .name, so only the values
    # are needed; a comprehension replaces the manual append loop and the
    # unused loop variable, and .values() replaces the Python 2-era
    # six.iteritems() shim (identical iteration on Python 3).
    return [{
        'name': value.name,
        'value': value.default,
        'display_name': value.displayName,
        'desc': value.description
    } for value in config.values()]
def write_cfg(cfg, file_name):
    """Serialize *cfg* as pretty-printed, key-sorted JSON to *file_name*."""
    with open(file_name, 'w') as out:
        out.write(json.dumps(cfg, sort_keys=True, indent=4,
                             separators=(',', ': ')))
def main():
    """Dump every known service's configuration to JSON files.

    Connects to the configured Cloudera Manager, takes its first
    cluster and writes one set of JSON config files per service.
    """
    client = get_cm_api()
    cluster = get_cluster(client)

    # (CM service name, output file prefix) pairs; replaces fourteen
    # copies of the same get_service/process_service sequence.
    services = [
        (hdfs_service_name, 'hdfs'),
        (yarn_service_name, 'yarn'),
        (oozie_service_name, 'oozie'),
        (hive_service_name, 'hive'),
        (hue_service_name, 'hue'),
        (spark_service_name, 'spark'),
        (zookeeper_service_name, 'zookeeper'),
        (hbase_service_name, 'hbase'),
        (flume_service_name, 'flume'),
        (sqoop_service_name, 'sqoop'),
        (solr_service_name, 'solr'),
        (ks_indexer_service_name, 'ks_indexer'),
        (impala_service_name, 'impala'),
        (sentry_service_name, 'sentry'),
    ]
    for service_name, prefix in services:
        process_service(cluster.get_service(service_name), prefix)


if __name__ == '__main__':
    main()

View File

@ -1,3 +0,0 @@
#!/bin/bash
# Regenerate the CDH default-config JSON files inside the tox venv,
# forwarding any extra command-line arguments to cdh_config.py.
# "$@" (not $*) and the quoted dirname preserve arguments and paths
# that contain whitespace.
tox -evenv -- python "$(dirname "$0")"/cdh_config.py "$@"

File diff suppressed because one or more lines are too long

View File

@ -1,164 +0,0 @@
[
{
"desc": "Name of the Hbase service that this Flume service instance depends on",
"display_name": "Hbase Service",
"name": "hbase_service",
"value": null
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Flume might connect to. This is used when Flume is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Flume TLS/SSL Certificate Trust Store File",
"name": "flume_truststore_file",
"value": null
},
{
"desc": "Whether to suppress the results of the Agent Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Agent Health",
"name": "service_health_suppression_flume_agents_healthy",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "flume"
},
{
"desc": "Name of the HDFS service that this Flume service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Agent Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Agent Count Validator",
"name": "service_config_suppression_agent_count_validator",
"value": "false"
},
{
"desc": "Name of the Solr service that this Flume service instance depends on",
"display_name": "Solr Service",
"name": "solr_service",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Flume Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "flume_env_safety_valve",
"value": null
},
{
"desc": "Sets the maximum number of Flume components that will be returned under Flume Metric Details. Increasing this value will negatively impact the interactive performance of the Flume Metrics Details page.",
"display_name": "Maximum displayed Flume metrics components",
"name": "flume_context_groups_request_limit",
"value": "1000"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "flume"
},
{
"desc": "The health test thresholds of the overall Agent health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Agents falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Agents falls below the critical threshold.",
"display_name": "Healthy Agent Monitoring Thresholds",
"name": "flume_agents_healthy_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"95.0\"}"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "flume"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The frequency in which the log4j event publication appender will retry sending undelivered log events to the Event server, in seconds",
"display_name": "Log Event Retry Frequency",
"name": "log_event_retry_frequency",
"value": "30"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
"display_name": "Enable Log Event Capture",
"name": "catch_events",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Flume Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_flume_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Flume TLS/SSL Certificate Trust Store Password",
"name": "service_config_suppression_flume_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "The password for the Flume TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Flume TLS/SSL Certificate Trust Store Password",
"name": "flume_truststore_password",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Flume TLS/SSL Certificate Trust Store File",
"name": "service_config_suppression_flume_truststore_file",
"value": "false"
}
]

View File

@ -1,110 +0,0 @@
[
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/hbase"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>hbase-site.xml</strong>.",
"display_name": "HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "hbase_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "role_config_suppression_hbase_client_config_safety_valve",
"value": "false"
},
{
"desc": "If a multiget operation is performed with 'Consistency.TIMELINE', the read RPC is sent to the primary region server first. After this timeout, a parallel RPC for secondary region replicas is also sent if the primary does not respond. After this, the result is returned from whichever RPC is finished first. If the response returns from the primary region replica, that the data is the most recent. Result.isStale() API has been added to inspect the staleness. If the result is from a secondary region, Result.isStale() is set to true.",
"display_name": "HBase Client Multiget Timeout For Secondary Region Replicas",
"name": "hbase_client_primaryCallTimeout_multiget",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "Whether to enable interruption of RPC threads at the client. The default value of true enables primary RegionServers to access data from other regions' secondary replicas.",
"display_name": "Enable Client RPC Threads Interruption",
"name": "hbase_ipc_client_allowsInterrupt",
"value": "true"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into the client configuration for <strong>hbase-env.sh</strong>",
"display_name": "HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh",
"name": "hbase_client_env_safety_valve",
"value": null
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "90"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Java Configuration Options parameter.",
"display_name": "Suppress Parameter Validation: Client Java Configuration Options",
"name": "role_config_suppression_hbase_client_java_opts",
"value": "false"
},
{
"desc": "If a get operation is performed with 'Consistency.TIMELINE', the read RPC is sent to the primary region server first. After this timeout, parallel RPC for secondary region replicas is also sent if the primary does not respond. After this, the result is returned from whichever RPC is finished first. If the response returns from the primary region replica, that the data is the most recent. Result.isStale() API has been added to inspect the staleness. If the result is from a secondary region, Result.isStale() is set to true.",
"display_name": "HBase Client Get Timeout For Secondary Region Replicas",
"name": "hbase_client_primaryCallTimeout_get",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh parameter.",
"display_name": "Suppress Parameter Validation: HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh",
"name": "role_config_suppression_hbase_client_env_safety_valve",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java process heap memory. Passed to Java -Xmx.",
"display_name": "Client Java Heap Size in Bytes",
"name": "hbase_client_java_heapsize",
"value": "268435456"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "The minimum log level for Gateway logs",
"display_name": "Gateway Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "These are Java command line arguments. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Client Java Configuration Options",
"name": "hbase_client_java_opts",
"value": "-XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -Djava.net.preferIPv4Stack=true"
}
]

View File

@ -1,440 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for HBase REST Server",
"name": "hbase_restserver_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_hbase_restserver_keystore_file",
"value": "false"
},
{
"desc": "Maximum size of the HBase REST Server thread pool. The server can process this number of concurrent requests. Setting this too high can lead to out of memory errors.",
"display_name": "HBase REST Server Maximum Threads",
"name": "hbase_restserver_threads_max",
"value": "100"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hbaserestserver_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The host name or IP address of the DNS name server which an HBase REST Server should use to determine the host name used for communication and display purposes.",
"display_name": "HBase REST Server DNS Name Server",
"name": "hbase_restserver_dns_nameserver",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server Log Directory",
"name": "role_config_suppression_hbase_restserver_log_dir",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "HBase REST Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HBASERESTSERVER_role_env_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "role_config_suppression_hbase_restserver_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_hbase_restserver_keystore_password",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stack traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hbaserestserver_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
"display_name": "HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "hbase_restserver_config_safety_valve",
"value": null
},
{
"desc": "The password for the HBase REST Server JKS keystore file.",
"display_name": "HBase REST Server TLS/SSL Server JKS Keystore File Password",
"name": "hbase_restserver_keystore_password",
"value": null
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hbase_rest_server_scm_health",
"value": "false"
},
{
"desc": "Directory where HBase REST Server will place its log files.",
"display_name": "HBase REST Server Log Directory",
"name": "hbase_restserver_log_dir",
"value": "/var/log/hbase"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when HBase REST Server is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "HBase REST Server TLS/SSL Server JKS Keystore File Location",
"name": "hbase_restserver_keystore_file",
"value": null
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Encrypt communication between clients and HBase REST Server using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for HBase REST Server",
"name": "hbase_restserver_ssl_enable",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hbase_rest_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase REST Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for HBase REST Server",
"name": "role_config_suppression_hbase_restserver_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server DNS Name Server parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server DNS Name Server",
"name": "role_config_suppression_hbase_restserver_dns_nameserver",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hbase_rest_server_file_descriptor",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "HBase REST Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Minimum size of the HBase REST Server thread pool. The server will maintain at least this number of threads in the pool at all times. The thread pool can grow up to the maximum size set by hbase.rest.threads.max.",
"display_name": "HBase REST Server Minimum Threads",
"name": "hbase_restserver_threads_min",
"value": "2"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hbase_rest_server_swap_memory_usage",
"value": "false"
},
{
"desc": "When false, all HTTP methods are permitted (GET/PUT/POST/DELETE). When true, only GET is permitted.",
"display_name": "Enable HBase REST Server Read Only Mode",
"name": "hbase_restserver_readonly",
"value": "false"
},
{
"desc": "The port that HBase REST Server Web UI binds to.",
"display_name": "HBase REST Server Web UI Port",
"name": "hbase_restserver_info_port",
"value": "8085"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When computing the overall HBase REST Server health, consider the host's health.",
"display_name": "HBase REST Server Host Health Test",
"name": "hbaserestserver_host_health_enabled",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when HBase REST Server is acting as a TLS/SSL server.",
"display_name": "HBase REST Server TLS/SSL Server JKS Keystore Key Password",
"name": "hbase_restserver_keystore_keypassword",
"value": null
},
{
"desc": "The maximum size, in megabytes, per log file for HBase REST Server logs. Typically used by log4j or logback.",
"display_name": "HBase REST Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Enables the health test that the HBase REST Server's process state is consistent with the role configuration",
"display_name": "HBase REST Server Process Health Test",
"name": "hbaserestserver_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "If true, HBase REST Server Web UI will bind to a wildcard address (0.0.0.0). Otherwise it will bind to a host name. Only available in CDH 4.3 and later.",
"display_name": "HBase REST Server Web UI Bind to Wildcard Address",
"name": "hbase_restserver_info_bind_to_wildcard",
"value": "true"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "HBase REST Server will bind to this address.",
"display_name": "HBase REST Server Host Address",
"name": "hbase_restserver_host",
"value": "0.0.0.0"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hbase_rest_server_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Host Address parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server Host Address",
"name": "role_config_suppression_hbase_restserver_host",
"value": "false"
},
{
"desc": "The name of the DNS network interface from which an HBase REST Server should report its IP address.",
"display_name": "HBase REST Server DNS Network Interface",
"name": "hbase_restserver_dns_interface",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server DNS Network Interface parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server DNS Network Interface",
"name": "role_config_suppression_hbase_restserver_dns_interface",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for HBase REST Server logs. Typically used by log4j or logback.",
"display_name": "HBase REST Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "The port that HBase REST Server binds to.",
"display_name": "HBase REST Server Port",
"name": "hbase_restserver_port",
"value": "20550"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hbase_rest_server_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_hbase_restserver_keystore_keypassword",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hbase_rest_server_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of HBase REST Server in Bytes",
"name": "hbase_restserver_java_heapsize",
"value": "1073741824"
},
{
"desc": "The minimum log level for HBase REST Server logs",
"display_name": "HBase REST Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
}
]

View File

@ -1,446 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hbasethriftserver_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase Thrift Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for HBase Thrift Server",
"name": "role_config_suppression_hbase_thriftserver_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hbase_thrift_server_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server DNS Network Interface parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server DNS Network Interface",
"name": "role_config_suppression_hbase_thriftserver_dns_interface",
"value": "false"
},
{
"desc": "The \"core size\" of the thread pool. New threads are created on every connection until this many threads are created.",
"display_name": "HBase Thrift Server Min Worker Threads",
"name": "hbase_thriftserver_min_worker_threads",
"value": "200"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hbase_thrift_server_host_health",
"value": "false"
},
{
"desc": "The port that HBase Thrift Server binds to.",
"display_name": "HBase Thrift Server Port",
"name": "hbase_thriftserver_port",
"value": "9090"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Bind Address parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server Bind Address",
"name": "role_config_suppression_hbase_thriftserver_bindaddress",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hbase_thrift_server_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server Log Directory",
"name": "role_config_suppression_hbase_thriftserver_log_dir",
"value": "false"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "When computing the overall HBase Thrift Server health, consider the host's health.",
"display_name": "HBase Thrift Server Host Health Test",
"name": "hbasethriftserver_host_health_enabled",
"value": "true"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when HBase Thrift Server over HTTP is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location",
"name": "hbase_thriftserver_http_keystore_file",
"value": null
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of HBase Thrift Server in Bytes",
"name": "hbase_thriftserver_java_heapsize",
"value": "1073741824"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The password for the HBase Thrift Server over HTTP JKS keystore file.",
"display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password",
"name": "hbase_thriftserver_http_keystore_password",
"value": null
},
{
"desc": "If true, HBase Thrift Server Web UI will bind to a wildcard address (0.0.0.0). Otherwise it will bind to a host name. Only available in CDH 4.3 and later.",
"display_name": "HBase Thrift Server Web UI Bind to Wildcard Address",
"name": "hbase_thriftserver_info_bind_to_wildcard",
"value": "true"
},
{
"desc": "Enables the health test that the HBase Thrift Server's process state is consistent with the role configuration",
"display_name": "HBase Thrift Server Process Health Test",
"name": "hbasethriftserver_scm_health_enabled",
"value": "true"
},
{
"desc": "Address to bind the HBase Thrift Server to. When using the THsHaServer or the TNonblockingServer, always binds to 0.0.0.0 irrespective of this configuration value.",
"display_name": "HBase Thrift Server Bind Address",
"name": "hbase_thriftserver_bindaddress",
"value": "0.0.0.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Type of HBase Thrift Server.",
"display_name": "HBase Thrift Server Type",
"name": "hbase_thriftserver_type",
"value": "threadpool"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_hbase_thriftserver_http_keystore_password",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HBASETHRIFTSERVER_role_env_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hbase_thrift_server_log_directory_free_space",
"value": "false"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when HBase Thrift Server over HTTP is acting as a TLS/SSL server.",
"display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password",
"name": "hbase_thriftserver_http_keystore_keypassword",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_hbase_thriftserver_http_keystore_file",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for HBase Thrift Server",
"name": "hbase_thriftserver_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Use framed transport. When using the THsHaServer or TNonblockingServer, framed transport is always used irrespective of this configuration value.",
"display_name": "Enable HBase Thrift Server Framed Transport",
"name": "hbase_thriftserver_framed",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for HBase Thrift Server logs. Typically used by log4j or logback.",
"display_name": "HBase Thrift Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "The port that HBase Thrift Server Web UI binds to.",
"display_name": "HBase Thrift Server Web UI Port",
"name": "hbase_thriftserver_info_port",
"value": "9095"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_hbase_thriftserver_http_keystore_keypassword",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hbase_thrift_server_swap_memory_usage",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The host name or IP address of the DNS name server which an HBase Thrift Server should use to determine the host name used for communication and display purposes.",
"display_name": "HBase Thrift Server DNS Name Server",
"name": "hbase_thriftserver_dns_nameserver",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server DNS Name Server parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server DNS Name Server",
"name": "role_config_suppression_hbase_thriftserver_dns_nameserver",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for HBase Thrift Server logs. Typically used by log4j or logback.",
"display_name": "HBase Thrift Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hbase_thrift_server_scm_health",
"value": "false"
},
{
"desc": "The name of the DNS network interface from which an HBase Thrift Server should report its IP address.",
"display_name": "HBase Thrift Server DNS Network Interface",
"name": "hbase_thriftserver_dns_interface",
"value": null
},
{
"desc": "Use the TCompactProtocol instead of the default TBinaryProtocol. TCompactProtocol is a binary protocol that is more compact than the default and typically more efficient.",
"display_name": "Enable HBase Thrift Server Compact Protocol",
"name": "hbase_thriftserver_compact",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Directory where HBase Thrift Server will place its log files.",
"display_name": "HBase Thrift Server Log Directory",
"name": "hbase_thriftserver_log_dir",
"value": "/var/log/hbase"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
"display_name": "HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "hbase_thriftserver_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hbase_thrift_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hbasethriftserver_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "role_config_suppression_hbase_thriftserver_config_safety_valve",
"value": "false"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for HBase Thrift Server logs",
"display_name": "HBase Thrift Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Encrypt communication between clients and HBase Thrift Server over HTTP using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for HBase Thrift Server over HTTP",
"name": "hbase_thriftserver_http_use_ssl",
"value": "false"
}
]

View File

@ -1,506 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"IPC Server handler.*ClosedChannelException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"IPC Server Responder, call.*output error\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Daughter regiondir does not exist: .*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"File.*might still be open.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"File.*might still be open.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Moving table .+ state to enabled but was already enabled\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Received OPENED for region.*but region was in the state.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, 
\"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Master Log Directory",
"name": "role_config_suppression_hbase_master_log_dir",
"value": "false"
},
{
"desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.",
"display_name": "Health Check Startup Tolerance",
"name": "master_startup_tolerance",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "role_config_suppression_hbase_master_config_safety_valve",
"value": "false"
},
{
"desc": "Number of pooled threads to handle region closing in the master.",
"display_name": "Region Closing Threads",
"name": "hbase_master_executor_closeregion_threads",
"value": "5"
},
{
"desc": "When computing the overall Master health, consider the host's health.",
"display_name": "Master Host Health Test",
"name": "master_host_health_enabled",
"value": "true"
},
{
"desc": "Number of pooled threads to handle the recovery of the region servers in the master.",
"display_name": "RegionServer Recovery Threads",
"name": "hbase_master_executor_serverops_threads",
"value": "5"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Directory where Master will place its log files.",
"display_name": "Master Log Directory",
"name": "hbase_master_log_dir",
"value": "/var/log/hbase"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Time period in seconds to reset long-running metrics (e.g. compactions). This is an HBase specific configuration.",
"display_name": "Extended Period",
"name": "hbase_metrics_extended_period",
"value": "3600"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "master_gc_duration_window",
"value": "5"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress the results of the HBase Master Canary health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: HBase Master Canary",
"name": "role_health_suppression_master_canary_health",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "master_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "A comma-separated list of LogCleanerDelegate(s) that are used in LogsCleaner. WAL/HLog cleaner(s) are called in order, so put the log cleaner that prunes the most log files in the front. To implement your own LogCleanerDelegate, add it to HBase's classpath and add the fully-qualified class name here. You should always add the above default log cleaners in the list, unless you have a special reason not to.",
"display_name": "HBase Master Log Cleaner Plugins",
"name": "hbase_master_logcleaner_plugins",
"value": null
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "master_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_master_log_directory_free_space",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "List of org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are loaded by default on the active HMaster process. For any implemented coprocessor methods, the listed classes will be called in order. After implementing your own MasterObserver, just put it in HBase's classpath and add the fully qualified class name here.",
"display_name": "HBase Coprocessor Master Classes",
"name": "hbase_coprocessor_master_classes",
"value": ""
},
{
"desc": "The host name or IP address of the DNS name server which an HBase Master should use to determine the host name used for communication and display purposes.",
"display_name": "HBase Master DNS Name Server",
"name": "hbase_master_dns_nameserver",
"value": null
},
{
"desc": "Enables the health test that a client can connect to the HBase Master",
"display_name": "HBase Master Canary Health Test",
"name": "master_canary_health_enabled",
"value": "true"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of HBase Master in Bytes",
"name": "hbase_master_java_heapsize",
"value": "1073741824"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_master_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "master_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_master_file_descriptor",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "When true, HBase Master will bind to 0.0.0.0. Only available with CDH 4.3 and later.",
"display_name": "HBase Master Bind to Wildcard Address",
"name": "hbase_master_bind_to_wildcard_address",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_master_scm_health",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The port that the HBase Master binds to.",
"display_name": "HBase Master Port",
"name": "hbase_master_port",
"value": "60000"
},
{
"desc": "Whether to suppress the results of the GC Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_master_gc_duration",
"value": "false"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Number of pooled threads to handle region opening in the master.",
"display_name": "Region Opening Threads",
"name": "hbase_master_executor_openregion_threads",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master Web UI Address parameter.",
"display_name": "Suppress Parameter Validation: HBase Master Web UI Address",
"name": "role_config_suppression_hbase_master_info_bindaddress",
"value": "false"
},
{
"desc": "The port for the HBase Master web UI. Set to -1 to disable the HBase Master web UI.",
"display_name": "HBase Master Web UI Port",
"name": "hbase_master_info_port",
"value": "60010"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_master_swap_memory_usage",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Master Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
"display_name": "Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "hbase_master_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_master_unexpected_exits",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for HBase Master",
"name": "hbase_master_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "master_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Master Environment Advanced Configuration Snippet (Safety Valve)",
"name": "MASTER_role_env_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The maximum size, in megabytes, per log file for Master logs. Typically used by log4j or logback.",
"display_name": "Master Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Maximum time an HLog remains in the .oldlogdir directory until an HBase Master thread deletes it.",
"display_name": "Maximum Time to Keep HLogs",
"name": "hbase_master_logcleaner_ttl",
"value": "60000"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Master Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master DNS Network Interface parameter.",
"display_name": "Suppress Parameter Validation: HBase Master DNS Network Interface",
"name": "role_config_suppression_hbase_master_dns_interface",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_master_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The address for the HBase Master web UI",
"display_name": "HBase Master Web UI Address",
"name": "hbase_master_info_bindAddress",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master Log Cleaner Plugins parameter.",
"display_name": "Suppress Parameter Validation: HBase Master Log Cleaner Plugins",
"name": "role_config_suppression_hbase_master_logcleaner_plugins",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_master_web_metric_collection",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Coprocessor Master Classes parameter.",
"display_name": "Suppress Parameter Validation: HBase Coprocessor Master Classes",
"name": "role_config_suppression_hbase_coprocessor_master_classes",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Master logs. Typically used by log4j or logback.",
"display_name": "Master Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master DNS Name Server parameter.",
"display_name": "Suppress Parameter Validation: HBase Master DNS Name Server",
"name": "role_config_suppression_hbase_master_dns_nameserver",
"value": "false"
},
{
"desc": "Number of RPC Server instances spun up on HBase Master.",
"display_name": "HBase Master Handler Count",
"name": "hbase_master_handler_count",
"value": "25"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase Master parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for HBase Master",
"name": "role_config_suppression_hbase_master_java_opts",
"value": "false"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The name of the DNS network interface from which an HBase Master should report its IP address.",
"display_name": "HBase Master DNS Network Interface",
"name": "hbase_master_dns_interface",
"value": null
},
{
"desc": "The minimum log level for Master logs",
"display_name": "Master Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Master Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_master_role_env_safety_valve",
"value": "false"
},
{
"desc": "Enables the health test that the Master's process state is consistent with the role configuration",
"display_name": "Master Process Health Test",
"name": "master_scm_health_enabled",
"value": "true"
}
]

File diff suppressed because one or more lines are too long

View File

@ -1,740 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBASE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: HBASE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "Maximum number of rolled-over audit logs to retain. The logs are not deleted if they contain audit events that have not yet been propagated to the Audit Server.",
"display_name": "Number of Audit Logs to Retain",
"name": "navigator_client_max_num_audit_log",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the Short-Circuit Read Enabled Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Short-Circuit Read Enabled Validator",
"name": "service_config_suppression_short_circuit_read_validator",
"value": "false"
},
{
"desc": "Timeout (in ms) for the distributed log splitting manager to receive response from a worker.",
"display_name": "SplitLog Manager Timeout",
"name": "hbase_service_splitlog_manager_timeout",
"value": "120000"
},
{
"desc": "<p>\nEvent filters are defined in a JSON object like the following:\n</p>\n\n<pre>\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n</pre>\n\n<p>\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n</p>\n\n<p>\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n</p>\n\n<p>\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n</p>\n\n<p>\nThe following is the list of fields that can be filtered for HBase events:\n</p>\n<ul>\n <li>allowed: whether the operation was allowed or denied.</li>\n <li>username: the user performing the action.</li>\n <li>tableName: the table affected by the operation.</li>\n <li>family: the column family affected by the operation.</li>\n <li>qualifier: the qualifier the operation.</li>\n <li>action: the action being performed.</li>\n</ul>\n\n<p>\nThe default HBase audit event filter discards events that affect the internal\n-ROOT-, .META. and _acl_ tables.\n</p>\n",
"display_name": "Audit Event Filter",
"name": "navigator_audit_event_filter",
"value": "{\n \"comment\" : [\n \"The default HBase audit event filter discards events that affect the \",\n \"internal -ROOT-, .META. and _acl_ tables.\"\n ],\n \"defaultAction\" : \"accept\",\n \"rules\" : [\n {\n \"action\" : \"discard\",\n \"fields\" : [\n { \"name\" : \"tableName\", \"match\" : \"(?:-ROOT-|.META.|_acl_|hbase:meta|hbase:acl)\" }\n ]\n }\n ]\n}\n"
},
{
"desc": "Enables the canary that checks HBase region availability by scanning a row from every region.",
"display_name": "HBase Region Health Canary",
"name": "hbase_region_health_canary_enabled",
"value": "true"
},
{
"desc": "Name of the HDFS service that this HBase service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "Period of time, in milliseconds, to pause between connection retries to ZooKeeper. Used together with ${zookeeper.retries} in an exponential backoff fashion when making queries to ZooKeeper.",
"display_name": "ZooKeeper Connection Retry Pause Duration",
"name": "zookeeper_pause",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Server Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Server Keystore File Password",
"name": "service_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "When computing the overall HBase cluster health, consider the active HBase Master's health.",
"display_name": "Active Master Health Test",
"name": "hbase_master_health_enabled",
"value": "true"
},
{
"desc": "Size of the threadpool used for hedged reads in hdfs clients. If a read from a block is slow, a parallel 'hedged' read will be started against a different block replica. The first one to return with a result is used while the other one is cancelled. This 'hedged' read feature helps rein in the outliers. A value of zero disables the feature.",
"display_name": "HDFS Hedged Read Threadpool Size",
"name": "hbase_server_dfs_client_hedged_read_threadpool_size",
"value": "0"
},
{
"desc": "Path to the directory where audit logs will be written. The directory will be created if it doesn't exist.",
"display_name": "Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/hbase/audit"
},
{
"desc": "Use this to enable Http server usage on thrift, which is mainly needed for \"doAs\" functionality.",
"display_name": "Enable HBase Thrift Http Server",
"name": "hbase_thriftserver_http",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hadoop TLS/SSL Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hadoop TLS/SSL Validator",
"name": "service_config_suppression_hadoop_ssl_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase User to Impersonate parameter.",
"display_name": "Suppress Parameter Validation: HBase User to Impersonate",
"name": "service_config_suppression_hbase_user_to_impersonate",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the AWS S3 Secret Access Key for Remote Snapshots parameter.",
"display_name": "Suppress Parameter Validation: AWS S3 Secret Access Key for Remote Snapshots",
"name": "service_config_suppression_hbase_snapshot_s3_secret_access_key",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Timeout for all HBase RPCs in milliseconds.",
"display_name": "RPC Timeout",
"name": "hbase_rpc_timeout",
"value": "60000"
},
{
"desc": "Path to the keystore file containing the server certificate and private key used for encrypted web UIs.",
"display_name": "TLS/SSL Server Keystore File Location",
"name": "ssl_server_keystore_location",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>ssl-server.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "HBase Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml",
"name": "hbase_ssl_server_safety_valve",
"value": null
},
{
"desc": "Action to take when the audit event queue is full. Drop the event or shutdown the affected process.",
"display_name": "Audit Queue Policy",
"name": "navigator_audit_queue_policy",
"value": "DROP"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HBase Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_hbase_service_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Service Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase Service Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "service_config_suppression_hbase_service_config_safety_valve",
"value": "false"
},
{
"desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
"display_name": "Enable Log Event Capture",
"name": "catch_events",
"value": "true"
},
{
"desc": "The number of times to retry connections to ZooKeeper. Used for reading and writing root region location. Used together with ${zookeeper.pause} in an exponential backoff fashion when making queries to ZooKeeper.",
"display_name": "ZooKeeper Connection Retries",
"name": "zookeeper_retries",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Gateway Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Gateway Count Validator",
"name": "service_config_suppression_gateway_count_validator",
"value": "false"
},
{
"desc": "Timeout for graceful shutdown of this HBase service. Once this timeout is reached, any remaining running roles are abruptly shutdown. A value of 0 means no timeout.",
"display_name": "Graceful Shutdown Timeout",
"name": "hbase_graceful_stop_timeout",
"value": "180"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml",
"name": "service_config_suppression_hbase_ssl_server_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The health test thresholds of the overall RegionServer health. The check returns \"Concerning\" health if the percentage of \"Healthy\" RegionServers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" RegionServers falls below the critical threshold.",
"display_name": "Healthy RegionServer Monitoring Thresholds",
"name": "hbase_regionservers_healthy_thresholds",
"value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}"
},
{
"desc": "Whether to suppress the results of the RegionServer Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: RegionServer Health",
"name": "service_health_suppression_hbase_region_servers_healthy",
"value": "false"
},
{
"desc": "An alert is published if the HBase region health canary detects at least this many unhealthy regions. This setting takes precedence over the hbase_canary_alert_unhealthy_region_percent_threshold config.",
"display_name": "HBase Canary Unhealthy Region Count Alert Threshold",
"name": "hbase_canary_alert_unhealthy_region_count_threshold",
"value": null
},
{
"desc": "Set to true to cause the hosting server (Master or RegionServer) to abort if a coprocessor throws a Throwable object that is not IOException or a subclass of IOException. Setting it to true might be useful in development environments where one wants to terminate the server as soon as possible to simplify coprocessor failure analysis.",
"display_name": "HBase Coprocessor Abort on Error",
"name": "hbase_coprocessor_abort_on_error",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Write buffer size in bytes. A larger buffer requires more memory on both the client and the server because the server instantiates the passed write buffer to process it but reduces the number of remote procedure calls (RPC). To estimate the amount of server memory used, multiply the value of 'hbase.client.write.buffer' by the value of 'hbase.regionserver.handler.count'.",
"display_name": "HBase Client Write Buffer",
"name": "hbase_client_write_buffer",
"value": "2097152"
},
{
"desc": "The user the management services impersonate when connecting to HBase. If no value is specified, the HBase superuser is used.",
"display_name": "HBase User to Impersonate",
"name": "hbase_user_to_impersonate",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Server Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Server Keystore File Location",
"name": "service_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Amazon S3 Path for Remote Snapshots parameter.",
"display_name": "Suppress Parameter Validation: Amazon S3 Path for Remote Snapshots",
"name": "service_config_suppression_hbase_snapshot_s3_path",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "hbase"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Scheduler Pool for Remote Snapshots in AWS S3 parameter.",
"display_name": "Suppress Parameter Validation: Scheduler Pool for Remote Snapshots in AWS S3",
"name": "service_config_suppression_hbase_snapshot_s3_scheduler_pool",
"value": "false"
},
{
"desc": "Allow indexing of tables in HBase by Lily HBase Indexer. <strong>Note:</strong> Replication must be enabled for indexing to work.",
"display_name": "Enable Indexing",
"name": "hbase_enable_indexing",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Superusers parameter.",
"display_name": "Suppress Parameter Validation: HBase Superusers",
"name": "service_config_suppression_hbase_superuser",
"value": "false"
},
{
"desc": "Password for the server keystore file used for encrypted web UIs.",
"display_name": "TLS/SSL Server Keystore File Password",
"name": "ssl_server_keystore_password",
"value": null
},
{
"desc": "Ratio of Lily HBase Indexers used by each HBase RegionServer while doing replication.",
"display_name": "Replication Source Ratio",
"name": "hbase_replication_source_ratio",
"value": "1.0"
},
{
"desc": "Maximum size of audit log file in MB before it is rolled over.",
"display_name": "Maximum Audit Log File Size",
"name": "navigator_audit_log_max_file_size",
"value": "100"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>core-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "HBase Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "hbase_core_site_safety_valve",
"value": null
},
{
"desc": "The timeout before injecting a snapshot timeout error when waiting for a snapshot completion.",
"display_name": "HBase Master Snapshot Waiting Timeout",
"name": "hbase_snapshot_master_timeoutMillis",
"value": "60000"
},
{
"desc": "Whether to suppress configuration warnings produced by the HBase Authentication And Authorization Validation configuration validator.",
"display_name": "Suppress Configuration Validator: HBase Authentication And Authorization Validation",
"name": "service_config_suppression_hbase_authentication_and_authorization_validator",
"value": "false"
},
{
"desc": "The maximum amount of time the Hbase RegionServer waits for a snapshot to complete.",
"display_name": "HBase RegionServer Snapshot Timeout",
"name": "hbase_snapshot_region_timeout",
"value": "60000"
},
{
"desc": "If this is set to \"kerberos\", HBase REST Server will authenticate its clients. HBase Proxy User Hosts and Groups should be configured to allow specific users to access HBase through REST Server.",
"display_name": "HBase REST Authentication",
"name": "hbase_restserver_security_authentication",
"value": "simple"
},
{
"desc": "Specifies the combined maximum allowed size of a KeyValue instance. This option configures an upper boundary for a single entry saved in a storage file. This option prevents a region from splitting if the data is too large. Set this option to a fraction of the maximum region size. To disable this check, use a value of zero or less.",
"display_name": "Maximum Size of HBase Client KeyValue",
"name": "hbase_client_keyvalue_maxsize",
"value": "10485760"
},
{
"desc": "Enable snapshots. Disabling snapshots requires deletion of all snapshots before restarting the HBase master; the HBase master will not start if snapshots are disabled and snapshots exist.",
"display_name": "Enable Snapshots",
"name": "hbase_snapshot_enabled",
"value": "true"
},
{
"desc": "Maximum number of client retries. Used as a maximum for all operations such as fetching of the root region from the root RegionServer, getting a cell's value, and starting a row update.",
"display_name": "Maximum HBase Client Retries",
"name": "hbase_client_retries_number",
"value": "35"
},
{
"desc": "Set to true to use HBase Secure RPC Engine for remote procedure calls (RPC). This is only effective in simple authentication mode. Does not provide authentication for RPC calls, but provides user information in the audit logs. Changing this setting requires a restart of this and all dependent services and redeployment of client configurations, along with a restart of the Service Monitor management role.",
"display_name": "HBase Secure RPC Engine",
"name": "hbase_secure_rpc_engine",
"value": "false"
},
{
"desc": "For advanced use only, a list of configuration properties that will be used by the Service Monitor instead of the current client configuration for the service.",
"display_name": "Service Monitor Client Config Overrides",
"name": "smon_client_config_overrides",
"value": "<property><name>zookeeper.recovery.retry</name><value>0</value></property><property><name>zookeeper.recovery.retry.intervalmill</name><value>3000</value></property><property><name>hbase.zookeeper.recoverable.waittime</name><value>1000</value></property><property><name>zookeeper.session.timeout</name><value>30000</value></property><property><name>hbase.rpc.timeout</name><value>10000</value></property><property><name>hbase.client.retries.number</name><value>1</value></property><property><name>hbase.client.rpc.maxattempts</name><value>1</value></property><property><name>hbase.client.operation.timeout</name><value>10000</value></property>"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Amazon S3 Access Key ID for Remote Snapshots parameter.",
"display_name": "Suppress Parameter Validation: Amazon S3 Access Key ID for Remote Snapshots",
"name": "service_config_suppression_hbase_snapshot_s3_access_key_id",
"value": "false"
},
{
"desc": "Whether to suppress the results of the HBase Master Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: HBase Master Health",
"name": "service_health_suppression_hbase_master_health",
"value": "false"
},
{
"desc": "Enable HBase authorization.",
"display_name": "HBase Secure Authorization",
"name": "hbase_security_authorization",
"value": "false"
},
{
"desc": "Period of time, in milliseconds, to pause between searches for work. Used as a sleep interval by service threads such as a META scanner and log roller.",
"display_name": "HBase Server Thread Wake Frequency",
"name": "hbase_server_thread_wakefrequency",
"value": "10000"
},
{
"desc": "Whether to suppress configuration warnings produced by the HBase Thrift Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HBase Thrift Server Count Validator",
"name": "service_config_suppression_hbasethriftserver_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Root Directory parameter.",
"display_name": "Suppress Parameter Validation: HDFS Root Directory",
"name": "service_config_suppression_hdfs_rootdir",
"value": "false"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the RegionServer Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: RegionServer Count Validator",
"name": "service_config_suppression_regionserver_count_validator",
"value": "false"
},
{
"desc": "The frequency in which the log4j event publication appender will retry sending undelivered log events to the Event server, in seconds",
"display_name": "Log Event Retry Frequency",
"name": "log_event_retry_frequency",
"value": "30"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Znode Rootserver parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Znode Rootserver",
"name": "service_config_suppression_zookeeper_znode_rootserver",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Filter parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Filter",
"name": "service_config_suppression_navigator_audit_event_filter",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Znode Parent parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Znode Parent",
"name": "service_config_suppression_zookeeper_znode_parent",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Region Health Canary Exclude Tables parameter.",
"display_name": "Suppress Parameter Validation: HBase Region Health Canary Exclude Tables",
"name": "service_config_suppression_hbase_region_health_canary_exclude_tables",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Proxy User Groups parameter.",
"display_name": "Suppress Parameter Validation: HBase Proxy User Groups",
"name": "service_config_suppression_hbase_proxy_user_groups_list",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Service Advanced Configuration Snippet (Safety Valve) for core-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HBase Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "service_config_suppression_hbase_core_site_safety_valve",
"value": "false"
},
{
"desc": "The root znode for HBase in ZooKeeper. All of HBase's ZooKeeper files that are configured with a relative path will go under this node. By default, all of HBase's ZooKeeper file paths are configured with a relative path, so they will all go under this directory unless changed.",
"display_name": "ZooKeeper Znode Parent",
"name": "zookeeper_znode_parent",
"value": "/hbase"
},
{
"desc": "Access key ID required to access Amazon S3 to store remote snapshots.",
"display_name": "Amazon S3 Access Key ID for Remote Snapshots",
"name": "hbase_snapshot_s3_access_key_id",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Client Config Overrides parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Client Config Overrides",
"name": "service_config_suppression_smon_client_config_overrides",
"value": "false"
},
{
"desc": "AWS secret access key required to access S3 to store remote snapshots.",
"display_name": "AWS S3 Secret Access Key for Remote Snapshots",
"name": "hbase_snapshot_s3_secret_access_key",
"value": null
},
{
"desc": "Number of rows to fetch when calling next on a scanner if it is not served from memory. Higher caching values enable faster scanners but require more memory and some calls of next may take longer when the cache is empty.",
"display_name": "HBase Client Scanner Caching",
"name": "hbase_client_scanner_caching",
"value": "100"
},
{
"desc": "Tables to exclude in the HBase Region Health Canary which will scan a row from every region.",
"display_name": "HBase Region Health Canary Exclude Tables",
"name": "hbase_region_health_canary_exclude_tables",
"value": ""
},
{
"desc": "The maximum amount of time the HBase master waits for a snapshot to complete.",
"display_name": "HBase Master Snapshot Timeout",
"name": "hbase_snapshot_master_timeout_millis",
"value": "60000"
},
{
"desc": "Allow HBase tables to be replicated.",
"display_name": "Enable Replication",
"name": "hbase_enable_replication",
"value": "false"
},
{
"desc": "Start a process to periodically check that RegionServer is alive when RegionServer is started. <b>Note</b>: This canary is different from the Cloudera Service Monitoring canary and is provided by the HBase service itself.",
"display_name": "Enable HBase Canary",
"name": "hbase_regionserver_enable_canary",
"value": "false"
},
{
"desc": "Enable HDFS short-circuit read. This allows a client colocated with the DataNode to read HDFS file blocks directly. This gives a performance boost to distributed clients that are aware of locality.",
"display_name": "Enable HDFS Short-Circuit Read",
"name": "dfs_client_read_shortcircuit",
"value": "true"
},
{
"desc": "Path to ZooKeeper Node holding root region location. This is written by the HBase Master and read by clients and RegionServers. If a relative path is given, the parent folder will be ${zookeeper.znode.parent}. By default, the root location is stored at /hbase/root-region-server.",
"display_name": "ZooKeeper Znode Rootserver",
"name": "zookeeper_znode_rootserver",
"value": "root-region-server"
},
{
"desc": "When computing the overall HBase cluster health, consider the health of the backup HBase Masters.",
"display_name": "Backup Masters Health Test",
"name": "hbase_backup_masters_health_enabled",
"value": "true"
},
{
"desc": "Password that protects the private key contained in the server keystore used for encrypted web UIs.",
"display_name": "TLS/SSL Server Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": null
},
{
"desc": "An alert is published if the HBase region health canary runs slowly.",
"display_name": "HBase Region Health Canary Slow Run Alert Enabled",
"name": "hbase_region_health_canary_slow_run_alert_enabled",
"value": "true"
},
{
"desc": "Name of the scheduler pool to use for MR jobs created during export/import of remote snapshots in AWS S3.",
"display_name": "Scheduler Pool for Remote Snapshots in AWS S3",
"name": "hbase_snapshot_s3_scheduler_pool",
"value": null
},
{
"desc": "Configure the type of encrypted communication to be used with RPC.",
"display_name": "HBase Transport Security",
"name": "hbase_rpc_protection",
"value": "authentication"
},
{
"desc": "An alert is published if the HBase region health canary detects at least this percentage of total regions are unhealthy. This threshold is used if the explicit count is not set via the hbase_canary_alert_unhealthy_region_count_threshold config.",
"display_name": "HBase Canary Unhealthy Region Percentage Alert Threshold",
"name": "hbase_canary_alert_unhealthy_region_percent_threshold",
"value": "0.1"
},
{
"desc": "Duration to wait before starting up a 'hedged' read.",
"display_name": "HDFS Hedged Read Delay Threshold",
"name": "hbase_server_dfs_client_hedged_read_threshold_millis",
"value": "500"
},
{
"desc": "Enable TLS/SSL encryption for HBase web UIs.",
"display_name": "Web UI TLS/SSL Encryption Enabled",
"name": "hbase_hadoop_ssl_enabled",
"value": "false"
},
{
"desc": "List of users or groups, who are allowed full privileges, regardless of stored ACLs, across the cluster. Only used when HBase security is enabled.",
"display_name": "HBase Superusers",
"name": "hbase_superuser",
"value": ""
},
{
"desc": "Use this to allow proxy users on thrift gateway, which is mainly needed for \"doAs\" functionality.",
"display_name": "Enable HBase Thrift Proxy Users",
"name": "hbase_thriftserver_support_proxyuser",
"value": "false"
},
{
"desc": "Maximum number of hlog entries to replicate in one go. If this is large, and a consumer takes a while to process the events, the HBase RPC call will time out.",
"display_name": "Replication Batch Size",
"name": "hbase_replication_source_nb_capacity",
"value": "1000"
},
{
"desc": "Whether to suppress configuration warnings produced by the Master Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Master Count Validator",
"name": "service_config_suppression_master_count_validator",
"value": "false"
},
{
"desc": "ZooKeeper session timeout in milliseconds. HBase passes this to the ZooKeeper quorum as the suggested maximum time for a session. See http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions The client sends a requested timeout, the server responds with the timeout that it can give the client.",
"display_name": "ZooKeeper Session Timeout",
"name": "zookeeper_session_timeout",
"value": "60000"
},
{
"desc": "Enable HBase row-level authorization.",
"display_name": "HBase Row-Level Authorization",
"name": "hbase_row_level_authorization",
"value": "false"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the HBase user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that does not correspond to a host name, such as '_no_host'. <b>Note:</b> This property is used only if HBase REST/Thrift Server Authentication is enabled.",
"display_name": "HBase Proxy User Hosts",
"name": "hbase_proxy_user_hosts_list",
"value": "*"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Proxy User Hosts parameter.",
"display_name": "Suppress Parameter Validation: HBase Proxy User Hosts",
"name": "service_config_suppression_hbase_proxy_user_hosts_list",
"value": "false"
},
{
"desc": "Amazon S3 path where remote snapshots should be stored.",
"display_name": "Amazon S3 Path for Remote Snapshots",
"name": "hbase_snapshot_s3_path",
"value": null
},
{
"desc": "Comma-delimited list of groups that you want to allow the HBase user to impersonate. The default '*' allows all groups. To disable entirely, use a string that does not correspond to a group name, such as '_no_group_'. <b>Note:</b> This property is used only if HBase REST/Thrift Server Authentication is enabled.",
"display_name": "HBase Proxy User Groups",
"name": "hbase_proxy_user_groups_list",
"value": "*"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>username: treats the field value as a user name, and ignores any\n host-specific data. 
This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can used to compare HBase events:\n</p>\n\n<ul>\n <li>operation: the HBase operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n <li>tableName: the name of the table affected by the operation.</li>\n <li>family: the column family affected by the operation.</li>\n <li>qualifier: the qualifier of the operation.</li>\n</ul>\n\n<p>\nThe default event tracker for HBase services defines equality by comparing the\nusername, operation, table name, family, and qualifier of the events.\n</p>\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": "{\n \"comment\" : [\n \"The default event tracker for HBase services defines equality by \",\n \"comparing the username, operation, table name, family, and qualifier of \",\n \"the events.\"\n ],\n \"timeToLive\" : 60000,\n \"fields\" : [\n { \"type\": \"value\", \"name\" : \"tableName\" },\n { \"type\": \"value\", \"name\" : \"family\" },\n { \"type\": \"value\", \"name\" : \"qualifier\" },\n { \"type\": \"value\", \"name\" : \"operation\" },\n { \"type\": \"username\", \"name\" : \"username\" }\n ]\n}\n"
},
{
"desc": "The tolerance window that will be used in HBase service tests that depend on detection of the active HBase Master.",
"display_name": "HBase Active Master Detection Window",
"name": "hbase_active_master_detecton_window",
"value": "3"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hbase-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "HBase Service Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
"name": "hbase_service_config_safety_valve",
"value": null
},
{
"desc": "Choose the authentication mechanism used by HBase.",
"display_name": "HBase Secure Authentication",
"name": "hbase_security_authentication",
"value": "simple"
},
{
"desc": "A general client pause time value. Used mostly as a time period to wait before retrying operations such as a failed get or region lookup.",
"display_name": "HBase Client Pause",
"name": "hbase_client_pause",
"value": "100"
},
{
"desc": "Name of the ZooKeeper service that this HBase service instance depends on.",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "If this is set, HBase Thrift Server authenticates its clients. HBase Proxy User Hosts and Groups should be configured to allow specific users to access HBase through Thrift Server.",
"display_name": "HBase Thrift Authentication",
"name": "hbase_thriftserver_security_authentication",
"value": "none"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "hbase"
},
{
"desc": "Whether to suppress configuration warnings produced by the HBase REST Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HBase REST Server Count Validator",
"name": "service_config_suppression_hbaserestserver_count_validator",
"value": "false"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "hbase"
},
{
"desc": "The HDFS directory shared by HBase RegionServers.",
"display_name": "HDFS Root Directory",
"name": "hdfs_rootdir",
"value": "/hbase"
},
{
"desc": "Whether to suppress configuration warnings produced by the ZooKeeper Max Session Timeout Validator configuration validator.",
"display_name": "Suppress Configuration Validator: ZooKeeper Max Session Timeout Validator",
"name": "service_config_suppression_zookeeper_max_session_timeout_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Audit Log Directory",
"name": "service_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "HBase Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hbase_service_env_safety_valve",
"value": null
},
{
"desc": "Whether the step to reload regions back onto the original RegionServers should be skipped during rolling restart. This can be used to increase the speed of rolling restart or upgrade operations, but can result in regions being moved multiple times, decreasing performance for clients during rolling restart.",
"display_name": "Skip Region Reload During Rolling Restart",
"name": "hbase_skip_reload_during_rr",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "HBASE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Server Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Server Keystore Key Password",
"name": "service_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
}
]

View File

@ -1,68 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Balancer parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Balancer",
"name": "role_config_suppression_balancer_java_opts",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Balancer",
"name": "balancer_java_opts",
"value": ""
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "Balancer Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "balancer_config_safety_valve",
"value": null
},
{
"desc": "The policy that should be used to rebalance HDFS storage. The default DataNode policy balances the storage at the DataNode level. This is similar to the balancing policy from prior releases. The BlockPool policy balances the storage at the block pool level as well as at the DataNode level. The BlockPool policy is relevant only to a Federated HDFS service.",
"display_name": "Rebalancing Policy",
"name": "rebalancing_policy",
"value": "DataNode"
},
{
"desc": "The percentage deviation from average utilization, after which a node will be rebalanced. (for example, '10.0' for 10%)",
"display_name": "Rebalancing Threshold",
"name": "rebalancer_threshold",
"value": "10.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Balancer Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Balancer Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_balancer_config_safety_valve",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Balancer in Bytes",
"name": "balancer_java_heapsize",
"value": "1073741824"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
}
]

View File

@ -1,644 +0,0 @@
[
{
"desc": "Specifies the maximum number of threads to use for transferring data in and out of the DataNode.",
"display_name": "Maximum Number of Transfer Threads",
"name": "dfs_datanode_max_xcievers",
"value": "4096"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Transceiver Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Transceiver Usage",
"name": "role_health_suppression_data_node_transceivers_usage",
"value": "false"
},
{
"desc": "In some workloads, the data read from HDFS is known to be significantly large enough that it is unlikely to be useful to cache it in the operating system buffer cache. In this case, the DataNode may be configured to automatically purge all data from the buffer cache after it is delivered to the client. This may improve performance for some workloads by freeing buffer cache spare usage for more cacheable data. This behavior will always be disabled for workloads that read only short sections of a block (e.g HBase random-IO workloads). This property is supported in CDH3u3 or later deployments.",
"display_name": "Enable purging cache after reads",
"name": "dfs_datanode_drop_cache_behind_reads",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether DataNodes should use DataNode hostnames when connecting to DataNodes for data transfer. This property is supported in CDH3u4 or later deployments.",
"display_name": "Use DataNode Hostname",
"name": "dfs_datanode_use_datanode_hostname",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "datanode_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Enables the health test that the DataNode's process state is consistent with the role configuration",
"display_name": "DataNode Process Health Test",
"name": "datanode_scm_health_enabled",
"value": "true"
},
{
"desc": "In some workloads, the data written to HDFS is known to be significantly large enough that it is unlikely to be useful to cache it in the operating system buffer cache. In this case, the DataNode may be configured to automatically purge all data from the buffer cache after it is written to disk. This may improve performance for some workloads by freeing buffer cache spare usage for more cacheable data. This property is supported in CDH3u3 or later deployments.",
"display_name": "Enable purging cache after writes",
"name": "dfs_datanode_drop_cache_behind_writes",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of DataNode in Bytes",
"name": "datanode_java_heapsize",
"value": "1073741824"
},
{
"desc": "Directory where DataNode will place its log files.",
"display_name": "DataNode Log Directory",
"name": "datanode_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_data_node_web_metric_collection",
"value": "false"
},
{
"desc": "While reading block files, the DataNode can use the posix_fadvise system call to explicitly page data into the operating system buffer cache ahead of the current reader's position. This can improve performance especially when disks are highly contended. This configuration specifies the number of bytes ahead of the current read position which the DataNode will attempt to read ahead. A value of 0 disables this feature. This property is supported in CDH3u3 or later deployments.",
"display_name": "Number of read ahead bytes",
"name": "dfs_datanode_readahead_bytes",
"value": "4194304"
},
{
"desc": "Only used when the DataNode Volume Choosing Policy is set to Available Space. Controls how much DataNode volumes are allowed to differ in terms of bytes of free disk space before they are considered imbalanced. If the free space of all the volumes are within this range of each other, the volumes will be considered balanced and block assignments will be done on a pure round robin basis.",
"display_name": "Available Space Policy Balanced Threshold",
"name": "dfs_datanode_available_space_balanced_threshold",
"value": "10737418240"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "datanode_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's DataNode Data Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a DataNode Data Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "DataNode Data Directory Free Space Monitoring Percentage Thresholds",
"name": "datanode_data_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The period to review when computing the moving average of extra time the pause monitor spent paused.",
"display_name": "Pause Duration Monitoring Period",
"name": "datanode_pause_duration_window",
"value": "5"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's DataNode Data Directory.",
"display_name": "DataNode Data Directory Free Space Monitoring Absolute Thresholds",
"name": "datanode_data_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Permissions for the directories on the local file system where the DataNode stores its blocks. The permissions must be octal. 755 and 700 are typical values.",
"display_name": "DataNode Data Directory Permissions",
"name": "dfs_datanode_data_dir_perm",
"value": "700"
},
{
"desc": "Maximum amount of bandwidth that each DataNode can use for balancing. Specified in bytes per second.",
"display_name": "DataNode Balancing Bandwidth",
"name": "dfs_balance_bandwidthPerSec",
"value": "10485760"
},
{
"desc": "The health test thresholds of transceivers usage in a DataNode. Specified as a percentage of the total configured number of transceivers.",
"display_name": "DataNode Transceivers Usage Thresholds",
"name": "datanode_transceivers_usage_thresholds",
"value": "{\"critical\":\"95.0\",\"warning\":\"75.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Data Directory parameter.",
"display_name": "Suppress Parameter Validation: DataNode Data Directory",
"name": "role_config_suppression_dfs_data_dir_list",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_data_node_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 5, \"content\":\"Datanode registration failed\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Got a command from standby NN - ignoring command:.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "Whether to suppress configuration warnings produced by the DataNode Failed Volumes Tolerated Validator configuration validator.",
"display_name": "Suppress Configuration Validator: DataNode Failed Volumes Tolerated Validator",
"name": "role_config_suppression_datanode_failed_volumes_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Data Directory Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Data Directory Status",
"name": "role_health_suppression_data_node_volume_failures",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the DataNode Data Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: DataNode Data Directory Free Space",
"name": "role_health_suppression_datanode_data_directories_free_space",
"value": "false"
},
{
"desc": "When computing the overall DataNode health, consider the host's health.",
"display_name": "DataNode Host Health Test",
"name": "datanode_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for DataNode parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for DataNode",
"name": "role_config_suppression_datanode_java_opts",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for DataNode logs. Typically used by log4j or logback.",
"display_name": "DataNode Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for DataNode",
"name": "datanode_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DateNode Plugins parameter.",
"display_name": "Suppress Parameter Validation: DateNode Plugins",
"name": "role_config_suppression_dfs_datanode_plugins_list",
"value": "false"
},
{
        "desc": "Whether to suppress the results of the NameNode Connectivity health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NameNode Connectivity",
"name": "role_health_suppression_data_node_ha_connectivity",
"value": "false"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Port for the DataNode HTTP web UI. Combined with the DataNode's hostname to build its HTTP address.",
"display_name": "DataNode HTTP Web UI Port",
"name": "dfs_datanode_http_port",
"value": "50075"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds of free space in a DataNode. Specified as a percentage of the capacity on the DataNode.",
"display_name": "DataNode Free Space Monitoring Thresholds",
"name": "datanode_free_space_thresholds",
"value": "{\"critical\":\"10.0\",\"warning\":\"20.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "Port for DataNode's XCeiver Protocol. Combined with the DataNode's hostname to build its address.",
"display_name": "DataNode Transceiver Port",
"name": "dfs_datanode_port",
"value": "50010"
},
{
"desc": "The health test thresholds of the number of blocks on a DataNode",
"display_name": "DataNode Block Count Thresholds",
"name": "datanode_block_count_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"500000.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "The health test thresholds of failed volumes in a DataNode.",
"display_name": "DataNode Volume Failures Thresholds",
"name": "datanode_volume_failures_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
        "desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_data_node_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The minimum log level for DataNode logs",
"display_name": "DataNode Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
        "desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_data_node_scm_health",
"value": "false"
},
{
"desc": "DataNode Policy for picking which volume should get a new block. The Available Space policy is only available starting with CDH 4.3.",
"display_name": "DataNode Volume Choosing Policy",
"name": "dfs_datanode_volume_choosing_policy",
"value": "org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Data Directory Permissions parameter.",
"display_name": "Suppress Parameter Validation: DataNode Data Directory Permissions",
"name": "role_config_suppression_dfs_datanode_data_dir_perm",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average extra time the pause monitor spent paused. Specified as a percentage of elapsed wall clock time.",
"display_name": "Pause Duration Thresholds",
"name": "datanode_pause_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: DataNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: DataNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_datanode_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
        "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_data_node_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the DataNode Reserved Space Validator configuration validator.",
"display_name": "Suppress Configuration Validator: DataNode Reserved Space Validator",
"name": "role_config_suppression_datanode_reserved_space_validator",
"value": "false"
},
{
"desc": "Reserved space in bytes per volume for non Distributed File System (DFS) use.",
"display_name": "Reserved Space for Non DFS Use",
"name": "dfs_datanode_du_reserved",
"value": "10737418240"
},
{
"desc": "The maximum amount of memory a DataNode may use to cache data blocks in memory. Setting it to zero will disable caching.",
"display_name": "Maximum Memory Used for Caching",
"name": "dfs_datanode_max_locked_memory",
"value": "4294967296"
},
{
"desc": "Enables the health test that verifies the DataNode is connected to the NameNode",
"display_name": "DataNode Connectivity Health Test",
"name": "datanode_connectivity_health_enabled",
"value": "true"
},
{
"desc": "Minimum number of running threads for the Hue Thrift server running on each DataNode",
"display_name": "Hue Thrift Server Min Threadcount",
"name": "dfs_thrift_threads_min",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: DataNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_datanode_config_safety_valve",
"value": "false"
},
{
"desc": "The base port where the secure DataNode web UI listens. Combined with the DataNode's hostname to build its secure web UI address.",
"display_name": "Secure DataNode Web UI Port (TLS/SSL)",
"name": "dfs_datanode_https_port",
"value": "50475"
},
{
"desc": "The amount of time to wait for the DataNode to fully start up and connect to the NameNode before enforcing the connectivity check.",
"display_name": "DataNode Connectivity Tolerance at Startup",
"name": "datanode_connectivity_tolerance",
"value": "180"
},
{
"desc": "Maximum number of running threads for the Hue Thrift server running on each DataNode",
"display_name": "Hue Thrift Server Max Threadcount",
"name": "dfs_thrift_threads_max",
"value": "20"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
        "desc": "Whether to suppress the results of the Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Free Space",
"name": "role_health_suppression_data_node_free_space_remaining",
"value": "false"
},
{
"desc": "If this configuration is enabled, the DataNode will instruct the operating system to enqueue all written data to the disk immediately after it is written. This differs from the usual OS policy which may wait for up to 30 seconds before triggering writeback. This may improve performance for some workloads by smoothing the IO profile for data written to disk. This property is supported in CDH3u3 or later deployments.",
"display_name": "Enable immediate enqueuing of data to disk after writes",
"name": "dfs_datanode_sync_behind_writes",
"value": "false"
},
{
        "desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_data_node_swap_memory_usage",
"value": "false"
},
{
"desc": "Comma-separated list of DataNode plug-ins to be activated. If one plug-in cannot be loaded, all the plug-ins are ignored.",
"display_name": "DateNode Plugins",
"name": "dfs_datanode_plugins_list",
"value": ""
},
{
"desc": "Comma-delimited list of directories on the local file system where the DataNode stores HDFS block data. Typical values are /data/N/dfs/dn for N = 1, 2, 3.... These directories should be mounted using the noatime option, and the disks should be configured using JBOD. RAID is not recommended. <strong>Warning: Be very careful when modifying this property. Removing or changing entries can result in data loss.</strong> To hot swap drives in CDH 5.4 and higher, override the value of this property for the specific DataNode role instance that has the drive to be hot-swapped; do not modify the property value in the role group. See <link><a class=\"bold\" href=\"http://tiny.cloudera.com/hot-swap\" target=\"_blank\">Configuring Hot Swap for DataNodes<i class=\"externalLink\"></i></a></link> for more information.",
"display_name": "DataNode Data Directory",
"name": "dfs_data_dir_list",
"value": null
},
{
        "desc": "Whether to suppress the results of the Pause Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Pause Duration",
"name": "role_health_suppression_data_node_pause_duration",
"value": "false"
},
{
"desc": "The number of volumes that are allowed to fail before a DataNode stops offering service. By default, any volume failure will cause a DataNode to shutdown.",
"display_name": "DataNode Failed Volumes Tolerated",
"name": "dfs_datanode_failed_volumes_tolerated",
"value": "0"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "DataNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "If enabled, the DataNode binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind DataNode to Wildcard Address",
"name": "dfs_datanode_bind_wildcard",
"value": "false"
},
{
"desc": "The number of server threads for the DataNode.",
"display_name": "Handler Count",
"name": "dfs_datanode_handler_count",
"value": "3"
},
{
"desc": "The maximum number of rolled log files to keep for DataNode logs. Typically used by log4j or logback.",
"display_name": "DataNode Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "DataNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "DATANODE_role_env_safety_valve",
"value": null
},
{
"desc": "Timeout in seconds for the Hue Thrift server running on each DataNode",
"display_name": "Hue Thrift Server Timeout",
"name": "dfs_thrift_timeout",
"value": "60"
},
{
        "desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_data_node_log_directory_free_space",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
        "desc": "Whether to suppress the results of the Block Count health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Block Count",
"name": "role_health_suppression_data_node_block_count",
"value": "false"
},
{
"desc": "Port for the various DataNode Protocols. Combined with the DataNode's hostname to build its IPC port address.",
"display_name": "DataNode Protocol Port",
"name": "dfs_datanode_ipc_port",
"value": "50020"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the DataNode Log Directory parameter.",
"display_name": "Suppress Parameter Validation: DataNode Log Directory",
"name": "role_config_suppression_datanode_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of DataNode in Bytes parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of DataNode in Bytes",
"name": "role_config_suppression_datanode_java_heapsize",
"value": "false"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "DataNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "datanode_config_safety_valve",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Only used when the DataNode Volume Choosing Policy is set to Available Space. Controls what percentage of new block allocations will be sent to volumes with more available disk space than others. This setting should be in the range 0.0 - 1.0, though in practice 0.5 - 1.0, since there should be no reason to prefer that volumes with less available disk space receive more block allocations.",
"display_name": "Available Space Policy Balanced Preference",
"name": "dfs_datanode_available_space_balanced_preference",
"value": "0.75"
},
{
        "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_data_node_unexpected_exits",
"value": "false"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "datanode_web_metric_collection_enabled",
"value": "true"
}
]

View File

@ -1,332 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Directory where Failover Controller will place its log files.",
"display_name": "Failover Controller Log Directory",
"name": "failover_controller_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The RPC timeout for the HA health monitor.",
"display_name": "HA Health Monitor RPC Timeout",
"name": "ha_health_monitor_rpc_timeout_ms",
"value": "45000"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "Failover Controller Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "fc_config_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hdfs_failovercontroller_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hdfs_failovercontroller_swap_memory_usage",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Failover Controller Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Failover Controller Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_failovercontroller_role_env_safety_valve",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hdfs_failovercontroller_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Failover Controller Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Failover Controller Log Directory",
"name": "role_config_suppression_failover_controller_log_dir",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for Failover Controller logs. Typically used by log4j or logback.",
"display_name": "Failover Controller Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Failover Controller Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Failover Controller Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Failover Controller Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Failover Controller Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_fc_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Failover Controller in Bytes",
"name": "failover_controller_java_heapsize",
"value": "268435456"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hdfs_failovercontroller_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hdfs_failovercontroller_scm_health",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hdfs_failovercontroller_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Failover Controller Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Failover Controller Environment Advanced Configuration Snippet (Safety Valve)",
"name": "FAILOVERCONTROLLER_role_env_safety_valve",
"value": null
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hdfs_failovercontroller_file_descriptor",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Failover Controller parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Failover Controller",
"name": "role_config_suppression_failover_controller_java_opts",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "failovercontroller_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "When computing the overall Failover Controller health, consider the host's health.",
"display_name": "Failover Controller Host Health Test",
"name": "failovercontroller_host_health_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the Failover Controller's process state is consistent with the role configuration",
"display_name": "Failover Controller Process Health Test",
"name": "failovercontroller_scm_health_enabled",
"value": "true"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Failover Controller",
"name": "failover_controller_java_opts",
"value": ""
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for Failover Controller logs. Typically used by log4j or logback.",
"display_name": "Failover Controller Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "The minimum log level for Failover Controller logs",
"display_name": "Failover Controller Logging Threshold",
"name": "log_threshold",
"value": "INFO"
}
]

View File

@ -1,116 +0,0 @@
[
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>hdfs-site.xml</strong>.",
"display_name": "HDFS Client Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "hdfs_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "Whether HDFS clients will use the legacy block reader.",
"display_name": "Use Legacy Blockreader",
"name": "dfs_client_use_legacy_blockreader",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the HDFS Trash Enabled Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HDFS Trash Enabled Validator",
"name": "role_config_suppression_hdfs_trash_disabled_validator",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java process heap memory. Passed to Java -Xmx.",
"display_name": "Client Java Heap Size in Bytes",
"name": "hdfs_client_java_heapsize",
"value": "268435456"
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "90"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Client Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HDFS Client Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_hdfs_client_config_safety_valve",
"value": "false"
},
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/hadoop"
},
{
"desc": "These are Java command line arguments. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Client Java Configuration Options",
"name": "hbase_client_java_opts",
"value": "-Djava.net.preferIPv4Stack=true"
},
{
"desc": "Move deleted files to the trash so that they can be recovered if necessary. This client side configuration takes effect only if the HDFS service-wide trash is disabled (NameNode Filesystem Trash Interval set to 0) and is ignored otherwise. The trash is not automatically emptied when enabled with this configuration.",
"display_name": "Use Trash",
"name": "dfs_client_use_trash",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Enable HDFS short-circuit read. This allows a client colocated with the DataNode to read HDFS file blocks directly. This gives a performance boost to distributed clients that are aware of locality.",
"display_name": "Enable HDFS Short-Circuit Read",
"name": "dfs_client_read_shortcircuit",
"value": "false"
},
{
"desc": "The minimum log level for Gateway logs",
"display_name": "Gateway Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh parameter.",
"display_name": "Suppress Parameter Validation: HDFS Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh",
"name": "role_config_suppression_hdfs_client_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into the client configuration for <strong>hadoop-env.sh</strong>",
"display_name": "HDFS Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh",
"name": "hdfs_client_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Java Configuration Options parameter.",
"display_name": "Suppress Parameter Validation: Client Java Configuration Options",
"name": "role_config_suppression_hbase_client_java_opts",
"value": "false"
}
]

View File

@ -1,440 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Kerberos principal used by the HttpFS roles.",
"display_name": "Role-Specific Kerberos Principal",
"name": "kerberos_role_princ_name",
"value": "httpfs"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_httpfs_scm_health",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "httpfs_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that HttpFS might connect to. This is used when HttpFS is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "HttpFS TLS/SSL Certificate Trust Store File",
"name": "httpfs_https_truststore_file",
"value": null
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_httpfs_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Keystore File parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Keystore File",
"name": "role_config_suppression_httpfs_https_keystore_file",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Log Directory parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Log Directory",
"name": "role_config_suppression_httpfs_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "role_config_suppression_httpfs_process_username",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_httpfs_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "When computing the overall HttpFS health, consider the host's health.",
"display_name": "HttpFS Host Health Test",
"name": "httpfs_host_health_enabled",
"value": "true"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The group that the HttpFS server process should run as.",
"display_name": "System Group",
"name": "httpfs_process_groupname",
"value": "httpfs"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role-Specific Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Role-Specific Kerberos Principal",
"name": "role_config_suppression_kerberos_role_princ_name",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Enables the health test that the HttpFS's process state is consistent with the role configuration",
"display_name": "HttpFS Process Health Test",
"name": "httpfs_scm_health_enabled",
"value": "true"
},
{
"desc": "Password of the keystore used by the HttpFS role for TLS/SSL.",
"display_name": "HttpFS Keystore Password",
"name": "httpfs_https_keystore_password",
"value": null
},
{
"desc": "The password for the HttpFS TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "HttpFS TLS/SSL Certificate Trust Store Password",
"name": "httpfs_https_truststore_password",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "role_config_suppression_httpfs_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: HttpFS TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_httpfs_https_truststore_file",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Signature Secret parameter.",
"display_name": "Suppress Parameter Validation: Signature Secret",
"name": "role_config_suppression_hdfs_httpfs_signature_secret",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Advanced Configuration Snippet (Safety Valve) for httpfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Advanced Configuration Snippet (Safety Valve) for httpfs-site.xml",
"name": "role_config_suppression_httpfs_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HttpFS parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for HttpFS",
"name": "role_config_suppression_httpfs_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Keystore Password parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Keystore Password",
"name": "role_config_suppression_httpfs_https_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_httpfs_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_httpfs_role_env_safety_valve",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Directory where HttpFS will place its log files.",
"display_name": "HttpFS Log Directory",
"name": "httpfs_log_dir",
"value": "/var/log/hadoop-httpfs"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "HttpFS Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>httpfs-site.xml</strong> for this role only.",
"display_name": "HttpFS Advanced Configuration Snippet (Safety Valve) for httpfs-site.xml",
"name": "httpfs_config_safety_valve",
"value": null
},
{
"desc": "The maximum size, in megabytes, per log file for HttpFS logs. Typically used by log4j or logback.",
"display_name": "HttpFS Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for HttpFS",
"name": "httpfs_java_opts",
"value": ""
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "HttpFS Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HTTPFS_role_env_safety_valve",
"value": null
},
{
"desc": "The secret to use for signing client authentication tokens.",
"display_name": "Signature Secret",
"name": "hdfs_httpfs_signature_secret",
"value": "hadoop httpfs secret"
},
{
"desc": "Address of the load balancer used for HttpFS roles. Should be specified in host:port format. <b>Note:</b> Changing this property will regenerate Kerberos keytabs for all HttpFS roles.",
"display_name": "HttpFS Load Balancer",
"name": "httpfs_load_balancer",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Location of the keystore file used by the HttpFS role for TLS/SSL.",
"display_name": "HttpFS Keystore File",
"name": "httpfs_https_keystore_file",
"value": "/var/run/hadoop-httpfs/.keystore"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of HttpFS in Bytes",
"name": "httpfs_java_heapsize",
"value": "268435456"
},
{
"desc": "The port for the administration interface.",
"display_name": "Administration Port",
"name": "hdfs_httpfs_admin_port",
"value": "14001"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_httpfs_file_descriptor",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for HttpFS logs. Typically used by log4j or logback.",
"display_name": "HttpFS Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS Load Balancer parameter.",
"display_name": "Suppress Parameter Validation: HttpFS Load Balancer",
"name": "role_config_suppression_httpfs_load_balancer",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "The port where the REST interface to HDFS is available. The REST interface is served over HTTPS if TLS/SSL is enabled for HttpFS, or over HTTP otherwise.",
"display_name": "REST Port",
"name": "hdfs_httpfs_http_port",
"value": "14000"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_httpfs_host_health",
"value": "false"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_httpfs_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HttpFS TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: HttpFS TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_httpfs_https_truststore_password",
"value": "false"
},
{
"desc": "The minimum log level for HttpFS logs",
"display_name": "HttpFS Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Use TLS/SSL for HttpFS.",
"display_name": "Use TLS/SSL",
"name": "httpfs_use_ssl",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The user that the HttpFS server process should run as.",
"display_name": "System User",
"name": "httpfs_process_username",
"value": "httpfs"
}
]

View File

@ -1,446 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "JournalNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "JOURNALNODE_role_env_safety_valve",
"value": null
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_journal_node_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Fsync Latency heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Fsync Latency",
"name": "role_health_suppression_journal_node_fsync_latency",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JournalNode Log Directory parameter.",
"display_name": "Suppress Parameter Validation: JournalNode Log Directory",
"name": "role_config_suppression_journalnode_log_dir",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of JournalNode in Bytes",
"name": "journalNode_java_heapsize",
"value": "268435456"
},
{
"desc": "If enabled, the JournalNode binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind JournalNode to Wildcard Address",
"name": "journalnode_bind_wildcard",
"value": "false"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "journalnode_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_journal_node_scm_health",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "journalnode_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_journal_node_host_health",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Enables the health test that the JournalNode's process state is consistent with the role configuration",
"display_name": "JournalNode Process Health Test",
"name": "journalnode_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JournalNode Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: JournalNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_journal_node_unexpected_exits",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JournalNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: JournalNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_jn_config_safety_valve",
"value": "false"
},
{
"desc": "Port for the JournalNode's RPC. Combined with the JournalNode's hostname to build its RPC address.",
"display_name": "JournalNode RPC Port",
"name": "dfs_journalnode_rpc_port",
"value": "8485"
},
{
"desc": "Directory on the local file system where NameNode edits are written.",
"display_name": "JournalNode Edits Directory",
"name": "dfs_journalnode_edits_dir",
"value": null
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's JournalNode Edits Directory.",
"display_name": "JournalNode Edits Directory Free Space Monitoring Absolute Thresholds",
"name": "journalnode_edits_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_journal_node_swap_memory_usage",
"value": "false"
},
{
"desc": "Enables the health check that verifies the active NameNode's sync status to the JournalNode",
"display_name": "Active NameNode Sync Status Health Check",
"name": "journalnode_sync_status_enabled",
"value": "true"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "When computing the overall JournalNode health, consider the host's health.",
"display_name": "JournalNode Host Health Test",
"name": "journalnode_host_health_enabled",
"value": "true"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for JournalNode parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for JournalNode",
"name": "role_config_suppression_journalnode_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Sync Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Sync Status",
"name": "role_health_suppression_journal_node_sync_status",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "The base port where the secure JournalNode web UI listens. Combined with the JournalNode's hostname to build its secure web UI address.",
"display_name": "Secure JournalNode Web UI Port (TLS/SSL)",
"name": "dfs_journalnode_https_port",
"value": "8481"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "journalnode_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JournalNode Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: JournalNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_journalnode_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "JournalNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds for JournalNode fsync latency.",
"display_name": "JournalNode Fsync Latency Thresholds",
"name": "journalnode_fsync_latency_thresholds",
"value": "{\"critical\":\"3000.0\",\"warning\":\"1000.0\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "JournalNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "jn_config_safety_valve",
"value": null
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JournalNode Edits Directory parameter.",
"display_name": "Suppress Parameter Validation: JournalNode Edits Directory",
"name": "role_config_suppression_dfs_journalnode_edits_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_journal_node_web_metric_collection",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for JournalNode logs. Typically used by log4j or logback.",
"display_name": "JournalNode Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "journalnode_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_journal_node_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "journalnode_gc_duration_window",
"value": "5"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's JournalNode Edits Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a JournalNode Edits Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "JournalNode Edits Directory Free Space Monitoring Percentage Thresholds",
"name": "journalnode_edits_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_journal_node_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Directory where JournalNode will place its log files.",
"display_name": "JournalNode Log Directory",
"name": "journalnode_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "Whether to suppress the results of the JournalNode Edits Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: JournalNode Edits Directory Free Space",
"name": "role_health_suppression_journal_node_edits_directory_free_space",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for JournalNode",
"name": "journalNode_java_opts",
"value": ""
},
{
"desc": "The maximum number of rolled log files to keep for JournalNode logs. Typically used by log4j or logback.",
"display_name": "JournalNode Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "The amount of time at JournalNode startup allowed for the active NameNode to get in sync with the JournalNode.",
"display_name": "Active NameNode Sync Status Startup Tolerance",
"name": "journalnode_sync_status_startup_tolerance",
"value": "180"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Port for the JournalNode HTTP web UI. Combined with the JournalNode hostname to build its HTTP address.",
"display_name": "JournalNode HTTP Port",
"name": "dfs_journalnode_http_port",
"value": "8480"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for JournalNode logs",
"display_name": "JournalNode Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress the results of the GC Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_journal_node_gc_duration",
"value": "false"
}
]

View File

@ -1,812 +0,0 @@
[
{
"desc": "Whether to suppress the results of the Checkpoint Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Checkpoint Status",
"name": "role_health_suppression_name_node_ha_checkpoint_age",
"value": "false"
},
{
"desc": "Name of the journal located on each JournalNode filesystem.",
"display_name": "Quorum-based Storage Journal name",
"name": "dfs_namenode_quorum_journal_name",
"value": null
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Indicate whether or not to avoid reading from stale DataNodes for which heartbeat messages have not been received by the NameNode for more than Stale DataNode Time Interval. Stale DataNodes are moved to the end of the node list returned for reading. See dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.",
"display_name": "Avoid Reading Stale DataNode",
"name": "dfs_namenode_avoid_read_stale_datanode",
"value": "false"
},
{
"desc": "The base port where the DFS NameNode web UI listens. If the port number is 0, then the server starts on a free port. Combined with the NameNode's hostname to build its HTTP address.",
"display_name": "NameNode Web UI Port",
"name": "dfs_http_port",
"value": "50070"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for NameNode parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for NameNode",
"name": "role_config_suppression_namenode_java_opts",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_name_node_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "namenode_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "The health check thresholds of the NameNode's RPC latency.",
"display_name": "NameNode RPC Latency Thresholds",
"name": "namenode_rpc_latency_thresholds",
"value": "{\"critical\":\"5000.0\",\"warning\":\"1000.0\"}"
},
{
"desc": "The period to review when computing the moving average of extra time the pause monitor spent paused.",
"display_name": "Pause Duration Monitoring Period",
"name": "namenode_pause_duration_window",
"value": "5"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_name_node_log_directory_free_space",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress the results of the RPC Latency health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: RPC Latency",
"name": "role_health_suppression_name_node_rpc_latency",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_name_node_file_descriptor",
"value": "false"
},
{
"desc": "The maximum number of outgoing replication threads a node can have at one time. This limit is waived for the highest priority replications. Configure dfs.namenode.replication.max-streams-hard-limit to set the absolute limit, including the highest-priority replications.",
"display_name": "Maximum Number of Replication Threads on a DataNode",
"name": "dfs_namenode_replication_max_streams",
"value": "20"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "NameNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "When the ratio of number stale DataNodes to total DataNodes marked is greater than this ratio, permit writing to stale nodes to prevent causing hotspots.",
"display_name": "Write Stale DataNode Ratio",
"name": "dfs_namenode_write_stale_datanode_ratio",
"value": "0.5"
},
{
"desc": "The health test thresholds of the number of transactions since the last HDFS namespace checkpoint. Specified as a percentage of the configured checkpointing transaction limit.",
"display_name": "Filesystem Checkpoint Transactions Monitoring Thresholds",
"name": "namenode_checkpoint_transactions_thresholds",
"value": "{\"critical\":\"400.0\",\"warning\":\"200.0\"}"
},
{
"desc": "The health test thresholds for the weighted average extra time the pause monitor spent paused. Specified as a percentage of elapsed wall clock time.",
"display_name": "Pause Duration Thresholds",
"name": "namenode_pause_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_name_node_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress the results of the NameNode Data Directories Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NameNode Data Directories Free Space",
"name": "role_health_suppression_name_node_data_directories_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Filesystem Trash Interval On Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Filesystem Trash Interval On Validator",
"name": "role_config_suppression_fs_trash_interval_minimum_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: NameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_namenode_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Safe Mode Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Safe Mode Status",
"name": "role_health_suppression_name_node_safe_mode",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_name_node_web_metric_collection",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Plugins parameter.",
"display_name": "Suppress Parameter Validation: NameNode Plugins",
"name": "role_config_suppression_dfs_namenode_plugins_list",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Nameservice parameter.",
"display_name": "Suppress Parameter Validation: NameNode Nameservice",
"name": "role_config_suppression_dfs_federation_namenode_nameservice",
"value": "false"
},
{
"desc": "Enables the health test of the rolling metadata upgrade status of the NameNode. This covers rolling metadata upgrades. Nonrolling metadata upgrades are covered in a separate health test.",
"display_name": "HDFS Rolling Metadata Upgrade Status Health Test",
"name": "namenode_rolling_upgrade_status_enabled",
"value": "true"
},
{
"desc": "Mount points that are mapped to this NameNode's nameservice.",
"display_name": "Mount Points",
"name": "nameservice_mountpoints",
"value": "/"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "namenode_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"},\n {\"alert\": false, \"rate\": 1, \"threshold\":\"INFO\", \"content\":\"Triggering checkpoint.*\"}\n ]\n}\n"
},
{
"desc": "Enables the health test that the NameNode is not in safemode",
"display_name": "NameNode Safemode Health Test",
"name": "namenode_safe_mode_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Edits Directories parameter.",
"display_name": "Suppress Parameter Validation: NameNode Edits Directories",
"name": "role_config_suppression_dfs_namenode_edits_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_exclude.txt parameter.",
"display_name": "Suppress Parameter Validation: NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_exclude.txt",
"name": "role_config_suppression_namenode_hosts_exclude_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the NameNode Service Handler Count Minimum Validator configuration validator.",
"display_name": "Suppress Configuration Validator: NameNode Service Handler Count Minimum Validator",
"name": "role_config_suppression_dfs_namenode_service_handler_count_minimum_validator",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of NameNode in Bytes",
"name": "namenode_java_heapsize",
"value": "1073741824"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the JournalNode Sync Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: JournalNode Sync Status",
"name": "role_health_suppression_name_node_journal_node_sync_status",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for NameNode logs. Typically used by log4j or logback.",
"display_name": "NameNode Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Determines where on the local file system the NameNode should store the name table (fsimage). For redundancy, enter a comma-delimited list of directories to replicate the name table in all of the directories. Typical values are /data/N/dfs/nn where N=1..3.",
"display_name": "NameNode Data Directories",
"name": "dfs_name_dir_list",
"value": null
},
{
"desc": "Whether to suppress the results of the Upgrade Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Upgrade Status",
"name": "role_health_suppression_name_node_upgrade_status",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Shared Edits Directory parameter.",
"display_name": "Suppress Parameter Validation: Shared Edits Directory",
"name": "role_config_suppression_dfs_namenode_shared_edits_dir",
"value": "false"
},
{
"desc": "Default time interval for marking a DataNode as \"stale\". If the NameNode has not received heartbeat messages from a DataNode for more than this time interval, the DataNode is marked and treated as \"stale\" by default.",
"display_name": "Stale DataNode Time Interval",
"name": "dfs_namenode_stale_datanode_interval",
"value": "30000"
},
{
"desc": "Whether to suppress the results of the Pause Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Pause Duration",
"name": "role_health_suppression_name_node_pause_duration",
"value": "false"
},
{
"desc": "The port where the NameNode runs the HDFS protocol. Combined with the NameNode's hostname to build its address.",
"display_name": "NameNode Port",
"name": "namenode_port",
"value": "8020"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The period to review when computing the moving average of the NameNode's RPC latency.",
"display_name": "NameNode RPC Latency Monitoring Window",
"name": "namenode_rpc_latency_window",
"value": "5"
},
{
"desc": "Optional port for the service-rpc address which can be used by HDFS daemons instead of sharing the RPC address used by the clients.",
"display_name": "NameNode Service RPC Port",
"name": "dfs_namenode_servicerpc_address",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Quorum-based Storage Journal name parameter.",
"display_name": "Suppress Parameter Validation: Quorum-based Storage Journal name",
"name": "role_config_suppression_dfs_namenode_quorum_journal_name",
"value": "false"
},
{
"desc": "The health check thresholds for the number of out-of-sync JournalNodes for this NameNode.",
"display_name": "NameNode Out-Of-Sync JournalNodes Thresholds",
"name": "namenode_out_of_sync_journal_nodes_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds of the age of the HDFS namespace checkpoint. Specified as a percentage of the configured checkpoint interval.",
"display_name": "Filesystem Checkpoint Age Monitoring Thresholds",
"name": "namenode_checkpoint_age_thresholds",
"value": "{\"critical\":\"400.0\",\"warning\":\"200.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "Enables the health test that the NameNode's process state is consistent with the role configuration",
"display_name": "NameNode Process Health Test",
"name": "namenode_scm_health_enabled",
"value": "true"
},
{
"desc": "The absolute maximum number of outgoing replication threads a given node can have at one time. The regular limit (dfs.namenode.replication.max-streams) is waived for highest-priority block replications. Highest replication priority is for blocks that are at a very high risk of loss if the disk or server on which they remain fails. These are usually blocks with only one copy, or blocks with zero live copies but a copy in a node being decommissioned. dfs.namenode.replication.max-streams-hard-limit provides a limit on the total number of outgoing replication threads, including threads of all priorities.",
"display_name": "Hard Limit on the Number of Replication Threads on a Datanode",
"name": "dfs_namenode_replication_max_streams_hard_limit",
"value": "40"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "NameNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "NAMENODE_role_env_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Directory where NameNode will place its log files.",
"display_name": "NameNode Log Directory",
"name": "namenode_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Enables the health test of the metadata upgrade status of the NameNode. This covers nonrolling metadata upgrades. Rolling metadata upgrades are covered in a separate health test.",
"display_name": "HDFS Metadata Upgrade Status Health Test",
"name": "namenode_upgrade_status_enabled",
"value": "true"
},
{
"desc": "Nameservice of this NameNode. The Nameservice represents the interface to this NameNode and its High Availability partner. The Nameservice also represents the namespace associated with a federated NameNode.",
"display_name": "NameNode Nameservice",
"name": "dfs_federation_namenode_nameservice",
"value": null
},
{
"desc": "The minimum log level for NameNode logs",
"display_name": "NameNode Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Specifies the percentage of blocks that should satisfy the minimal replication requirement defined by dfs.replication.min. Enter a value less than or equal to 0 to wait for any particular percentage of blocks before exiting safemode. Values greater than 1 will make safemode permanent.",
"display_name": "Safemode Threshold Percentage",
"name": "dfs_safemode_threshold_pct",
"value": "0.999"
},
{
"desc": "Directories on the local file system to store the NameNode edits. If not set, the edits are stored in the NameNode's Data Directories. The value of this configuration is automatically generated to be the Quorum-based Storage URI if there are JournalNodes and this NameNode is not Highly Available.",
"display_name": "NameNode Edits Directories",
"name": "dfs_namenode_edits_dir",
"value": null
},
{
"desc": "If set to false and if one of the replicas of the NameNode storage fails, such as a temporary failure of NFS, this directory is not used until the NameNode restarts. If enabled, failed storage is re-checked on every checkpoint and, if it becomes valid, the NameNode will try to restore the edits and fsimage.",
"display_name": "Restore NameNode Directories at Checkpoint Time",
"name": "dfs_name_dir_restore",
"value": "false"
},
{
"desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.",
"display_name": "Health Check Startup Tolerance",
"name": "namenode_startup_tolerance",
"value": "5"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for NameNode",
"name": "namenode_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether to suppress configuration warnings produced by the Java Heap Size of NameNode in Bytes Minimum Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Java Heap Size of NameNode in Bytes Minimum Validator",
"name": "role_config_suppression_namenode_java_heapsize_minimum_validator",
"value": "false"
},
{
"desc": "Enable Automatic Failover to maintain High Availability. Requires a ZooKeeper service and a High Availability NameNode partner.",
"display_name": "Enable Automatic Failover",
"name": "autofailover_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NameNode Data Directories. Specified as a percentage of the capacity on that filesystem. This setting is not used if a NameNode Data Directories Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "NameNode Data Directories Free Space Monitoring Percentage Thresholds",
"name": "namenode_data_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>dfs_hosts_exclude.txt</strong> for this role only.",
"display_name": "NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_exclude.txt",
"name": "namenode_hosts_exclude_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NameNode Data Directories.",
"display_name": "NameNode Data Directories Free Space Monitoring Absolute Thresholds",
"name": "namenode_data_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NameNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_namenode_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Rolling Upgrade Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Rolling Upgrade Status",
"name": "role_health_suppression_name_node_rolling_upgrade_status",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NameNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "This determines the percentage amount of block invalidations (deletes) to do over a single DataNode heartbeat deletion command. The final deletion count is determined by applying this percentage to the number of live nodes in the system. The resultant number is the number of blocks from the deletion list chosen for proper invalidation over a single heartbeat of a single DataNode.",
"display_name": "Invalidate Work Percentage Per Iteration",
"name": "dfs_namenode_invalidate_work_pct_per_iteration",
"value": "0.32"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "namenode_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Timeout when writing edits to a JournalNode. This only applies when NameNode high availability is enabled.",
"display_name": "JournalNode Write Transactions Timeout",
"name": "dfs_qjournal_write_txns_timeout_ms",
"value": "20000"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The time between two periodic file system checkpoints.",
"display_name": "Filesystem Checkpoint Period",
"name": "fs_checkpoint_period",
"value": "3600"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "The access time for HDFS files is precise up to this value. Setting a value of 0 disables access times for HDFS. When using the NFS Gateway role, make sure this property is enabled.",
"display_name": "Access Time Precision",
"name": "dfs_access_time_precision",
"value": "3600000"
},
{
"desc": "Minimum number of running threads for the Hue Thrift server running on the NameNode",
"display_name": "Hue Thrift Server Min Threadcount",
"name": "dfs_thrift_threads_min",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topology Script File Name parameter.",
"display_name": "Suppress Parameter Validation: Topology Script File Name",
"name": "role_config_suppression_topology_script_file_name",
"value": "false"
},
{
"desc": "Maximum number of running threads for the Hue Thrift server running on the NameNode",
"display_name": "Hue Thrift Server Max Threadcount",
"name": "dfs_thrift_threads_max",
"value": "20"
},
{
"desc": "Whether to suppress configuration warnings produced by the NameNode Handler Count Minimum Validator configuration validator.",
"display_name": "Suppress Configuration Validator: NameNode Handler Count Minimum Validator",
"name": "role_config_suppression_dfs_namenode_handler_count_minimum_validator",
"value": "false"
},
{
"desc": "Full path to a custom topology script on the host file system. The topology script is used to determine the rack location of nodes. If left blank, a topology script will be provided that uses your hosts' rack information, visible in the \"Hosts\" page.",
"display_name": "Topology Script File Name",
"name": "topology_script_file_name",
"value": null
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The minimum log level for NameNode block state change log messages. Setting this to WARN or higher greatly reduces the amount of log output related to block state changes.",
"display_name": "NameNode Block State Change Logging Threshold",
"name": "namenode_blockstatechange_log_threshold",
"value": "INFO"
},
{
"desc": "Indicate whether or not to avoid writing to stale DataNodes for which heartbeat messages have not been received by the NameNode for more than Stale DataNode Time Interval. Writes avoid using stale DataNodes unless more than a configured ratio (dfs.namenode.write.stale.datanode.ratio) of DataNodes are marked as stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting for reads.",
"display_name": "Avoid Writing Stale DataNode",
"name": "dfs_namenode_avoid_write_stale_datanode",
"value": "false"
},
{
"desc": "The health test thresholds of failed status directories in a NameNode.",
"display_name": "NameNode Directory Failures Thresholds",
"name": "namenode_directory_failures_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_name_node_unexpected_exits",
"value": "false"
},
{
"desc": "Number of minutes between trash checkpoints. Also controls the number of minutes after which a trash checkpoint directory is deleted. To disable the trash feature, enter 0.",
"display_name": "Filesystem Trash Interval",
"name": "fs_trash_interval",
"value": "1440"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "NameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "namenode_config_safety_valve",
"value": null
},
{
"desc": "This determines the total amount of block transfers to begin in parallel at a DataNode for replication, when such a command list is being sent over a DataNode heartbeat by the NameNode. The actual number is obtained by multiplying this value by the total number of live nodes in the cluster. The result number is the number of blocks to transfer immediately, per DataNode heartbeat.",
"display_name": "Replication Work Multiplier Per Iteration",
"name": "dfs_namenode_replication_work_multiplier_per_iteration",
"value": "10"
},
{
"desc": "The maximum number of rolled log files to keep for NameNode logs. Typically used by log4j or logback.",
"display_name": "NameNode Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Determines extension of safemode in milliseconds after the threshold level is reached.",
"display_name": "Safemode Extension",
"name": "dfs_safemode_extension",
"value": "30000"
},
{
"desc": "Timeout in seconds for the Hue Thrift server running on the NameNode",
"display_name": "Hue Thrift Server Timeout",
"name": "dfs_thrift_timeout",
"value": "60"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The number of server threads for the NameNode used for service calls. Only used when NameNode Service RPC Port is configured.",
"display_name": "NameNode Service Handler Count",
"name": "dfs_namenode_service_handler_count",
"value": "30"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_allow.txt parameter.",
"display_name": "Suppress Parameter Validation: NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_allow.txt",
"name": "role_config_suppression_namenode_hosts_allow_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Data Directories parameter.",
"display_name": "Suppress Parameter Validation: NameNode Data Directories",
"name": "role_config_suppression_dfs_name_dir_list",
"value": "false"
},
{
"desc": "The number of transactions after which the NameNode or SecondaryNameNode will create a checkpoint of the namespace, regardless of whether the checkpoint period has expired.",
"display_name": "Filesystem Checkpoint Transaction Threshold",
"name": "fs_checkpoint_txns",
"value": "1000000"
},
{
"desc": "When computing the overall NameNode health, consider the host's health.",
"display_name": "NameNode Host Health Test",
"name": "namenode_host_health_enabled",
"value": "true"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_name_node_scm_health",
"value": "false"
},
{
"desc": "The base port where the secure NameNode web UI listens.",
"display_name": "Secure NameNode Web UI Port (TLS/SSL)",
"name": "dfs_https_port",
"value": "50470"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>dfs_hosts_allow.txt</strong> for this role only.",
"display_name": "NameNode Advanced Configuration Snippet (Safety Valve) for dfs_hosts_allow.txt",
"name": "namenode_hosts_allow_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Mount Points parameter.",
"display_name": "Suppress Parameter Validation: Mount Points",
"name": "role_config_suppression_nameservice_mountpoints",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "If enabled, the NameNode binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind NameNode to Wildcard Address",
"name": "namenode_bind_wildcard",
"value": "false"
},
{
"desc": "Comma-separated list of NameNode plug-ins to be activated. If one plug-in cannot be loaded, all the plug-ins are ignored.",
"display_name": "NameNode Plugins",
"name": "dfs_namenode_plugins_list",
"value": ""
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_name_node_host_health",
"value": "false"
},
{
"desc": "Directory on a shared storage device, such as a Quorum-based Storage URI or a local directory that is an NFS mount from a NAS, to store the NameNode edits. The value of this configuration is automatically generated to be the Quorum Journal URI if there are JournalNodes and this NameNode is Highly Available.",
"display_name": "Shared Edits Directory",
"name": "dfs_namenode_shared_edits_dir",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The number of server threads for the NameNode.",
"display_name": "NameNode Handler Count",
"name": "dfs_namenode_handler_count",
"value": "30"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NameNode Log Directory parameter.",
"display_name": "Suppress Parameter Validation: NameNode Log Directory",
"name": "role_config_suppression_namenode_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Name Directory Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Name Directory Status",
"name": "role_health_suppression_name_node_directory_failures",
"value": "false"
},
{
"desc": "Specifies the number of DataNodes that must be live before the name node exits safemode. Enter a value less than or equal to 0 to take the number of live DataNodes into account when deciding whether to remain in safemode during startup. Values greater than the number of DataNodes in the cluster will make safemode permanent.",
"display_name": "Safemode Minimum DataNodes",
"name": "dfs_safemode_min_datanodes",
"value": "0"
}
]

View File

@ -1,386 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NFS Gateway Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NFS Gateway Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_nfsgateway_role_env_safety_valve",
"value": "false"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_nfsgateway_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NFS Gateway Log Directory parameter.",
"display_name": "Suppress Parameter Validation: NFS Gateway Log Directory",
"name": "role_config_suppression_nfsgateway_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "nfsgateway_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "NFS clients often reorder writes. As a result, sequential writes can arrive at the NFS Gateway in random order. This directory is used to temporarily save out-of-order writes before writing to HDFS. For each file, the out-of-order writes are dumped after they are accumulated to exceed certain threshold (e.g., 1MB) in memory. Please make sure this directory has enough space. For example, if the application uploads 10 files with each having 100MB, it is recommended that this directory have roughly 1GB of space in case write reorder happens (in the worst case) to every file.",
"display_name": "Temporary Dump Directory",
"name": "dfs_nfs3_dump_dir",
"value": "/tmp/.hdfs-nfs"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of NFS Gateway in Bytes",
"name": "nfsgateway_java_heapsize",
"value": "268435456"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "NFS Gateway Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "nfsgateway_config_safety_valve",
"value": null
},
{
"desc": "The port number of the system portmap or rpcbind service. This configuration is used by Cloudera Manager to verify if the system portmap or rpcbind service is running before starting NFS Gateway role. Cloudera Manager does not manage the system portmap or rpcbind service.",
"display_name": "Portmap (or Rpcbind) Port",
"name": "nfs3_portmap_port",
"value": "111"
},
{
"desc": "The NFS Gateway server port.",
"display_name": "NFS Gateway Server Port",
"name": "nfs3_server_port",
"value": "2049"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for NFS Gateway",
"name": "nfsgateway_java_opts",
"value": ""
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NFS Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NFS Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_nfsgateway_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "When computing the overall NFS Gateway health, consider the host's health.",
"display_name": "NFS Gateway Host Health Test",
"name": "nfsgateway_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Temporary Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Temporary Dump Directory",
"name": "role_config_suppression_dfs_nfs3_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "By default, NFS Gateway exported directories can be mounted by any client. For better access control, update this property with a list of host names and access privileges separated by whitespace characters. Host name format can be a single host, a Java regular expression, or an IPv4 address. The access privilege uses <strong>rw</strong> to specify readwrite and <strong>ro</strong> to specify readonly access. If the access privilege is not provided, the default is read-only. Examples of host name format and access privilege: \"192.168.0.0/22 rw\", \"host.*.example.com\", \"host1.test.org ro\".",
"display_name": "Allowed Hosts and Privileges",
"name": "dfs_nfs_exports_allowed_hosts",
"value": "* rw"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The port number of the mount daemon implemented inside the NFS Gateway server role.",
"display_name": "NFS Gateway MountD Port",
"name": "nfs3_mountd_port",
"value": "4242"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "NFS Gateway Environment Advanced Configuration Snippet (Safety Valve)",
"name": "NFSGATEWAY_role_env_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "NFS Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_nfsgateway_host_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_nfsgateway_log_directory_free_space",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for NFS Gateway logs. Typically used by log4j or logback.",
"display_name": "NFS Gateway Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_nfsgateway_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Allowed Hosts and Privileges parameter.",
"display_name": "Suppress Parameter Validation: Allowed Hosts and Privileges",
"name": "role_config_suppression_dfs_nfs_exports_allowed_hosts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_nfsgateway_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Temporary Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Temporary Dump Directory Free Space",
"name": "role_health_suppression_nfsgateway_dump_directory_free_space",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for NFS Gateway logs. Typically used by log4j or logback.",
"display_name": "NFS Gateway Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Temporary Dump Directory.",
"display_name": "Temporary Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "nfsgateway_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for NFS Gateway parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for NFS Gateway",
"name": "role_config_suppression_nfsgateway_java_opts",
"value": "false"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_nfsgateway_unexpected_exits",
"value": "false"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for NFS Gateway logs",
"display_name": "NFS Gateway Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NFS Gateway Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: NFS Gateway Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_nfsgateway_config_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Temporary Dump Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Temporary Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Temporary Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "nfsgateway_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Directory where NFS Gateway will place its log files.",
"display_name": "NFS Gateway Log Directory",
"name": "nfsgateway_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "Enables the health test that the NFS Gateway's process state is consistent with the role configuration",
"display_name": "NFS Gateway Process Health Test",
"name": "nfsgateway_scm_health_enabled",
"value": "true"
}
]

View File

@ -1,446 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"},\n {\"alert\": false, \"rate\": 1, \"threshold\":\"INFO\", \"content\":\"Triggering checkpoint.*\"}\n ]\n}\n"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_secondary_name_node_web_metric_collection",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Secondary NameNode",
"name": "secondarynamenode_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's HDFS Checkpoint Directories.",
"display_name": "HDFS Checkpoint Directories Free Space Monitoring Absolute Thresholds",
"name": "secondarynamenode_checkpoint_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Secondary NameNode in Bytes",
"name": "secondary_namenode_java_heapsize",
"value": "1073741824"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SecondaryNameNode Nameservice parameter.",
"display_name": "Suppress Parameter Validation: SecondaryNameNode Nameservice",
"name": "role_config_suppression_dfs_secondarynamenode_nameservice",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_secondary_name_node_scm_health",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "secondarynamenode_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SecondaryNameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: SecondaryNameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "role_config_suppression_secondarynamenode_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_secondary_name_node_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SecondaryNameNode Log Directory parameter.",
"display_name": "Suppress Parameter Validation: SecondaryNameNode Log Directory",
"name": "role_config_suppression_secondarynamenode_log_dir",
"value": "false"
},
{
"desc": "Enables the health test that the SecondaryNameNode's process state is consistent with the role configuration",
"display_name": "SecondaryNameNode Process Health Test",
"name": "secondarynamenode_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SecondaryNameNode Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: SecondaryNameNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "The number of transactions after which the NameNode or SecondaryNameNode will create a checkpoint of the namespace, regardless of whether the checkpoint period has expired.",
"display_name": "Filesystem Checkpoint Transaction Threshold",
"name": "fs_checkpoint_txns",
"value": "1000000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Checkpoint Directories parameter.",
"display_name": "Suppress Parameter Validation: HDFS Checkpoint Directories",
"name": "role_config_suppression_fs_checkpoint_dir_list",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "secondarynamenode_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "SecondaryNameNode Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "secondarynamenode_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Secondary NameNode parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Secondary NameNode",
"name": "role_config_suppression_secondarynamenode_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_secondary_name_node_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress the results of the GC Duration heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_secondary_name_node_gc_duration",
"value": "false"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "SecondaryNameNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SECONDARYNAMENODE_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_secondary_name_node_unexpected_exits",
"value": "false"
},
{
"desc": "If enabled, the SecondaryNameNode binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind SecondaryNameNode to Wildcard Address",
"name": "secondary_namenode_bind_wildcard",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "SecondaryNameNode Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The time between two periodic file system checkpoints.",
"display_name": "Filesystem Checkpoint Period",
"name": "fs_checkpoint_period",
"value": "3600"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SecondaryNameNode Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: SecondaryNameNode Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_secondarynamenode_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the HDFS Checkpoint Directories Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: HDFS Checkpoint Directories Free Space",
"name": "role_health_suppression_secondary_name_node_checkpoint_directories_free_space",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_secondary_name_node_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "secondarynamenode_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's HDFS Checkpoint Directories. Specified as a percentage of the capacity on that filesystem. This setting is not used if a HDFS Checkpoint Directories Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "HDFS Checkpoint Directories Free Space Monitoring Percentage Thresholds",
"name": "secondarynamenode_checkpoint_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "When computing the overall SecondaryNameNode health, consider the host's health.",
"display_name": "SecondaryNameNode Host Health Test",
"name": "secondarynamenode_host_health_enabled",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for SecondaryNameNode logs. Typically used by log4j or logback.",
"display_name": "SecondaryNameNode Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_secondary_name_node_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "secondarynamenode_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "secondarynamenode_gc_duration_window",
"value": "5"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_secondary_name_node_log_directory_free_space",
"value": "false"
},
{
"desc": "Determines where on the local file system the DFS SecondaryNameNode should store the temporary images to merge. For redundancy, enter a comma-delimited list of directories to replicate the image in all of the directories. Typical values are /data/N/dfs/snn for N = 1, 2, 3...",
"display_name": "HDFS Checkpoint Directories",
"name": "fs_checkpoint_dir_list",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for SecondaryNameNode logs. Typically used by log4j or logback.",
"display_name": "SecondaryNameNode Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Nameservice of this SecondaryNameNode",
"display_name": "SecondaryNameNode Nameservice",
"name": "dfs_secondarynamenode_nameservice",
"value": null
},
{
"desc": "The minimum log level for SecondaryNameNode logs",
"display_name": "SecondaryNameNode Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The base port where the secure SecondaryNameNode web UI listens.",
"display_name": "Secure SecondaryNameNode Web UI Port (TLS/SSL)",
"name": "dfs_secondary_https_port",
"value": "50495"
},
{
"desc": "Directory where SecondaryNameNode will place its log files.",
"display_name": "SecondaryNameNode Log Directory",
"name": "secondarynamenode_log_dir",
"value": "/var/log/hadoop-hdfs"
},
{
"desc": "The SecondaryNameNode HTTP port. If the port is 0, then the server starts on a free port. Combined with the SecondaryNameNode's hostname to build its HTTP address.",
"display_name": "SecondaryNameNode Web UI Port",
"name": "dfs_secondary_http_port",
"value": "50090"
}
]

File diff suppressed because it is too large Load Diff

View File

@ -1,98 +0,0 @@
[
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>hive-site.xml</strong>.",
"display_name": "Hive Client Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_client_config_safety_valve",
"value": null
},
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/hive"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Java Configuration Options parameter.",
"display_name": "Suppress Parameter Validation: Client Java Configuration Options",
"name": "role_config_suppression_hive_client_java_opts",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into the client configuration for <strong>hive-env.sh</strong>",
"display_name": "Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hive-env.sh",
"name": "hive_client_env_safety_valve",
"value": null
},
{
"desc": "Timeout for requests to the Hive Metastore Server. Consider increasing this if you have tables with a lot of metadata and see timeout errors. Used by most Hive Metastore clients such as Hive CLI and HiveServer2, but not by Impala. Impala has a separately configured timeout.",
"display_name": "Hive Metastore Connection Timeout",
"name": "hive_metastore_timeout",
"value": "300"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Client Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Client Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "role_config_suppression_hive_client_config_safety_valve",
"value": "false"
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "90"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java process heap memory. Passed to Java -Xmx.",
"display_name": "Client Java Heap Size in Bytes",
"name": "hive_client_java_heapsize",
"value": "2147483648"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hive-env.sh parameter.",
"display_name": "Suppress Parameter Validation: Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hive-env.sh",
"name": "role_config_suppression_hive_client_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "The minimum log level for Gateway logs",
"display_name": "Gateway Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "These are Java command line arguments. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Client Java Configuration Options",
"name": "hive_client_java_opts",
"value": "-XX:MaxPermSize=512M -Djava.net.preferIPv4Stack=true"
}
]

View File

@ -1,392 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Hive Metastore Server",
"name": "hive_metastore_java_opts",
"value": "-XX:MaxPermSize=512M -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Hive Metastore Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Hive Metastore Server",
"name": "role_config_suppression_hive_metastore_java_opts",
"value": "false"
},
{
"desc": "The period to review when computing the moving average of extra time the pause monitor spent paused.",
"display_name": "Pause Duration Monitoring Period",
"name": "hivemetastore_pause_duration_window",
"value": "5"
},
{
    "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hivemetastore_host_health",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hivemetastore_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Maximum number of worker threads in the Hive Metastore Server's thread pool",
"display_name": "Max Hive Metastore Server Threads",
"name": "hive_metastore_max_threads",
"value": "100000"
},
{
"desc": "Enables the health test that checks that basic Hive Metastore operations succeed",
"display_name": "Hive Metastore Canary Health Test",
"name": "metastore_canary_health_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
    "desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hivemetastore_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Metrics Sample File Location parameter.",
"display_name": "Suppress Parameter Validation: Metrics Sample File Location",
"name": "role_config_suppression_hive_metrics_sample_file_location",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Minimum number of worker threads in the Hive Metastore Server's thread pool",
"display_name": "Min Hive Metastore Server Threads",
"name": "hive_metastore_min_threads",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hive_metastore_env_safety_valve",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hivemetastore_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
    "desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hivemetastore_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hivemetastore_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Server Log Directory",
"name": "role_config_suppression_hive_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Controls whether the Hive metrics subsystem is enabled for the role.",
"display_name": "Enable Metrics Subsystem",
"name": "hive_metrics_enabled",
"value": "true"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "When computing the overall Hive Metastore Server health, consider the host's health.",
"display_name": "Hive Metastore Server Host Health Test",
"name": "hivemetastore_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average extra time the pause monitor spent paused. Specified as a percentage of elapsed wall clock time.",
"display_name": "Pause Duration Thresholds",
"name": "hivemetastore_pause_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Hive Metastore Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Hive Metastore Server in Bytes",
"name": "hive_metastore_java_heapsize",
"value": "8589934592"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
    "desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hivemetastore_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Port on which Hive Metastore Server will listen for connections.",
"display_name": "Hive Metastore Server Port",
"name": "hive_metastore_port",
"value": "9083"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
    "desc": "Whether to suppress the results of the Pause Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Pause Duration",
"name": "role_health_suppression_hivemetastore_pause_duration",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for Hive Metastore Server logs. Typically used by log4j or logback.",
"display_name": "Hive Metastore Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hive-site.xml</strong> for this role only.",
"display_name": "Hive Metastore Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_metastore_config_safety_valve",
"value": null
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Hive Metastore Canary health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Hive Metastore Canary",
"name": "role_health_suppression_hivemetastore_canary_health",
"value": "false"
},
{
"desc": "Enables the health test that the Hive Metastore Server's process state is consistent with the role configuration",
"display_name": "Hive Metastore Server Process Health Test",
"name": "hivemetastore_scm_health_enabled",
"value": "true"
},
{
    "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hivemetastore_unexpected_exits",
"value": "false"
},
{
"desc": "The frequency at which the metrics are logged to the sample file. The setting only has an effect if \"Enable Metrics Subsystem\" is set to true.",
"display_name": "Metrics Sample File Logging Frequency",
"name": "hive_metrics_sample_logging_frequency",
"value": "30000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "role_config_suppression_hive_metastore_config_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for Hive Metastore Server logs. Typically used by log4j or logback.",
"display_name": "Hive Metastore Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "The full path to a file with a sample of metrics exposed by the role. The sample is updated at the frequency configured by Metrics Sample File Logging Frequency. By default, the sample file is logged to a directory under the role log directory, e.g., /var/log/hive/metrics-hivemetastore/metrics.log. The setting only has an effect if \"Enable Metrics Subsystem\" is set to true.",
"display_name": "Metrics Sample File Location",
"name": "hive_metrics_sample_file_location",
"value": null
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Directory where Hive Metastore Server will place its log files.",
"display_name": "Hive Metastore Server Log Directory",
"name": "hive_log_dir",
"value": "/var/log/hive"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The delegation token store implementation class. Use DBTokenStore for Highly Available Metastore Configuration.",
"display_name": "Hive Metastore Delegation Token Store",
"name": "hive_metastore_delegation_token_store",
"value": "org.apache.hadoop.hive.thrift.MemoryTokenStore"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for Hive Metastore Server logs",
"display_name": "Hive Metastore Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Hive Metastore Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hive_metastore_env_safety_valve",
"value": null
}
]

View File

@ -1,686 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Hive Downloaded Resources Directory.",
"display_name": "Hive Downloaded Resources Directory Free Space Monitoring Absolute Thresholds",
"name": "hiveserver2_downloaded_resources_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Local directory where Hive stores jars downloaded for remote file systems (HDFS). If not specified, Hive uses a default location.",
"display_name": "Hive Downloaded Resources Directory",
"name": "hiveserver2_downloaded_resources_dir",
"value": null
},
{
"desc": "The period to review when computing the moving average of extra time the pause monitor spent paused.",
"display_name": "Pause Duration Monitoring Period",
"name": "hiveserver2_pause_duration_window",
"value": "5"
},
{
"desc": "Minimum number of worker threads in HiveServer2's thread pool",
"display_name": "Min HiveServer2 Threads",
"name": "hiveserver2_min_threads",
"value": "5"
},
{
"desc": "When enabled, Spark will add and remove executors dynamically to Hive jobs. This is done based on the workload.",
"display_name": "Enable Dynamic Executor Allocation",
"name": "hiveserver2_spark_dynamic_allocation_enabled",
"value": "true"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "The check interval for session/operation timeout, in milliseconds, which can be disabled by setting to zero or a negative value. ",
"display_name": "Session Check Interval",
"name": "hiveserver2_session_check_interval",
"value": "3600000"
},
{
"desc": "The maximum percentage of heap to be used for hash in ReduceSink operator for Top-K selection. 0 means the optimization is disabled. Accepted values are between 0 and 1.",
"display_name": "Maximum ReduceSink Top-K Memory Usage",
"name": "hiveserver2_limit_pushdown_memory_usage",
"value": "0.1"
},
{
"desc": "Whether to try sorted merge bucket (SMB) join.",
"display_name": "Hive Optimize Sorted Merge Bucket Join",
"name": "hiveserver2_optimize_bucketmapjoin_sortedmerge",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Number of Spark executors assigned to each application. This should not be set when Dynamic Executor Allocation is enabled.",
"display_name": "Spark Executors Per Application",
"name": "hiveserver2_spark_executor_instances",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "HiveServer2 Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "role_config_suppression_hive_hs2_config_safety_valve",
"value": "false"
},
{
"desc": "Some select queries can be converted to a single FETCH task instead of a MapReduce task, minimizing latency. A value of none disables all conversion, minimal converts simple queries such as SELECT * and filter on partition columns, and more converts SELECT queries including FILTERS.",
"display_name": "Fetch Task Query Conversion",
"name": "hiveserver2_fetch_task_conversion",
"value": "minimal"
},
{
"desc": "Portion of total memory used in map-side partial aggregation. When exceeded, the partially aggregated results will be flushed from the map task to the reducers.",
"display_name": "Ratio of Memory Usage for Map-Side Aggregation",
"name": "hiveserver2_map_aggr_hash_memory_ratio",
"value": "0.5"
},
{
    "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hiveserver2_host_health",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Upper bound on the number of executors used by the application at any given time. This is used by dynamic executor allocation.",
"display_name": "Upper Bound on Number of Executors",
"name": "hiveserver2_spark_dynamic_allocation_max_executors",
"value": "2147483647"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of HiveServer2 in Bytes",
"name": "hiveserver2_java_heapsize",
"value": "4294967296"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "When the number of ReduceSink operators after merging is less than this number, the ReduceDeDuplication optimization will be disabled.",
    "display_name": "Minimum Reducers for ReduceDeDuplication Optimization",
"name": "hiveserver2_optimize_reducededuplication_min_reducer",
"value": "4"
},
{
    "desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hiveserver2_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The desired file size after merging. This should be larger than hive.merge.smallfiles.avgsize.",
"display_name": "Desired File Size After Merging",
"name": "hiveserver2_merge_size_per_task",
"value": "268435456"
},
{
    "desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hiveserver2_log_directory_free_space",
"value": "false"
},
{
"desc": "HiveServer2 will impersonate the beeline client user when talking to other services such as MapReduce and HDFS.",
"display_name": "HiveServer2 Enable Impersonation",
"name": "hiveserver2_enable_impersonation",
"value": "true"
},
{
"desc": "Enable optimization that checks if a query can be answered using statistics. If so, answers the query using only statistics stored in metastore.",
"display_name": "Enable Stats Optimization",
"name": "hiveserver2_compute_query_using_stats",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Metrics Sample File Location parameter.",
"display_name": "Suppress Parameter Validation: Metrics Sample File Location",
"name": "role_config_suppression_hive_metrics_sample_file_location",
"value": "false"
},
{
"desc": "Enable optimization that converts common join into MapJoin based on input file size.",
"display_name": "Enable MapJoin Optimization",
"name": "hiveserver2_enable_mapjoin",
"value": "true"
},
{
    "desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hiveserver2_swap_memory_usage",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Hive Local Scratch Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Hive Local Scratch Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Hive Local Scratch Directory Free Space Monitoring Percentage Thresholds",
"name": "hiveserver2_exec_local_scratch_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "In vectorized group-by, the number of row entries added to the hash table before re-checking average variable size for memory usage estimation.",
"display_name": "Vectorized GroupBy Check Interval",
"name": "hiveserver2_vectorized_groupby_checkinterval",
"value": "4096"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Hive Local Scratch Directory.",
"display_name": "Hive Local Scratch Directory Free Space Monitoring Absolute Thresholds",
"name": "hiveserver2_exec_local_scratch_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hive-site.xml</strong> for this role only.",
"display_name": "HiveServer2 Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_hs2_config_safety_valve",
"value": null
},
{
"desc": "Ratio between 0.0 and 1.0 of entries in the vectorized group-by aggregation hash that is flushed when the memory threshold is exceeded.",
"display_name": "Vectorized GroupBy Flush Ratio",
"name": "hiveserver2_vectorized_groupby_flush_ratio",
"value": "0.1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HiveServer2 parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for HiveServer2",
"name": "role_config_suppression_hiveserver2_java_opts",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "When computing the overall HiveServer2 health, consider the host's health.",
"display_name": "HiveServer2 Host Health Test",
"name": "hiveserver2_host_health_enabled",
"value": "true"
},
{
"desc": "Maximum size of each Spark driver's Java heap memory when Hive is running on Spark.",
"display_name": "Spark Driver Maximum Java Heap Size",
"name": "hiveserver2_spark_driver_memory",
"value": "268435456"
},
{
        "desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hiveserver2_scm_health",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Local Directory where Hive stores jars and data when performing a MapJoin optimization. If not specified, Hive uses a default location.",
"display_name": "Hive Local Scratch Directory",
"name": "hiveserver2_exec_local_scratchdir",
"value": null
},
{
"desc": "Initial number of executors used by the application at any given time. This is required if the dynamic executor allocation feature is enabled.",
"display_name": "Initial Number of Executors",
"name": "hiveserver2_spark_dynamic_allocation_initial_executors",
"value": "1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Merge small files at the end of a map-only job. When enabled, a map-only job is created to merge the files in the destination table/partitions.",
"display_name": "Enable Merging Small Files - Map-Only Job",
"name": "hiveserver2_merge_mapfiles",
"value": "true"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Lower bound on the number of executors used by the application at any given time. This is used by dynamic executor allocation.",
"display_name": "Lower Bound on Number of Executors",
"name": "hiveserver2_spark_dynamic_allocation_min_executors",
"value": "1"
},
{
"desc": "The number of rows with the same key value to be cached in memory per SMB-joined table.",
"display_name": "Hive SMB Join Cache Rows",
"name": "hiveserver2_smbjoin_cache_rows",
"value": "10000"
},
{
"desc": "Maximum number of worker threads in HiveServer2's thread pool",
"display_name": "Max HiveServer2 Threads",
"name": "hiveserver2_max_threads",
"value": "100"
},
{
"desc": "The minimum log level for HiveServer2 logs",
"display_name": "HiveServer2 Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "This is the amount of extra off-heap memory that can be requested from YARN, per executor process. This, together with spark.executor.memory, is the total memory that YARN can use to create JVM for an executor process.",
"display_name": "Spark Executor Memory Overhead",
"name": "hiveserver2_spark_yarn_executor_memory_overhead",
"value": "26"
},
{
"desc": "Enable optimization that vectorizes query execution by streamlining operations by processing a block of 1024 rows at a time.",
"display_name": "Enable Vectorization Optimization",
"name": "hiveserver2_vectorized_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hive_hs2_env_safety_valve",
"value": "false"
},
{
"desc": "Number of cores per Spark executor.",
"display_name": "Spark Executor Cores",
"name": "hiveserver2_spark_executor_cores",
"value": "1"
},
{
"desc": "Address of the load balancer used for HiveServer2 roles, specified in host:port format. If port is not specified, the port used by HiveServer2 is used. <b>Note:</b> Changing this property regenerates Kerberos keytabs for all HiveServer2 roles.",
"display_name": "HiveServer2 Load Balancer",
"name": "hiverserver2_load_balancer",
"value": null
},
{
"desc": "The full path to a file with a sample of metrics exposed by the role. The sample is updated at the frequency configured by Metrics Sample File Logging Frequency. By default, the sample file is logged to a directory under the role log directory, e.g., /var/log/hive/metrics-hivemetastore/metrics.log. The setting only has an effect if \"Enable Metrics Subsystem\" is set to true.",
"display_name": "Metrics Sample File Location",
"name": "hive_metrics_sample_file_location",
"value": null
},
{
"desc": "Port on which HiveServer2 will listen for connections.",
"display_name": "HiveServer2 Port",
"name": "hs2_thrift_address_port",
"value": "10000"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for HiveServer2",
"name": "hiveserver2_java_opts",
"value": "-XX:MaxPermSize=512M -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "HiveServer2 Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hive_hs2_env_safety_valve",
"value": null
},
{
"desc": "Maximum size of each Spark executor's Java heap memory when Hive is running on Spark.",
"display_name": "Spark Executor Maximum Java Heap Size",
"name": "hiveserver2_spark_executor_memory",
"value": "268435456"
},
{
        "desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hiveserver2_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Remove extra map-reduce jobs if the data is already clustered by the same key, eliminating the need to repartition the dataset again.",
"display_name": "Enable ReduceDeDuplication Optimization",
"name": "hiveserver2_optimize_reducededuplication",
"value": "true"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
        "desc": "Whether to suppress the results of the Hive Local Scratch Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Hive Local Scratch Directory Free Space",
"name": "role_health_suppression_hiveserver2_exec_local_scratch_directory_free_space",
"value": "false"
},
{
"desc": "Whether to vectorize the reduce side of query execution.",
"display_name": "Enable Reduce-Side Vectorization",
"name": "hiveserver2_vectorized_reduce_enabled",
"value": "false"
},
{
"desc": "Merge small files at the end of a map-reduce job. When enabled, a map-only job is created to merge the files in the destination table/partitions.",
"display_name": "Enable Merging Small Files - Map-Reduce Job",
"name": "hiveserver2_merge_mapredfiles",
"value": "false"
},
{
"desc": "The frequency at which the metrics are logged to the sample file. The setting only has an effect if \"Enable Metrics Subsystem\" is set to true.",
"display_name": "Metrics Sample File Logging Frequency",
"name": "hive_metrics_sample_logging_frequency",
"value": "30000"
},
{
"desc": "Enables the health test that the HiveServer2's process state is consistent with the role configuration",
"display_name": "HiveServer2 Process Health Test",
"name": "hiveserver2_scm_health_enabled",
"value": "true"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Directory where HiveServer2 will place its log files.",
"display_name": "HiveServer2 Log Directory",
"name": "hive_log_dir",
"value": "/var/log/hive"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Operation will be closed when not accessed for this duration of time, in milliseconds; disable by setting to zero. For a positive value, checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR). For a negative value, checked for all of the operations regardless of state.",
"display_name": "Idle Operation Timeout",
"name": "hiveserver2_idle_operation_timeout",
"value": "259200000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Downloaded Resources Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive Downloaded Resources Directory",
"name": "role_config_suppression_hiveserver2_downloaded_resources_dir",
"value": "false"
},
{
"desc": "When dynamic partition is enabled, reducers keep only one record writer at all times, which lowers the memory pressure on reducers.",
"display_name": "Enable Sorted Dynamic Partition Optimizer",
"name": "hiveserver2_optimize_sort_dynamic_partition",
"value": "false"
},
{
"desc": "Controls whether the Hive metrics subsystem is enabled for the role.",
"display_name": "Enable Metrics Subsystem",
"name": "hive_metrics_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for the weighted average extra time the pause monitor spent paused. Specified as a percentage of elapsed wall clock time.",
"display_name": "Pause Duration Thresholds",
"name": "hiveserver2_pause_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
        "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hiveserver2_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for HiveServer2 logs. Typically used by log4j or logback.",
"display_name": "HiveServer2 Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "If Hive auto convert join is on, and the sum of the size for n-1 of the tables/partitions for a n-way join is smaller than the specified size, the join is directly converted to a MapJoin (there is no conditional task).",
"display_name": "Hive Auto Convert Join Noconditional Size",
"name": "hiveserver2_auto_convert_join_noconditionaltask_size",
"value": "20971520"
},
{
        "desc": "Whether to suppress the results of the Pause Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Pause Duration",
"name": "role_health_suppression_hiveserver2_pause_duration",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 Log Directory parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 Log Directory",
"name": "role_config_suppression_hive_log_dir",
"value": "false"
},
{
"desc": "When enabled, HiveServer2 logs EXPLAIN EXTENDED output for every query at INFO log4j level.",
"display_name": "Enable Explain Logging",
"name": "hiveserver2_enable_explain_output",
"value": "true"
},
{
        "desc": "Whether to suppress the results of the Hive Downloaded Resources Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Hive Downloaded Resources Directory Free Space",
"name": "role_health_suppression_hiveserver2_downloaded_resources_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Directory in HDFS where Hive writes intermediate data between MapReduce jobs. If not specified, Hive uses a default location.",
"display_name": "Hive HDFS Scratch Directory",
"name": "hiveserver2_exec_scratchdir",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 Load Balancer parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 Load Balancer",
"name": "role_config_suppression_hiverserver2_load_balancer",
"value": "false"
},
{
"desc": "Session will be closed when not accessed for this duration of time, in milliseconds; disable by setting to zero or a negative value.",
"display_name": "Idle Session Timeout",
"name": "hiveserver2_idle_session_timeout",
"value": "604800000"
},
{
"desc": "Merge small files at the end of a Spark job. When enabled, a map-only job is created to merge the files in the destination table/partitions.",
"display_name": "Enable Merging Small Files - Spark Job",
"name": "hiveserver2_merge_sparkfiles",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for HiveServer2 logs. Typically used by log4j or logback.",
"display_name": "HiveServer2 Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hiveserver2_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Enable map-side partial aggregation, which cause the mapper to generate fewer rows. This reduces the data to be sorted and distributed to reducers.",
"display_name": "Enable Map-Side Aggregation",
"name": "hiveserver2_map_aggr",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Local Scratch Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive Local Scratch Directory",
"name": "role_config_suppression_hiveserver2_exec_local_scratchdir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive HDFS Scratch Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive HDFS Scratch Directory",
"name": "role_config_suppression_hiveserver2_exec_scratchdir",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Hive Downloaded Resources Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Hive Downloaded Resources Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Hive Downloaded Resources Directory Free Space Monitoring Percentage Thresholds",
"name": "hiveserver2_downloaded_resources_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "When the average output file size of a job is less than the value of this property, Hive will start an additional map-only job to merge the output files into bigger files. This is only done for map-only jobs if hive.merge.mapfiles is true, for map-reduce jobs if hive.merge.mapredfiles is true, and for Spark jobs if hive.merge.sparkfiles is true.",
"display_name": "Small File Average Size Merge Threshold",
"name": "hiveserver2_merge_smallfiles_avgsize",
"value": "16777216"
},
{
"desc": "Above this size, queries are converted to fetch tasks.",
"display_name": "Fetch Task Query Conversion Threshold",
"name": "hiveserver2_fetch_task_conversion_threshold",
"value": "268435456"
},
{
"desc": "This is the amount of extra off-heap memory that can be requested from YARN, per driver. This, together with spark.driver.memory, is the total memory that YARN can use to create JVM for a driver process.",
"display_name": "Spark Driver Memory Overhead",
"name": "hiveserver2_spark_yarn_driver_memory_overhead",
"value": "26"
},
{
        "desc": "Enables the Calcite-based Cost-Based Optimizer for HiveServer2.",
"display_name": "Enable Cost-Based Optimizer for Hive",
"name": "hiveserver2_enable_cbo",
"value": "false"
}
]

View File

@ -1,15 +0,0 @@
<property>
<name>hive.metastore.client.impl</name>
<value>org.apache.sentry.binding.metastore.SentryHiveMetaStoreClient</value>
<description>Sets custom Hive metastore client which Sentry uses to filter out metadata.</description>
</property>
<property>
<name>hive.metastore.pre.event.listeners</name>
<value>org.apache.sentry.binding.metastore.MetastoreAuthzBinding</value>
<description>list of comma separated listeners for metastore events.</description>
</property>
<property>
<name>hive.metastore.event.listeners</name>
<value>org.apache.sentry.binding.metastore.SentryMetastorePostEventListener</value>
<description>list of comma separated listeners for metastore, post events.</description>
</property>

View File

@ -1,12 +0,0 @@
<property>
<name>hive.security.authorization.task.factory</name>
<value>org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl</value>
</property>
<property>
<name>hive.server2.session.hook</name>
<value>org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook</value>
</property>
<property>
<name>hive.sentry.conf.url</name>
<value>file:///{{CMF_CONF_DIR}}/sentry-site.xml</value>
</property>

View File

@ -1,704 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "Maximum number of rolled-over audit logs to retain. The logs are not deleted if they contain audit events that have not yet been propagated to the Audit Server.",
"display_name": "Number of Audit Logs to Retain",
"name": "navigator_client_max_num_audit_log",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Sentry Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Sentry Validator",
"name": "service_config_suppression_hive_sentry_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the HiveServer2 Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HiveServer2 Count Validator",
"name": "service_config_suppression_hiveserver2_count_validator",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "hive"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that HiveServer2 might connect to. This is used when HiveServer2 is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "HiveServer2 TLS/SSL Certificate Trust Store File",
"name": "hiveserver2_truststore_file",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Database Name parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Database Name",
"name": "service_config_suppression_hive_metastore_database_name",
"value": "false"
},
{
"desc": "Port number of Hive Metastore database",
"display_name": "Hive Metastore Database Port",
"name": "hive_metastore_database_port",
"value": "3306"
},
{
"desc": "Smaller than this size, Hive uses a single-threaded copy; larger than this size, Hive uses DistCp.",
"display_name": "Hive Copy Large File Size",
"name": "hive_exec_copyfile_maxsize",
"value": "33554432"
},
{
"desc": "Path to the directory where audit logs will be written. The directory will be created if it doesn't exist.",
"display_name": "Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/hive/audit"
},
{
"desc": "Name of Hive Metastore database",
"display_name": "Hive Metastore Database Name",
"name": "hive_metastore_database_name",
"value": "metastore"
},
{
"desc": "The class to use in Sentry authorization for user to group mapping. Sentry authorization may be configured to use either Hadoop user to group mapping or local groups defined in the policy file. Hadoop user to group mapping may be configured in the Cloudera Manager HDFS service configuration page under the Security section.",
"display_name": "Sentry User to Group Mapping Class",
"name": "hive_sentry_provider",
"value": "org.apache.sentry.provider.file.HadoopGroupResourceAuthorizationProvider"
},
{
"desc": "Use Sentry to enable role-based, fine-grained authorization. This configuration enables Sentry using policy files. To enable Sentry using the Sentry service instead, add the Sentry service as a dependency to the Hive service. <strong>The Sentry service provides concurrent and secure access to authorization policy metadata and is the recommended option for enabling Sentry. </strong> Sentry is supported only on CDH 4.4 or later deployments. Before enabling Sentry, read the requirements and configuration steps in <a class=\"bold\" href=\"http://tiny.cloudera.com/sentry-guide-cm5\" target=\"_blank\">Setting Up Hive Authorization with Sentry<i class=\"externalLink\"></i></a>.",
"display_name": "Enable Sentry Authorization using Policy Files",
"name": "sentry_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Bypass Metastore Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Bypass Metastore Validator",
"name": "service_config_suppression_hive_bypass_metastore_validator",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Name of the Spark on YARN service that this Hive service instance depends on. If selected and Enable Hive on Spark is set to true, Hive jobs can use the Spark execution engine instead of MapReduce2. Requires that Hive depends on YARN. See <a class=\"bold\" href=\"http://tiny.cloudera.com/cm-hive-on-spark-5\" target=\"_blank\">Configuring Hive on Spark<i class=\"externalLink\"></i></a> for more information about Hive on Spark.",
"display_name": "Spark On YARN Service",
"name": "spark_on_yarn_service",
"value": null
},
{
"desc": "Action to take when the audit event queue is full. Drop the event or shutdown the affected process.",
"display_name": "Audit Queue Policy",
"name": "navigator_audit_queue_policy",
"value": "DROP"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hive-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hive Service Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_service_config_safety_valve",
"value": null
},
{
"desc": "Instead of talking to Hive Metastore Server for Metastore information, Hive clients will talk directly to the Metastore database.",
"display_name": "Bypass Hive Metastore Server",
"name": "hive_bypass_metastore_server",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Database Host parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Database Host",
"name": "service_config_suppression_hive_metastore_database_host",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Gateway Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Gateway Count Validator",
"name": "service_config_suppression_gateway_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hive Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hive_service_env_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hive Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "hive_server2_sentry_safety_valve",
"value": null
},
{
"desc": "Size per reducer. If the input size is 10GiB and this is set to 1GiB, Hive will use 10 reducers.",
"display_name": "Hive Bytes Per Reducer",
"name": "hive_bytes_per_reducer",
"value": "67108864"
},
{
"desc": "The directory in which Hive on Spark lineage log files are written.",
"display_name": "Hive on Spark Lineage Log Directory",
"name": "lineage_event_log_dir",
"value": "/var/log/hive/lineage"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 TLS/SSL Certificate Trust Store File",
"name": "service_config_suppression_hiveserver2_truststore_file",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_hive_server2_sentry_safety_valve",
"value": "false"
},
{
"desc": "Password for Hive Metastore database",
"display_name": "Hive Metastore Database Password",
"name": "hive_metastore_database_password",
"value": ""
},
{
"desc": "The password for the HiveServer2 TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "HiveServer2 TLS/SSL Certificate Trust Store Password",
"name": "hiveserver2_truststore_password",
"value": null
},
{
"desc": "<p>\nEvent filters are defined in a JSON object like the following:\n</p>\n\n<pre>\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n</pre>\n\n<p>\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n</p>\n\n<p>\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n</p>\n\n<p>\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n</p>\n\n<p>\nThe following is the list of fields that can be filtered for Hive events:\n</p>\n\n<ul>\n <li>userName: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>operation: the Hive operation being performed.</li> \n <li>databaseName: the databaseName for the operation.</li>\n <li>tableName: the tableName for the operation.</li>\n</ul>\n\n<p>\nThe default Hive audit event filter discards HDFS directory events generated by\nHive jobs that reference the /tmp directory.\n</p>\n",
"display_name": "Audit Event Filter",
"name": "navigator_audit_event_filter",
"value": "{\n \"comment\" : [\n \"The default Hive audit event filter discards HDFS directory events \",\n \"generated by Hive jobs that reference the /tmp directory.\"\n ],\n \"defaultAction\" : \"accept\",\n \"rules\" : [\n {\n \"action\" : \"discard\",\n \"fields\" : [\n { \"name\" : \"operation\", \"match\" : \"QUERY\" },\n { \"name\" : \"objectType\", \"match\" : \"DFS_DIR\"},\n { \"name\" : \"resourcePath\", \"match\" : \"/tmp/hive-(?:.+)?/hive_(?:.+)?/-mr-.*\" }\n ]\n }\n ]\n}\n"
},
{
"desc": "Whether to suppress configuration warnings produced by the WebHCat Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: WebHCat Server Count Validator",
"name": "service_config_suppression_webhcat_count_validator",
"value": "false"
},
{
"desc": "Directory name where Hive Metastore's database is stored (only for Derby)",
"display_name": "Hive Metastore Derby Path",
"name": "hive_metastore_derby_path",
"value": "/var/lib/hive/cloudera_manager/derby/metastore_db"
},
{
"desc": "Perform DataNucleus validation of metadata during startup. <strong>Note</strong>: when enabled, Hive will log DataNucleus warnings even though Hive will function normally.",
"display_name": "Hive Metastore Database DataNucleus Metadata Validation",
"name": "hive_metastore_database_datanucleus_metadata_validation",
"value": "false"
},
{
"desc": "Name of the Sentry service that this Hive service instance depends on. If selected, Hive uses this Sentry service to look up authorization privileges. Before enabling Sentry, read the requirements and configuration steps in <a class=\"bold\" href=\"http://tiny.cloudera.com/sentry-service-cm5\" target=\"_blank\">Setting Up The Sentry Service<i class=\"externalLink\"></i></a>.",
"display_name": "Sentry Service",
"name": "sentry_service",
"value": null
},
{
"desc": "Max number of reducers to use. If the configuration parameter Hive Reduce Tasks is negative, Hive will limit the number of reducers to the value of this parameter.",
"display_name": "Hive Max Reducers",
"name": "hive_max_reducers",
"value": "1099"
},
{
"desc": "Type of Hive Metastore database. Note that Derby is not recommended and Cloudera Impala does not support Derby.",
"display_name": "Hive Metastore Database Type",
"name": "hive_metastore_database_type",
"value": "mysql"
},
{
"desc": "Whether to suppress the results of the HiveServer2 Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: HiveServer2 Health",
"name": "service_health_suppression_hive_hiveserver2s_healthy",
"value": "false"
},
{
"desc": "The URI of the LDAP server to use if LDAP authentication is enabled. The URI must be prefixed with ldap:// or ldaps://. Usernames and passwords are transmitted in the clear unless an \"ldaps://\" URI is specified. The URI can optionally specify the port; for example, ldaps://ldap_server.example.com:636.",
"display_name": "LDAP URI",
"name": "hiveserver2_ldap_uri",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hive Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_hive_service_env_safety_valve",
"value": "false"
},
{
"desc": "Host name of Hive Metastore database",
"display_name": "Hive Metastore Database Host",
"name": "hive_metastore_database_host",
"value": "localhost"
},
{
"desc": "Maximum size of audit log file in MB before it is rolled over.",
"display_name": "Maximum Audit Log File Size",
"name": "navigator_audit_log_max_file_size",
"value": "100"
},
{
"desc": "Prevent Metastore operations in the event of schema version incompatibility. Consider setting this to true to reduce probability of schema corruption during Metastore operations. Note that setting this property to true will also set datanucleus.autoCreateSchema property to false and datanucleus.fixedDatastore property to true. Any values set in Cloudera Manager for these properties will be overridden.",
"display_name": "Strict Hive Metastore Schema Validation",
"name": "hive_metastore_schema_verification",
"value": "true"
},
{
"desc": "This configuration <strong>overrides</strong> the value set for Hive Proxy User Groups configuration in HDFS service for use by Hive Metastore Server. Specify a comma-delimited list of groups that you want to <strong>allow access to Hive Metastore metadata</strong> and allow the Hive user to impersonate. A value of '*' allows all groups. The default value of empty inherits the value set for Hive Proxy User Groups configuration in the HDFS service.",
"display_name": "Hive Metastore Access Control and Proxy User Groups Override",
"name": "hive_proxy_user_groups_list",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Warehouse Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive Warehouse Directory",
"name": "service_config_suppression_hive_warehouse_directory",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>core-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hive Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "hive_core_site_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a list of configuration properties that will be used by the Service Monitor instead of the current client configuration for the service.",
"display_name": "Service Monitor Client Config Overrides",
"name": "smon_client_config_overrides",
"value": "<property><name>hive.metastore.client.socket.timeout</name><value>60</value></property>"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Bypass Sentry Authorization Users parameter.",
"display_name": "Suppress Parameter Validation: Bypass Sentry Authorization Users",
"name": "service_config_suppression_sentry_metastore_service_users",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 TLS/SSL Certificate Trust Store Password",
"name": "service_config_suppression_hiveserver2_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Auxiliary JARs Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive Auxiliary JARs Directory",
"name": "service_config_suppression_hive_aux_jars_path_dir",
"value": "false"
},
{
"desc": "The health test thresholds of the overall WebHCat Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" WebHCat Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" WebHCat Servers falls below the critical threshold.",
"display_name": "Healthy WebHCat Server Monitoring Thresholds",
"name": "hive_webhcats_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "true"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hive-site.xml</strong>. Applies to all Hive Replication jobs.",
"display_name": "Hive Replication Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_service_replication_config_safety_valve",
"value": null
},
{
"desc": "When set, this parameter is used to convert the username into the LDAP Distinguished Name (DN), so that the resulting DN looks like uid=username,X. For example, if this parameter is set to \"ou=People,dc=cloudera,dc=com\", and the username passed in is \"mike\", the resulting authentication passed to the LDAP server looks like \"uid=mike,ou=People,dc=cloudera,dc=com\". This parameter is generally most useful when authenticating against an OpenLDAP server. This parameter is mutually exclusive with LDAP Domain.",
"display_name": "LDAP BaseDN",
"name": "hiveserver2_ldap_basedn",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Global Policy File parameter.",
"display_name": "Suppress Parameter Validation: Sentry Global Policy File",
"name": "service_config_suppression_hive_sentry_provider_resource",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Filter parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Filter",
"name": "service_config_suppression_navigator_audit_event_filter",
"value": "false"
},
{
"desc": "Hive warehouse directory is the location in HDFS where Hive's tables are stored. Note that Hive's default value for its warehouse directory is '/user/hive/warehouse'.",
"display_name": "Hive Warehouse Directory",
"name": "hive_warehouse_directory",
"value": "/user/hive/warehouse"
},
{
"desc": "The password for the HiveServer2 JKS keystore file.",
"display_name": "HiveServer2 TLS/SSL Server JKS Keystore File Password",
"name": "hiveserver2_keystore_password",
"value": null
},
{
"desc": "Whether to suppress the results of the WebHCat Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: WebHCat Server Health",
"name": "service_health_suppression_hive_webhcats_healthy",
"value": "false"
},
{
"desc": "When set, this value is appended to all usernames before authenticating with the LDAP server. For example, if this parameter is set to \"my.domain.com\", and the user authenticating is \"mike\", then \"mike@my.domain.com\" is passed to the LDAP server. If this field is unset, the username is unaltered before being passed to the LDAP server. LDAP Domain is most useful when authenticating against an Active Directory server. This parameter is mutually exclusive with LDAP BaseDN.",
"display_name": "LDAP Domain",
"name": "hiveserver2_ldap_domain",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Metastore Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Metastore Server Count Validator",
"name": "service_config_suppression_hivemetastore_count_validator",
"value": "false"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Client Config Overrides parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Client Config Overrides",
"name": "service_config_suppression_smon_client_config_overrides",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Domain parameter.",
"display_name": "Suppress Parameter Validation: LDAP Domain",
"name": "service_config_suppression_hiveserver2_ldap_domain",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Derby Path parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Derby Path",
"name": "service_config_suppression_hive_metastore_derby_path",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Proxy Groups Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Proxy Groups Validator",
"name": "service_config_suppression_hive_proxy_groups_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 TLS/SSL Server JKS Keystore File Password",
"name": "service_config_suppression_hiveserver2_keystore_password",
"value": "false"
},
{
"desc": "When checked, LDAP-based authentication for users is enabled. This option is incompatible with Kerberos authentication for Hive Server 2 at this time.",
"display_name": "Enable LDAP Authentication",
"name": "hiveserver2_enable_ldap_auth",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Service Advanced Configuration Snippet (Safety Valve) for core-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "service_config_suppression_hive_core_site_safety_valve",
"value": "false"
},
{
"desc": "Default number of reduce tasks per job. Usually set to a prime number close to the number of available hosts. Ignored when mapred.job.tracker is \"local\". Hadoop sets this to 1 by default, while Hive uses -1 as the default. When set to -1, Hive will automatically determine an appropriate number of reducers for each job.",
"display_name": "Hive Reduce Tasks",
"name": "hive_reduce_tasks",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Server Name for Sentry Authorization parameter.",
"display_name": "Suppress Parameter Validation: Server Name for Sentry Authorization",
"name": "service_config_suppression_hive_sentry_server",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Hive Metastore Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Hive Metastore Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Hive Metastore Servers falls below the critical threshold.",
"display_name": "Healthy Hive Metastore Server Monitoring Thresholds",
"name": "hive_hivemetastores_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "The health test thresholds of the overall HiveServer2 health. The check returns \"Concerning\" health if the percentage of \"Healthy\" HiveServer2s falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" HiveServer2s falls below the critical threshold.",
"display_name": "Healthy HiveServer2 Monitoring Thresholds",
"name": "hive_hiveserver2s_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Automatically create or upgrade tables in the Hive Metastore database when needed. Consider setting this to false and managing the schema manually.",
"display_name": "Auto Create and Upgrade Hive Metastore Database Schema",
"name": "hive_metastore_database_auto_create_schema",
"value": "false"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>userName: treats the field value as a userNname, and ignores any\n host-specific data. 
This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can be used to compare Hive events:\n</p>\n\n<ul>\n <li>operation: the Hive operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n <li>databaseName: the database affected by the operation.</li>\n <li>tableName: the table or view affected by the operation.</li>\n <li>objectType: the type of object affected by the operation.</li>\n <li>resourcePath: the path of the resource affected by the operation.</li>\n</ul>\n\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": null
},
{
"desc": "Enable collection of lineage from the service's roles.",
"display_name": "Enable Lineage Collection",
"name": "navigator_lineage_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP URI parameter.",
"display_name": "Suppress Parameter Validation: LDAP URI",
"name": "service_config_suppression_hiveserver2_ldap_uri",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Replication Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Replication Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "service_config_suppression_hive_service_replication_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties parameter.",
"display_name": "Suppress Parameter Validation: Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties",
"name": "service_config_suppression_navigator_lineage_client_config_safety_valve",
"value": "false"
},
{
"desc": "Name of the HBase service that this Hive service instance depends on.",
"display_name": "HBase Service",
"name": "hbase_service",
"value": null
},
{
"desc": "Directory containing auxiliary JARs used by Hive. This should be a directory location and not a classpath containing one or more JARs. This directory must be created and managed manually on hosts that run the Hive Metastore Server, HiveServer2, or the Hive CLI. The directory location is set in the environment as HIVE_AUX_JARS_PATH and will generally override the hive.aux.jars.path property set in XML files, even if hive.aux.jars.path is set in an advanced configuration snippet.",
"display_name": "Hive Auxiliary JARs Directory",
"name": "hive_aux_jars_path_dir",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Database User parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Database User",
"name": "service_config_suppression_hive_metastore_database_user",
"value": "false"
},
{
"desc": "HDFS path to the global policy file for Sentry authorization. This should be a relative path (and not a full HDFS URL). The global policy file must be in Sentry policy file format.",
"display_name": "Sentry Global Policy File",
"name": "hive_sentry_provider_resource",
"value": "/user/hive/sentry/sentry-provider.ini"
},
{
"desc": "The server name used when defining privilege rules in Sentry authorization. Sentry uses this name as an alias for the Hive service. It does not correspond to any physical server name.",
"display_name": "Server Name for Sentry Authorization",
"name": "hive_sentry_server",
"value": "server1"
},
{
"desc": "Whether to suppress the results of the Hive Metastore Server Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Hive Metastore Server Health",
"name": "service_health_suppression_hive_hivemetastores_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HiveServer2 TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: HiveServer2 TLS/SSL Server JKS Keystore File Location",
"name": "service_config_suppression_hiveserver2_keystore_path",
"value": "false"
},
{
"desc": "Encrypt communication between clients and HiveServer2 using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for HiveServer2",
"name": "hiveserver2_enable_ssl",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for Hive on Spark lineage logs. Typically used by log4j or logback.",
"display_name": "Hive on Spark Maximum Lineage Log File Size",
"name": "max_lineage_log_file_size",
"value": "100"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP BaseDN parameter.",
"display_name": "Suppress Parameter Validation: LDAP BaseDN",
"name": "service_config_suppression_hiveserver2_ldap_basedn",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Hive service instance depends on.",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "List of users that are allowed to bypass Sentry Authorization in the Hive metastore. These are usually service users that already ensure that all activity has been authorized, such as hive and impala. Only applies when Hive is using Sentry Service.",
"display_name": "Bypass Sentry Authorization Users",
"name": "sentry_metastore_service_users",
"value": "hive,impala,hue,hdfs"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when HiveServer2 is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "HiveServer2 TLS/SSL Server JKS Keystore File Location",
"name": "hiveserver2_keystore_path",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Derby Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Derby Validator",
"name": "service_config_suppression_hive_derby_validator",
"value": "false"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "hive"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "hive"
},
{
"desc": "Let the table directories inherit the permission of the Warehouse or Database directory instead of being created with the permissions derived from dfs umask. This allows Impala to insert into tables created via Hive.",
"display_name": "Hive Warehouse Subdirectories Inherit Permissions",
"name": "hive_warehouse_subdir_inherit_perms",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the Client TLS/SSL In Use With LDAP Authentication Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Client TLS/SSL In Use With LDAP Authentication Validator",
"name": "service_config_suppression_hive_client_ssl_recommended_with_ldap_auth_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Database Password parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Database Password",
"name": "service_config_suppression_hive_metastore_database_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive on Spark Lineage Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Hive on Spark Lineage Log Directory",
"name": "service_config_suppression_lineage_event_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Metastore Access Control and Proxy User Groups Override parameter.",
"display_name": "Suppress Parameter Validation: Hive Metastore Access Control and Proxy User Groups Override",
"name": "service_config_suppression_hive_proxy_user_groups_list",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Audit Log Directory",
"name": "service_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "User for Hive Metastore database",
"display_name": "Hive Metastore Database User",
"name": "hive_metastore_database_user",
"value": "hive"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hive Concurrency Configuration Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hive Concurrency Configuration Validator",
"name": "service_config_suppression_hive_concurrency_validator",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.lineage.client.properties</strong>.",
"display_name": "Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties",
"name": "navigator_lineage_client_config_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "Hive Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Allows URIs when defining privileges in per-database policy files. <strong>Warning:</strong> Typically, this configuration should be disabled. Enabling it would allow database policy file owner (which is generally not Hive admin user) to grant load privileges to any directory with read access to Hive admin user, including databases controlled by other database policy files.",
"display_name": "Allow URIs in Database Policy File",
"name": "sentry_allow_uri_db_policyfile",
"value": "false"
},
{
"desc": "In unsecure mode, setting this property to true will cause the Metastore Server to execute DFS operations using the client's reported user and group permissions. Cloudera Manager will set this for all clients and servers.",
"display_name": "Set User and Group Information",
"name": "hive_set_ugi",
"value": "true"
},
{
"desc": "<strong>Cloudera does not support Hive on Spark in CDH 5.4 or CDH 5.5.</strong> Enable Hive to use Spark for execution even though it is not supported. For evaluation purposes only. This configuration only takes effect when Hive is configured with a Spark on YARN Service. See <a class=\"bold\" href=\"http://tiny.cloudera.com/cm-hive-on-spark-5\" target=\"_blank\">Configuring Hive on Spark<i class=\"externalLink\"></i></a> for more information about using Hive on Spark.",
"display_name": "Enable Hive on Spark (Unsupported)",
"name": "enable_hive_on_spark",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hive Service Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hive Service Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "service_config_suppression_hive_service_config_safety_valve",
"value": "false"
},
{
"desc": "MapReduce jobs are run against this service.",
"display_name": "MapReduce Service",
"name": "mapreduce_yarn_service",
"value": null
}
]

View File

@ -1,61 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<configuration>
<property>
<name>hive.metastore.local</name>
<value>false</value>
</property>
<property>
<name>hive.metastore.uris</name>
<value>thrift://test-master-001.novalocal:9083</value>
</property>
<property>
<name>hive.metastore.client.socket.timeout</name>
<value>300</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
<property>
<name>hive.warehouse.subdir.inherit.perms</name>
<value>true</value>
</property>
<property>
<name>mapred.reduce.tasks</name>
<value>-1</value>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>1073741824</value>
</property>
<property>
<name>hive.exec.copyfile.maxsize</name>
<value>33554432</value>
</property>
<property>
<name>hive.exec.reducers.max</name>
<value>999</value>
</property>
<property>
<name>hive.metastore.execute.setugi</name>
<value>true</value>
</property>
<property>
<name>hive.cluster.delegation.token.store.class</name>
<value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
</property>
<property>
<name>hive.server2.enable.doAs</name>
<value>true</value>
</property>
<property>
<name>fs.hdfs.impl.disable.cache</name>
<value>true</value>
</property>
<property>
<name>hive.server2.use.SSL</name>
<value>false</value>
</property>
</configuration>

View File

@ -1,332 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_webhcat_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_webhcat_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_webhcat_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_webhcat_scm_health",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Port on which WebHCat Server will listen for connections.",
"display_name": "WebHCat Server Port",
"name": "hive_webhcat_address_port",
"value": "50111"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the WebHCat Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: WebHCat Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hive_webhcat_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the WebHCat Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: WebHCat Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of WebHCat Server in Bytes",
"name": "hive_webhcat_java_heapsize",
"value": "268435456"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the WebHCat Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: WebHCat Server Log Directory",
"name": "role_config_suppression_hcatalog_log_dir",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hive-site.xml</strong> for this role only.",
"display_name": "WebHCat Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "hive_webhcat_hive_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "webhcat_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "WebHCat Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "WebHCat Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hive_webhcat_env_safety_valve",
"value": null
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for WebHCat Server",
"name": "hive_webhcat_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for WebHCat Server logs. Typically used by log4j or logback.",
"display_name": "WebHCat Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the WebHCat Server's process state is consistent with the role configuration",
"display_name": "WebHCat Server Process Health Test",
"name": "webhcat_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_webhcat_swap_memory_usage",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>webhcat-site.xml</strong> for this role only.",
"display_name": "WebHCat Server Advanced Configuration Snippet (Safety Valve) for webhcat-site.xml",
"name": "hive_webhcat_config_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the WebHCat Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml parameter.",
"display_name": "Suppress Parameter Validation: WebHCat Server Advanced Configuration Snippet (Safety Valve) for hive-site.xml",
"name": "role_config_suppression_hive_webhcat_hive_config_safety_valve",
"value": "false"
},
{
"desc": "When computing the overall WebHCat Server health, consider the host's health.",
"display_name": "WebHCat Server Host Health Test",
"name": "webhcat_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_webhcat_host_health",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_webhcat_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for WebHCat Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for WebHCat Server",
"name": "role_config_suppression_hive_webhcat_java_opts",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for WebHCat Server logs. Typically used by log4j or logback.",
"display_name": "WebHCat Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the WebHCat Server Advanced Configuration Snippet (Safety Valve) for webhcat-site.xml parameter.",
"display_name": "Suppress Parameter Validation: WebHCat Server Advanced Configuration Snippet (Safety Valve) for webhcat-site.xml",
"name": "role_config_suppression_hive_webhcat_config_safety_valve",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for WebHCat Server logs",
"display_name": "WebHCat Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Directory where WebHCat Server will place its log files.",
"display_name": "WebHCat Server Log Directory",
"name": "hcatalog_log_dir",
"value": "/var/log/hcatalog"
}
]

View File

@ -1,236 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hue_load_balancer_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Load Balancer Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Hue Load Balancer Log Directory",
"name": "role_config_suppression_hue_load_balancer_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hue_load_balancer_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Path to TLS/SSL Private Key File parameter.",
"display_name": "Suppress Parameter Validation: Path to TLS/SSL Private Key File",
"name": "role_config_suppression_ssl_certificate_key",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hue_load_balancer_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hue_load_balancer_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Load Balancer Advanced Configuration Snippet (Safety Valve) for httpd.conf parameter.",
"display_name": "Suppress Parameter Validation: Load Balancer Advanced Configuration Snippet (Safety Valve) for httpd.conf",
"name": "role_config_suppression_hue_load_balancer_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Path to TLS/SSL Certificate File parameter.",
"display_name": "Suppress Parameter Validation: Path to TLS/SSL Certificate File",
"name": "role_config_suppression_ssl_certificate",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Load Balancer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HUE_LOAD_BALANCER_role_env_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>httpd.conf</strong> for this role only. This can only add options to the configuration, and cannot override previously defined options.",
"display_name": "Load Balancer Advanced Configuration Snippet (Safety Valve) for httpd.conf",
"name": "hue_load_balancer_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hue_load_balancer_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Port to use to connect to the Hue through the Load Balancer.",
"display_name": "Hue Load Balancer Port",
"name": "listen",
"value": "8889"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hue_load_balancer_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Load Balancer Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Load Balancer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hue_load_balancer_role_env_safety_valve",
"value": "false"
},
{
"desc": "Path to the TLS/SSL private key file on the host running the Hue Load Balancer, used to encrypt traffic between the browser and the Load Balancer. This file must be in PEM format, and must be readable by the Hue system user. The Hue Load Balancer only supports a key without a passphrase.",
"display_name": "Path to TLS/SSL Private Key File",
"name": "ssl_certificate_key",
"value": null
},
{
"desc": "Directory where Hue Load Balancer will place its log files.",
"display_name": "Hue Load Balancer Log Directory",
"name": "hue_load_balancer_log_dir",
"value": "/var/log/hue-httpd"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When computing the overall Load Balancer health, consider the host's health.",
"display_name": "Load Balancer Host Health Test",
"name": "hue_load_balancer_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hue_load_balancer_unexpected_exits",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Path to the TLS/SSL certificate file on the host running the Hue Load Balancer, used to encrypt traffic between the browser and the Load Balancer. This file must be in PEM format, and must be readable by the Hue system user.",
"display_name": "Path to TLS/SSL Certificate File",
"name": "ssl_certificate",
"value": null
},
{
"desc": "Enables the health test that the Load Balancer's process state is consistent with the role configuration",
"display_name": "Load Balancer Process Health Test",
"name": "hue_load_balancer_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hue_load_balancer_swap_memory_usage",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
}
]

View File

@ -1,356 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Top Banner Custom HTML parameter.",
"display_name": "Suppress Parameter Validation: Top Banner Custom HTML",
"name": "role_config_suppression_banner_html",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hue TLS/SSL Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hue TLS/SSL Validator",
"name": "role_config_suppression_hue_ssl_validator",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Path to TLS/SSL Private Key parameter.",
"display_name": "Suppress Parameter Validation: Path to TLS/SSL Private Key",
"name": "role_config_suppression_ssl_private_key",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hue_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "An optional, custom one-line HTML code to display as a banner on top of all Hue Server web pages. Useful in displaying cluster identity of the Hue Server.",
"display_name": "Top Banner Custom HTML",
"name": "banner_html",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Private Key Password parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Private Key Password",
"name": "role_config_suppression_ssl_private_key_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hue_server_unexpected_exits",
"value": "false"
},
{
"desc": "Enable HTTPS for the Hue web server.",
"display_name": "Enable Hue Server HTTPS",
"name": "ssl_enable",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Directory where Hue Server will place its log files.",
"display_name": "Hue Server Log Directory",
"name": "hue_server_log_dir",
"value": "/var/log/hue"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hue_server_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hue Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hue_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "Timeout in seconds for Thrift calls to HiveServer2 and Impala.",
"display_name": "HiveServer2 and Impala Thrift Connection Timeout",
"name": "hs2_conn_timeout",
"value": "120"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Path to TLS/SSL Certificate parameter.",
"display_name": "Suppress Parameter Validation: Path to TLS/SSL Certificate",
"name": "role_config_suppression_ssl_certificate",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_hue_server_web_metric_collection",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Hue Server Log Directory",
"name": "role_config_suppression_hue_server_log_dir",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hue_server_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hue_server_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hue_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Location on HDFS where the jobsub examples and templates are stored.",
"display_name": "Jobsub Examples and Templates Directory",
"name": "hue_server_remote_data_dir",
"value": "/user/hue/jobsub"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hue_server_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hue_server_host_health",
"value": "false"
},
{
"desc": "When computing the overall Hue Server health, consider the host's health.",
"display_name": "Hue Server Host Health Test",
"name": "hue_server_host_health_enabled",
"value": "true"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Port to use to connect to the Hue server.",
"display_name": "Hue HTTP Port",
"name": "hue_http_port",
"value": "8888"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Jobsub Examples and Templates Directory parameter.",
"display_name": "Suppress Parameter Validation: Jobsub Examples and Templates Directory",
"name": "role_config_suppression_hue_server_remote_data_dir",
"value": "false"
},
{
"desc": "The frequency at which the metrics are logged to the sample file.",
"display_name": "Metrics Sample File Logging Frequency",
"name": "hue_metrics_sample_logging_frequency",
"value": "30000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Path to the TLS/SSL certificate on the host running the Hue web server. This file must be in PEM format, and must be readable by the Hue system user.",
"display_name": "Path to TLS/SSL Certificate",
"name": "ssl_certificate",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Secret Key parameter.",
"display_name": "Suppress Parameter Validation: Secret Key",
"name": "role_config_suppression_secret_key",
"value": "false"
},
{
"desc": "The full path to a file with a sample of metrics exposed by the role. The sample is updated at the frequency configured by Metrics Sample File Logging Frequency. By default, the sample file is logged to a directory under the role log directory, e.g., /var/log/hue/metrics-hue_server/metrics.log.",
"display_name": "Metrics Sample File Location",
"name": "hue_metrics_sample_file_location",
"value": null
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Hue Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HUE_SERVER_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Server Advanced Configuration Snippet (Safety Valve) for hue_safety_valve_server.ini parameter.",
"display_name": "Suppress Parameter Validation: Hue Server Advanced Configuration Snippet (Safety Valve) for hue_safety_valve_server.ini",
"name": "role_config_suppression_hue_server_hue_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "hue_server_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Metrics Sample File Location parameter.",
"display_name": "Suppress Parameter Validation: Metrics Sample File Location",
"name": "role_config_suppression_hue_metrics_sample_file_location",
"value": "false"
},
{
"desc": "Path to the TLS/SSL truststore on the host running the Hue web server. This file must be readable by the Hue system user and in PEM format. The truststore contains certificates of trusted servers, or of Certificate Authorities trusted to identify servers. This file is created by concatenating together, in PEM format, all of the appropriate certificates. If not set, the system's default OpenSSL trusted certificate authority list is used. This option replaces the former REQUESTS_CA_BUNDLE environment variable.",
"display_name": "TLS/SSL Truststore",
"name": "ssl_cacerts",
"value": null
},
{
"desc": "Path to the TLS/SSL private key on the host running the Hue web server. This file must be in PEM format, and must be readable by the Hue system user.",
"display_name": "Path to TLS/SSL Private Key",
"name": "ssl_private_key",
"value": null
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hue_safety_valve_server.ini</strong> for this role only.",
"display_name": "Hue Server Advanced Configuration Snippet (Safety Valve) for hue_safety_valve_server.ini",
"name": "hue_server_hue_safety_valve",
"value": null
},
{
"desc": "The password protecting the TLS/SSL Private Key. Leave blank if there is no password.",
"display_name": "TLS/SSL Private Key Password",
"name": "ssl_private_key_password",
"value": null
},
{
"desc": "If enabled, the Hue server binds to the wildcard address (\"0.0.0.0\") for its ports.",
"display_name": "Bind Hue Server to Wildcard Address",
"name": "hue_server_bind_wildcard",
"value": "false"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "hue_server_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the Hue Server's process state is consistent with the role configuration",
"display_name": "Hue Server Process Health Test",
"name": "hue_server_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Truststore parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Truststore",
"name": "role_config_suppression_ssl_cacerts",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Random string used for secure hashing in the session store.",
"display_name": "Secret Key",
"name": "secret_key",
"value": null
}
]

View File

@ -1,200 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_kt_renewer_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kt_renewer_swap_memory_usage",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Ticket Renewer Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Ticket Renewer Log Directory",
"name": "role_config_suppression_kt_renewer_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kt_renewer_unexpected_exits",
"value": "false"
},
{
"desc": "Interval in seconds with which Hue's Kerberos ticket will get renewed.",
"display_name": "Hue Keytab Renewal Interval",
"name": "keytab_reinit_frequency",
"value": "3600"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kt_renewer_host_health",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kerberos Ticket Renewer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KT_RENEWER_role_env_safety_valve",
"value": null
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "When computing the overall Kerberos Ticket Renewer health, consider the host's health.",
"display_name": "Kerberos Ticket Renewer Host Health Test",
"name": "kt_renewer_host_health_enabled",
"value": "true"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kt_renewer_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Enables the health test that the Kerberos Ticket Renewer's process state is consistent with the role configuration",
"display_name": "Kerberos Ticket Renewer Process Health Test",
"name": "kt_renewer_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Directory where Kerberos Ticket Renewer will place its log files.",
"display_name": "Kerberos Ticket Renewer Log Directory",
"name": "kt_renewer_log_dir",
"value": "/var/log/hue"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Ticket Renewer Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Ticket Renewer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kt_renewer_role_env_safety_valve",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kt_renewer_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_kt_renewer_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kt_renewer_scm_health",
"value": "false"
}
]

View File

@ -1,674 +0,0 @@
[
{
"desc": "Create users in Hue when they try to login with their LDAP credentials. For use when using LdapBackend for Hue authentication.",
"display_name": "Create LDAP users on login",
"name": "create_users_on_login",
"value": "true"
},
{
"desc": "Number of threads used by the Hue web server.",
"display_name": "Hue Web Server Threads",
"name": "cherrypy_server_threads",
"value": "50"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HUE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: HUE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "Comma-separated list of regular expressions, which match 'host:port' of requested proxy target.",
"display_name": "Whitelist",
"name": "whitelist",
"value": "(localhost|127\\.0\\.0\\.1):(50030|50070|50060|50075)"
},
{
"desc": "The health test thresholds of the overall Load Balancer health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Load Balancers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Load Balancers falls below the critical threshold.",
"display_name": "Healthy Load Balancer Monitoring Thresholds",
"name": "hue_load_balancer_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the HBase Thrift Usage Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HBase Thrift Usage Validator",
"name": "service_config_suppression_hue_hbase_thrift_server_validator",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Kerberos Ticket Renewer health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Kerberos Ticket Renewers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Kerberos Ticket Renewers falls below the critical threshold.",
"display_name": "Healthy Kerberos Ticket Renewer Monitoring Thresholds",
"name": "hue_kt_renewers_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "hue"
},
{
"desc": "The distinguished name to use as a search base for finding users and groups. This should be similar to 'dc=hadoop,dc=mycompany,dc=com'.",
"display_name": "LDAP Search Base",
"name": "base_dn",
"value": null
},
{
"desc": "Whether to suppress the results of the Kerberos Ticket Renewer Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Kerberos Ticket Renewer Health",
"name": "service_health_suppression_hue_kt_renewers_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Database Password parameter.",
"display_name": "Suppress Parameter Validation: Hue Database Password",
"name": "service_config_suppression_database_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Hue Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Hue Server Health",
"name": "service_health_suppression_hue_hue_servers_healthy",
"value": "false"
},
{
"desc": "The name of a default group that users will be added to at creation time.",
"display_name": "Default User Group",
"name": "default_user_group",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Kerberos Ticket Renewer Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kerberos Ticket Renewer Count Validator",
"name": "service_config_suppression_kt_renewer_count_validator",
"value": "false"
},
{
"desc": "Distinguished name of the user to bind as. Not required if the LDAP server supports anonymous searches. For Active Directory, this would be similar to 'hadoop-admin@mycompany.com'.",
"display_name": "LDAP Bind User",
"name": "bind_dn",
"value": null
},
{
"desc": "Whether to use StartTLS (as opposed to ldaps) to communicate securely with the LDAP server. This is only effective when the LDAP certificate is specified.",
"display_name": "Use StartTLS",
"name": "use_start_tls",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Username Pattern parameter.",
"display_name": "Suppress Parameter Validation: LDAP Username Pattern",
"name": "service_config_suppression_ldap_username_pattern",
"value": "false"
},
{
"desc": "The password of the bind user.",
"display_name": "LDAP Bind Password",
"name": "bind_password",
"value": null
},
{
"desc": "Path to the directory where audit logs will be written. The directory will be created if it doesn't exist.",
"display_name": "Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/hue/audit"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default User Group parameter.",
"display_name": "Suppress Parameter Validation: Default User Group",
"name": "service_config_suppression_default_user_group",
"value": "false"
},
{
"desc": "Type of database used for Hue",
"display_name": "Hue Database Type",
"name": "database_type",
"value": "sqlite3"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the PAM Backend Service Name parameter.",
"display_name": "Suppress Parameter Validation: PAM Backend Service Name",
"name": "service_config_suppression_pam_auth_service",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Web Interface Role parameter.",
"display_name": "Suppress Parameter Validation: HDFS Web Interface Role",
"name": "service_config_suppression_hue_webhdfs",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Bind User parameter.",
"display_name": "Suppress Parameter Validation: LDAP Bind User",
"name": "service_config_suppression_bind_dn",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Database Username parameter.",
"display_name": "Suppress Parameter Validation: Hue Database Username",
"name": "service_config_suppression_database_user",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Name of host where the Hue database is running. Not necessary for SQLite3.",
"display_name": "Hue Database Hostname",
"name": "database_host",
"value": "localhost"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Temporary Directory parameter.",
"display_name": "Suppress Parameter Validation: HDFS Temporary Directory",
"name": "service_config_suppression_hdfs_tmp_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP URL parameter.",
"display_name": "Suppress Parameter Validation: LDAP URL",
"name": "service_config_suppression_ldap_url",
"value": "false"
},
{
"desc": "Maximum size of audit log file in MB before it is rolled over.",
"display_name": "Maximum Audit Log File Size",
"name": "navigator_audit_log_max_file_size",
"value": "100"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Group Name Attribute parameter.",
"display_name": "Suppress Parameter Validation: LDAP Group Name Attribute",
"name": "service_config_suppression_group_name_attr",
"value": "false"
},
{
"desc": "LDAP certificate for authentication over TLS",
"display_name": "LDAP Certificate",
"name": "ldap_cert",
"value": null
},
{
"desc": "HTTPFS role or Namenode (if webhdfs is enabled) that hue can use to communicate with HDFS.",
"display_name": "HDFS Web Interface Role",
"name": "hue_webhdfs",
"value": null
},
{
"desc": "Whether to suppress the results of the Load Balancer Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Load Balancer Health",
"name": "service_health_suppression_hue_load_balancer_healthy",
"value": "false"
},
{
"desc": "Name of the Sqoop service that this Hue service instance depends on",
"display_name": "Sqoop Service",
"name": "sqoop_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "Name of Hue database.",
"display_name": "Hue Database Name",
"name": "database_name",
"value": "hue"
},
{
"desc": "The attribute of the group object that identifies the members of the group. For Active Directory, this is typically 'member'.",
"display_name": "LDAP Group Membership Attribute",
"name": "group_member_attr",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Blacklist parameter.",
"display_name": "Suppress Parameter Validation: Blacklist",
"name": "service_config_suppression_blacklist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Whitelist parameter.",
"display_name": "Suppress Parameter Validation: Whitelist",
"name": "service_config_suppression_whitelist",
"value": "false"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "true"
},
{
"desc": "The base filter for searching for users. For Active Directory, this is typically '(objectClass=user)'.",
"display_name": "LDAP User Filter",
"name": "user_filter",
"value": null
},
{
"desc": "Name of the Impala service that this Hue service instance depends on",
"display_name": "Impala Service",
"name": "impala_service",
"value": null
},
{
"desc": "Name of the Oozie service that this Hue service instance depends on",
"display_name": "Oozie Service",
"name": "oozie_service",
"value": null
},
{
"desc": "Name of the HBase service that this Hue service instance depends on",
"display_name": "HBase Service",
"name": "hbase_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Group Membership Attribute parameter.",
"display_name": "Suppress Parameter Validation: LDAP Group Membership Attribute",
"name": "service_config_suppression_group_member_attr",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Database Hostname parameter.",
"display_name": "Suppress Parameter Validation: Hue Database Hostname",
"name": "service_config_suppression_database_host",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Service Advanced Configuration Snippet (Safety Valve) for hue_safety_valve.ini parameter.",
"display_name": "Suppress Parameter Validation: Hue Service Advanced Configuration Snippet (Safety Valve) for hue_safety_valve.ini",
"name": "service_config_suppression_hue_service_safety_valve",
"value": "false"
},
{
"desc": "Name of the Solr service that this Hue service instance depends on",
"display_name": "Solr Service",
"name": "solr_service",
"value": null
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Certificate parameter.",
"display_name": "Suppress Parameter Validation: LDAP Certificate",
"name": "service_config_suppression_ldap_cert",
"value": "false"
},
{
"desc": "The username to use to log into the Hue database. Not necessary for SQLite3.",
"display_name": "Hue Database Username",
"name": "database_user",
"value": "hue"
},
{
"desc": "In debug mode, Django displays a detailed traceback when an exception occurs. Debugging information may contain sensitive data. Django remembers every SQL query it executes in debug mode, which will rapidly consume memory.",
"display_name": "Enable Django Debug Mode",
"name": "django_debug_enable",
"value": "false"
},
{
"desc": "Comma-separated list of regular expressions, which match any prefix of 'host:port/path' of requested proxy target. This does not support matching GET parameters.",
"display_name": "Blacklist",
"name": "blacklist",
"value": "()"
},
{
"desc": "Search Bind Authentication connects to the LDAP server using credentials provided in the 'bind_dn' and 'bind_password' configurations. If these configurations are not set, then an anonymous search is performed.",
"display_name": "Use Search Bind Authentication",
"name": "search_bind_authentication",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Username Attribute parameter.",
"display_name": "Suppress Parameter Validation: LDAP Username Attribute",
"name": "service_config_suppression_user_name_attr",
"value": "false"
},
{
"desc": "Mode of authenticating login credentials. Select desktop.auth.backend.LdapBackend to use LDAP to authenticate login credentials. LDAP requires you to also set the LDAP URL, Active Directory Domain, and optionally LDAP certificate if you are using secure LDAP. Select desktop.auth.backend.PamBackend to use PAM to authenticate login credentials.",
"display_name": "Authentication Backend",
"name": "auth_backend",
"value": "desktop.auth.backend.AllowFirstUserDjangoBackend"
},
{
"desc": "Whether to suppress configuration warnings produced by the Load Balancer Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Load Balancer Count Validator",
"name": "service_config_suppression_hue_load_balancer_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Group Filter parameter.",
"display_name": "Suppress Parameter Validation: LDAP Group Filter",
"name": "service_config_suppression_group_filter",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Active Directory Domain parameter.",
"display_name": "Suppress Parameter Validation: Active Directory Domain",
"name": "service_config_suppression_nt_domain",
"value": "false"
},
{
"desc": "Enable debug output in HTTP Internal Server Error (status 500) responses. Debugging information may contain sensitive data. If Enable Django Debug Mode is set, this is automatically enabled.",
"display_name": "Enable Debugging of Internal Server Error Responses",
"name": "http_500_debug_enable",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Database Directory parameter.",
"display_name": "Suppress Parameter Validation: Hue Database Directory",
"name": "service_config_suppression_database_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hue Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "hue_sentry_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hue Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "hue_service_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the User Augmentor parameter.",
"display_name": "Suppress Parameter Validation: User Augmentor",
"name": "service_config_suppression_user_augmentor",
"value": "false"
},
{
"desc": "The username attribute in the LDAP schema. For Active Directory, this is typically 'sAMAccountName'.",
"display_name": "LDAP Username Attribute",
"name": "user_name_attr",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Search Base parameter.",
"display_name": "Suppress Parameter Validation: LDAP Search Base",
"name": "service_config_suppression_base_dn",
"value": "false"
},
{
"desc": "Password for Hue database. Not necessary for SQLite3.",
"display_name": "Hue Database Password",
"name": "database_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the HDFS HTTPFS Usage Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HDFS HTTPFS Usage Validator",
"name": "service_config_suppression_hdfs_httpfs_present_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Hue Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_hue_sentry_safety_valve",
"value": "false"
},
{
"desc": "Thrift server to use for HBase app.",
"display_name": "HBase Thrift Server",
"name": "hue_hbase_thrift",
"value": null
},
{
"desc": "Name of the Hive service that this Hue service instance depends on",
"display_name": "Hive Service",
"name": "hive_service",
"value": null
},
{
"desc": "Name of the Sentry service that this Hue service instance depends on",
"display_name": "Sentry Service",
"name": "sentry_service",
"value": null
},
{
"desc": "Base filter for searching for groups. For Active Directory, this is typically '(objectClass=group)'.",
"display_name": "LDAP Group Filter",
"name": "group_filter",
"value": null
},
{
"desc": "Time zone name.",
"display_name": "Time Zone",
"name": "time_zone",
"value": "America/Los_Angeles"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>userName: treats the field value as a userName, and ignores any\n host-specific data. This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can be used to compare Hue events:\n</p>\n\n<ul>\n <li>operation: the Hue operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n</ul>\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Database Dump File parameter.",
"display_name": "Suppress Parameter Validation: Database Dump File",
"name": "service_config_suppression_database_dump_file",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Beeswax Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Beeswax Server Health",
"name": "service_health_suppression_hue_beeswax_server_health",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Hue Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Hue Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Hue Servers falls below the critical threshold.",
"display_name": "Healthy Hue Server Monitoring Thresholds",
"name": "hue_hue_servers_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "The group name attribute in the LDAP schema. For Active Directory, this is typically 'cn'.",
"display_name": "LDAP Group Name Attribute",
"name": "group_name_attr",
"value": null
},
{
"desc": "HDFS directory used for storing temporary files.",
"display_name": "HDFS Temporary Directory",
"name": "hdfs_tmp_dir",
"value": "/tmp"
},
{
"desc": "Default encoding for site data.",
"display_name": "Default Site Encoding",
"name": "default_site_encoding",
"value": "utf"
},
{
"desc": "If the database is SQLite3, this is the filename of the database to use, and the directory of this file must be writable by the 'hue' user.",
"display_name": "Hue Database Directory",
"name": "database_dir",
"value": "/var/lib/hue/desktop.db"
},
{
"desc": "When you enable anonymous usage data collection Hue tracks anonymised pages and application versions in order to gather information about each application's usage levels. The data collected does not include any hostnames or IDs. Data collection option is available on CDH 4.4 and later deployments.",
"display_name": "Enable Usage Data Collection",
"name": "usage_data_collection_enable",
"value": "true"
},
{
"desc": "Name of the ZooKeeper service that this Hue service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Site Encoding parameter.",
"display_name": "Suppress Parameter Validation: Default Site Encoding",
"name": "service_config_suppression_default_site_encoding",
"value": "false"
},
{
"desc": "The URL of the LDAP Server; similar to 'ldap://auth.mycompany.com' or 'ldaps://auth.mycompany.com'.",
"display_name": "LDAP URL",
"name": "ldap_url",
"value": null
},
{
"desc": "The PAM service name to use when authenticating over desktop.auth.backend.PamBackend. This is typically the name of a file under /etc/pam.d/ on the Hue host.",
"display_name": "PAM Backend Service Name",
"name": "pam_auth_service",
"value": "login"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hue Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hue Server Count Validator",
"name": "service_config_suppression_hue_server_count_validator",
"value": "false"
},
{
"desc": "Only applies to Active Directory. The Active Directory Domain will be similar to 'MYCOMPANY.COM'.",
"display_name": "Active Directory Domain",
"name": "nt_domain",
"value": null
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "hue"
},
{
"desc": "Class that defines extra accessor methods for user objects.",
"display_name": "User Augmentor",
"name": "user_augmentor",
"value": "desktop.auth.backend.DefaultUserAugmentor"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hue Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_hue_service_env_safety_valve",
"value": "false"
},
{
"desc": "File where the database gets dumped to or loaded from.",
"display_name": "Database Dump File",
"name": "database_dump_file",
"value": "/tmp/hue_database_dump.json"
},
{
"desc": "LDAP Username Pattern for use with non-Active Directory LDAP implementations. Must contain the special '&ltusername&gt' string for replacement during authentication.",
"display_name": "LDAP Username Pattern",
"name": "ldap_username_pattern",
"value": null
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "hue"
},
{
"desc": "When computing the overall HUE health, consider Beeswax Server's health",
"display_name": "Beeswax Server Role Health Test",
"name": "hue_beeswax_server_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP User Filter parameter.",
"display_name": "Suppress Parameter Validation: LDAP User Filter",
"name": "service_config_suppression_user_filter",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Bind Password parameter.",
"display_name": "Suppress Parameter Validation: LDAP Bind Password",
"name": "service_config_suppression_bind_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Audit Log Directory",
"name": "service_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Time Zone parameter.",
"display_name": "Suppress Parameter Validation: Time Zone",
"name": "service_config_suppression_time_zone",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "HUE Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hue Database Name parameter.",
"display_name": "Suppress Parameter Validation: Hue Database Name",
"name": "service_config_suppression_database_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hue_safety_valve.ini</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Hue Service Advanced Configuration Snippet (Safety Valve) for hue_safety_valve.ini",
"name": "hue_service_safety_valve",
"value": null
},
{
"desc": "Port on host where the Hue database is running. Not necessary for SQLite3.",
"display_name": "Hue Database Port",
"name": "database_port",
"value": "3306"
}
]

View File

@ -1,404 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Certificate for Catalog Server Webserver parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Certificate for Catalog Server Webserver",
"name": "role_config_suppression_webserver_certificate_file",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_catalogserver_swap_memory_usage",
"value": "false"
},
{
"desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.",
"display_name": "Health Check Startup Tolerance",
"name": "catalogserver_startup_tolerance",
"value": "5"
},
{
"desc": "Port where Catalog Server debug web server runs.",
"display_name": "Catalog Server HTTP Server Port",
"name": "catalogserver_webserver_port",
"value": "25020"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Web Server User Password parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Web Server User Password",
"name": "role_config_suppression_webserver_htpassword_password",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "catalogserver_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "catalogserver_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Port where Catalog Server is exported.",
"display_name": "Catalog Server Service Port",
"name": "catalog_service_port",
"value": "26000"
},
{
"desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)",
"display_name": "Catalog Server Log Buffer Level",
"name": "logbuflevel",
"value": "0"
},
{
"desc": "If true, loads catalog metadata in the background. If false, metadata is loaded lazily (on access). Only effective in CDH 5 and Impala 1.2.4 and higher.",
"display_name": "Load Catalog in Background",
"name": "load_catalog_in_background",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_catalogserver_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_catalogserver_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server HBase Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server HBase Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_catalogd_hbase_conf_safety_valve",
"value": "false"
},
{
"desc": "The amount of time to wait for the Catalog Server to fully start up and connect to the StateStore before enforcing the connectivity check.",
"display_name": "Catalog Server Connectivity Tolerance at Startup",
"name": "catalogserver_connectivity_tolerance",
"value": "180"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Timeout for requests to the Hive Metastore Server from Catalog Server. Consider increasing this if you have tables with a lot of metadata and see timeout errors.",
"display_name": "Catalog Server Hive Metastore Connection Timeout",
"name": "hive_metastore_timeout",
"value": "3600"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Catalog Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Catalog Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_catalogserver_role_env_safety_valve",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Username for Catalog Server web server authentication.",
"display_name": "Catalog Server Web Server Username",
"name": "webserver_htpassword_user",
"value": null
},
{
"desc": "Password for Catalog Server web server authentication.",
"display_name": "Catalog Server Web Server User Password",
"name": "webserver_htpassword_password",
"value": null
},
{
"desc": "Enables the health test that the Impala Catalog Server's process state is consistent with the role configuration",
"display_name": "Impala Catalog Server Process Health Test",
"name": "catalogserver_scm_health_enabled",
"value": "true"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
"display_name": "Catalog Server HBase Advanced Configuration Snippet (Safety Valve)",
"name": "catalogd_hbase_conf_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Catalog Server command line flags. Key names should begin with a hyphen(-). <strong>For example</strong>: -log_filename=foo.log",
"display_name": "Catalog Server Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "catalogd_cmd_args_safety_valve",
"value": null
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "Catalog Server HDFS Advanced Configuration Snippet (Safety Valve)",
"name": "catalogd_hdfs_site_conf_safety_valve",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_catalogserver_web_metric_collection",
"value": "false"
},
{
"desc": "Directory where Catalog Server will place its log files.",
"display_name": "Catalog Server Log Directory",
"name": "log_dir",
"value": "/var/log/catalogd"
},
{
"desc": "Enables the health test that verifies the Catalog Server is connected to the StateStore",
"display_name": "Catalog Server Connectivity Health Test",
"name": "catalogserver_connectivity_health_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "catalogserver_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_catalogserver_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Command Line Argument Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_catalogd_cmd_args_safety_valve",
"value": "false"
},
{
"desc": "Enable/Disable Catalog Server web server. This web server contains useful information about Catalog Server daemon.",
"display_name": "Enable Catalog Server Web Server",
"name": "catalogd_enable_webserver",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_catalogserver_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Impala Catalog Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "CATALOGSERVER_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Resident Set Size heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Resident Set Size",
"name": "role_health_suppression_catalogserver_memory_rss_health",
"value": "false"
},
{
"desc": "When computing the overall Impala Catalog Server health, consider the host's health.",
"display_name": "Impala Catalog Server Host Health Test",
"name": "catalogserver_host_health_enabled",
"value": "true"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Core Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Core Dump Directory",
"name": "role_config_suppression_core_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Hive Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Hive Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_catalogd_hive_conf_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server Web Server Username parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server Web Server Username",
"name": "role_config_suppression_webserver_htpassword_user",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_catalogserver_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hive-site.xml</strong> for this role only.",
"display_name": "Catalog Server Hive Advanced Configuration Snippet (Safety Valve)",
"name": "catalogd_hive_conf_safety_valve",
"value": null
},
{
"desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above.",
"display_name": "Catalog Server Verbose Log Level",
"name": "log_verbose_level",
"value": "1"
},
{
"desc": "The health test thresholds on the resident size of the process.",
"display_name": "Resident Set Size Thresholds",
"name": "process_resident_set_size_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The maximum size, in megabytes, per log file for Impala Catalog Server logs. Typically used by log4j or logback.",
"display_name": "Impala Catalog Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Catalog Server HDFS Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Catalog Server HDFS Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_catalogd_hdfs_site_conf_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the StateStore Connectivity heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: StateStore Connectivity",
"name": "role_health_suppression_catalogserver_connectivity",
"value": "false"
},
{
"desc": "Directory where the Catalog Server core dump is placed.",
"display_name": "Catalog Server Core Dump Directory",
"name": "core_dump_dir",
"value": "/var/log/catalogd"
},
{
"desc": "Local path to the certificate presented by the Catalog Server debug webserver. This file must be in PEM format. If empty, webserver SSL/TLS support is not enabled.",
"display_name": "SSL/TLS Certificate for Catalog Server Webserver",
"name": "webserver_certificate_file",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The minimum log level for Impala Catalog Server logs",
"display_name": "Impala Catalog Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The number of log files that are kept for each severity level before all older log files are removed. The number has to be greater than 1 to keep at least the current log file open. If set to 0, all log files are retained and log rotation is effectively disabled.",
"display_name": "Catalog Server Maximum Log Files",
"name": "max_log_files",
"value": "10"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_catalogserver_scm_health",
"value": "false"
}
]

View File

@ -1,662 +0,0 @@
[
{
"desc": "Whether to suppress the results of the Impala Daemon Scratch Directories Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala Daemon Scratch Directories Free Space",
"name": "role_health_suppression_impalad_scratch_directories_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Lineage Enabled Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Lineage Enabled Validator",
"name": "role_config_suppression_impala_lineage_enabled_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemons Load Balancer parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemons Load Balancer",
"name": "role_config_suppression_impalad_load_balancer",
"value": "false"
},
{
"desc": "Encrypt and verify the integrity of all data spilled to disk as part of a query. This feature is only supported for Impala 2.0 and higher and CDH 5.2 and higher (which includes Impala 2.0).",
"display_name": "Disk Spill Encryption",
"name": "disk_spill_encryption",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Command Line Argument Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impalad_cmd_args_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Audit Enabled Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Audit Enabled Validator",
"name": "role_config_suppression_impala_audit_enabled_validator",
"value": "false"
},
{
"desc": "Directory where an Impala Daemon core dump is placed.",
"display_name": "Impala Daemon Core Dump Directory",
"name": "core_dump_dir",
"value": "/var/log/impalad"
},
{
"desc": "Port on which HiveServer2 client requests are served by Impala Daemons.",
"display_name": "Impala Daemon HiveServer2 Port",
"name": "hs2_port",
"value": "21050"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Fair Scheduler Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Fair Scheduler Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impalad_fair_scheduler_safety_valve",
"value": "false"
},
{
"desc": "Maximum number of seconds that Impala attempts to register or re-register with Llama. If registration is unsuccessful, Impala cancels the action with an error, which could result in an impalad startup failure or a cancelled query. A setting of -1 seconds means try indefinitely.",
"display_name": "Llama Registration Timeout Seconds",
"name": "llama_registration_timeout_secs",
"value": "30"
},
{
"desc": "When computing the overall Impala Daemon health, consider the host's health.",
"display_name": "Impala Daemon Host Health Test",
"name": "impalad_host_health_enabled",
"value": "true"
},
{
"desc": "The directory in which Impala daemon audit event log files are written. If \"Impala Audit Event Generation\" property is enabled, Impala will generate its audit logs in this directory.",
"display_name": "Impala Daemon Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/impalad/audit"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "A list of key-value pairs of additional query options to pass to the Impala Daemon command line, separated by ','.",
"display_name": "Impala Daemon Query Options Advanced Configuration Snippet (Safety Valve)",
"name": "default_query_options",
"value": ""
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_impalad_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Web Server User Password parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Web Server User Password",
"name": "role_config_suppression_webserver_htpassword_password",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Impala Daemon Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Enable or disable the Impala Daemon web server. This web server contains useful information about Impala Daemon.",
"display_name": "Enable Impala Daemon Web Server",
"name": "impalad_enable_webserver",
"value": "true"
},
{
"desc": "An XML snippet to append to llama-site.xml for Impala Daemons. This configuration only has effect on Impala versions 1.3 or greater.",
"display_name": "Impala Daemon Llama Site Advanced Configuration Snippet (Safety Valve)",
"name": "impala_llama_site_conf_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "impalad_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Impala Daemon Environment Advanced Configuration Snippet (Safety Valve)",
"name": "IMPALAD_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Web Server Username parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Web Server Username",
"name": "role_config_suppression_webserver_htpassword_user",
"value": "false"
},
{
"desc": "Enables the health test that verifies the Impala Daemon is connected to the StateStore.",
"display_name": "Impala Daemon Connectivity Health Test",
"name": "impalad_connectivity_health_enabled",
"value": "true"
},
{
"desc": "The amount of time at Impala Daemon startup allowed for the Impala Daemon to start accepting new queries for processing.",
"display_name": "Impala Daemon Ready Status Startup Tolerance",
"name": "impalad_ready_status_check_startup_tolerance",
"value": "180"
},
{
"desc": "The maximum size (in queries) of the Impala Daemon audit event log file before a new one is created.",
"display_name": "Impala Daemon Maximum Audit Log File Size",
"name": "max_audit_event_log_file_size",
"value": "5000"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_impalad_web_metric_collection",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Local UDF Library Dir parameter.",
"display_name": "Suppress Parameter Validation: Local UDF Library Dir",
"name": "role_config_suppression_local_library_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Server CA Certificate parameter.",
"display_name": "Suppress Parameter Validation: LDAP Server CA Certificate",
"name": "role_config_suppression_impalad_ldap_ca_certificate",
"value": "false"
},
{
"desc": "The directory in which Impala daemon lineage log files are written. If \"Impala Lineage Generation\" property is enabled, Impala generates its lineage logs in this directory.",
"display_name": "Impala Daemon Lineage Log Directory",
"name": "lineage_event_log_dir",
"value": "/var/log/impalad/lineage"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Llama Site Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Llama Site Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impala_llama_site_conf_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Certificate for Impala Daemon Webserver parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Certificate for Impala Daemon Webserver",
"name": "role_config_suppression_webserver_certificate_file",
"value": "false"
},
{
"desc": "User-defined function (UDF) libraries are copied from HDFS into this local directory.",
"display_name": "Local UDF Library Dir",
"name": "local_library_dir",
"value": "/var/lib/impala/udfs"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Audit Log Directory",
"name": "role_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "impalad_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_impalad_unexpected_exits",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for Impala Daemon logs. Typically used by log4j or logback.",
"display_name": "Impala Daemon Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Username for Impala Daemon webserver authentication.",
"display_name": "Impala Daemon Web Server Username",
"name": "webserver_htpassword_user",
"value": null
},
{
"desc": "Password for Impala Daemon webserver authentication.",
"display_name": "Impala Daemon Web Server User Password",
"name": "webserver_htpassword_password",
"value": null
},
{
"desc": "Directory where Impala Daemon will place its log files.",
"display_name": "Impala Daemon Log Directory",
"name": "log_dir",
"value": "/var/log/impalad"
},
{
"desc": "An XML string to use verbatim for the contents of fair-scheduler.xml for Impala Daemons. This configuration only has effect on Impala versions 1.3 or greater.",
"display_name": "Impala Daemon Fair Scheduler Advanced Configuration Snippet (Safety Valve)",
"name": "impalad_fair_scheduler_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Impala Daemon command-line flags. Key names should begin with a hyphen(-). <strong>For example</strong>: -log_filename=foo.log",
"display_name": "Impala Daemon Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "impalad_cmd_args_safety_valve",
"value": null
},
{
"desc": "Number of seconds to wait between attempts during Llama registration.",
"display_name": "Llama Registration Wait Seconds",
"name": "llama_registration_wait_secs",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Query Options Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Query Options Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_default_query_options",
"value": "false"
},
{
"desc": "Port on which ImpalaBackendService is exported.",
"display_name": "Impala Daemon Backend Port",
"name": "be_port",
"value": "22000"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
"display_name": "Impala Daemon HBase Advanced Configuration Snippet (Safety Valve)",
"name": "impalad_hbase_conf_safety_valve",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hive-site.xml</strong> for this role only.",
"display_name": "Impala Daemon Hive Advanced Configuration Snippet (Safety Valve)",
"name": "impala_hive_conf_safety_valve",
"value": null
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds on the resident size of the process.",
"display_name": "Resident Set Size Thresholds",
"name": "process_resident_set_size_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon HBase Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon HBase Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impalad_hbase_conf_safety_valve",
"value": "false"
},
{
"desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above. ",
"display_name": "Impala Daemon Verbose Log Level",
"name": "log_verbose_level",
"value": "1"
},
{
"desc": "Whether to suppress the results of the Resident Set Size health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Resident Set Size",
"name": "role_health_suppression_impalad_memory_rss_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon HDFS Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon HDFS Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impala_hdfs_site_conf_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_impalad_swap_memory_usage",
"value": "false"
},
{
"desc": "The timeout used by the Cloudera Manager Agent's query monitor when communicating with the Impala Daemon web server, specified in seconds.",
"display_name": "Query Monitoring Timeout",
"name": "executing_queries_timeout_seconds",
"value": "5.0"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_impalad_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Hive Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Hive Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impala_hive_conf_safety_valve",
"value": "false"
},
{
"desc": "Enables audit event generation by Impala daemons. The audit log file will be placed in the directory specified by 'Impala Daemon Audit Log Directory' parameter.",
"display_name": "Enable Impala Audit Event Generation",
"name": "enable_audit_event_log",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Scratch Directories parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Scratch Directories",
"name": "role_config_suppression_scratch_dirs",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_impalad_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_impalad_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Impala Daemon Ready Check health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala Daemon Ready Check",
"name": "role_health_suppression_impalad_ready_status",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"any\"}"
},
{
"desc": "Enables the health check that determines if the Impala daemon is ready to process queries.",
"display_name": "Impala Daemon Ready Status Health Check",
"name": "impalad_ready_status_check_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Core Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Core Dump Directory",
"name": "role_config_suppression_core_dump_dir",
"value": "false"
},
{
"desc": "The polling period of the Impala query monitor in the Cloudera Manager Agent, specified in seconds. If set to zero, query monitoring is disabled.",
"display_name": "Query Monitoring Period",
"name": "query_monitoring_period_seconds",
"value": "1.0"
},
{
"desc": "Port on which Beeswax client requests are served by Impala Daemons.",
"display_name": "Impala Daemon Beeswax Port",
"name": "beeswax_port",
"value": "21000"
},
{
"desc": "Local path to the certificate presented by the Impala daemon debug webserver. This file must be in .pem format. If empty, webserver SSL/TLS support is not enabled.",
"display_name": "SSL/TLS Certificate for Impala Daemon Webserver",
"name": "webserver_certificate_file",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_impalad_role_env_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Impala Daemon Scratch Directories. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Impala Daemon Scratch Directories Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Impala Daemon Scratch Directories Free Space Monitoring Percentage Thresholds",
"name": "impalad_scratch_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Maximum number of query results a client may request to be cached on a per-query basis to support restarting fetches. This option guards against unreasonably large result caches requested by clients. Requests exceeding this maximum will be rejected.",
"display_name": "Result Cache Maximum Size",
"name": "impalad_result_cache_max_size",
"value": "100000"
},
{
"desc": "Memory limit in bytes for Impala Daemon, enforced by the daemon itself. If reached, queries running on the Impala Daemon may be killed. Leave it blank to let Impala pick its own limit. Use a value of -1 B to specify no limit.",
"display_name": "Impala Daemon Memory Limit",
"name": "impalad_memory_limit",
"value": null
},
{
"desc": "Abort Impala startup if there are improper configs or running on unsupported hardware.",
"display_name": "Abort on Config Error",
"name": "abort_on_config_error",
"value": "true"
},
{
"desc": "Directories where Impala Daemon will write data such as spilling information to disk to free up memory. This can potentially be large amounts of data.",
"display_name": "Impala Daemon Scratch Directories",
"name": "scratch_dirs",
"value": null
},
{
"desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)",
"display_name": "Impala Daemon Log Buffer Level",
"name": "logbuflevel",
"value": "0"
},
{
"desc": "The amount of time to wait for the Impala Daemon to fully start up and connect to the StateStore before enforcing the connectivity check.",
"display_name": "Impala Daemon Connectivity Tolerance at Startup",
"name": "impalad_connectivity_tolerance",
"value": "180"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_impalad_host_health",
"value": "false"
},
{
"desc": "The maximum size (in entries) of the Impala daemon lineage log file before a new one is created.",
"display_name": "Impala Daemon Maximum Lineage Log File Size",
"name": "max_lineage_log_file_size",
"value": "5000"
},
{
"desc": "The location on disk of the certificate, in .pem format, used to confirm the authenticity of the LDAP server certificate. This is the Certificate Authority (CA) certificate, and it was used to sign the LDAP server certificate. If not set, Impala by default trusts all certificates supplied by the LDAP server, which means that an attacker could potentially intercept otherwise encrypted usernames and passwords.",
"display_name": "LDAP Server CA Certificate",
"name": "impalad_ldap_ca_certificate",
"value": null
},
{
"desc": "Timeout for requests to the Hive Metastore Server from Impala. Consider increasing this if you have tables with a lot of metadata and see timeout errors.",
"display_name": "Impala Daemon Hive Metastore Connection Timeout",
"name": "hive_metastore_timeout",
"value": "3600"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Port where StateStoreSubscriberService is running.",
"display_name": "StateStoreSubscriber Service Port",
"name": "state_store_subscriber_port",
"value": "23000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Daemon Lineage Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Impala Daemon Lineage Log Directory",
"name": "role_config_suppression_lineage_event_log_dir",
"value": "false"
},
{
"desc": "Address of the load balancer used for Impala daemons. Should be specified in host:port format. If this is specified and Kerberos is enabled, Cloudera Manager adds a principal for 'impala/&lt;load_balancer_host&gt;@&lt;realm&gt;' to the keytab for all Impala daemons.",
"display_name": "Impala Daemons Load Balancer",
"name": "impalad_load_balancer",
"value": null
},
{
"desc": "Maximum number of times a request to reserve, expand, or release resources is attempted until the request is cancelled.",
"display_name": "Llama Maximum Request Attempts",
"name": "llama_max_request_attempts",
"value": "5"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The minimum log level for Impala Daemon logs",
"display_name": "Impala Daemon Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Port where Impala debug web server runs.",
"display_name": "Impala Daemon HTTP Server Port",
"name": "impalad_webserver_port",
"value": "25000"
},
{
"desc": "Enables the health test that the Impala Daemon's process state is consistent with the role configuration",
"display_name": "Impala Daemon Process Health Test",
"name": "impalad_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the StateStore Connectivity health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: StateStore Connectivity",
"name": "role_health_suppression_impalad_connectivity",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Enables lineage generation by Impala daemons. The lineage log file is placed in the directory specified by the 'Impala Daemon Lineage Log Directory' parameter.",
"display_name": "Enable Impala Lineage Generation",
"name": "enable_lineage_log",
"value": "true"
},
{
"desc": "Port where Llama notification callback should be started",
"display_name": "Llama Callback Port",
"name": "llama_callback_port",
"value": "28000"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Impala Daemon Scratch Directories.",
"display_name": "Impala Daemon Scratch Directories Free Space Monitoring Absolute Thresholds",
"name": "impalad_scratch_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hdfs-site.xml</strong> for this role only.",
"display_name": "Impala Daemon HDFS Advanced Configuration Snippet (Safety Valve)",
"name": "impala_hdfs_site_conf_safety_valve",
"value": null
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "impalad_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "The number of log files that are kept for each severity level before all older log files are removed. The number has to be greater than 1 to keep at least the current log file open. If set to 0, all log files are retained and log rotation is effectively disabled.",
"display_name": "Impala Maximum Log Files",
"name": "max_log_files",
"value": "10"
}
]

View File

@ -1,506 +0,0 @@
[
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_llama_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether Llama should cache allocated resources on release.",
"display_name": "Enable Resource Caching",
"name": "llama_am_cache_enabled",
"value": "true"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Kerberos principal used by the Impala Llama ApplicationMaster roles.",
"display_name": "Role-Specific Kerberos Principal",
"name": "kerberos_role_princ_name",
"value": "llama"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_llama_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Llama System Group parameter.",
"display_name": "Suppress Parameter Validation: Llama System Group",
"name": "role_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_llama_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_llama_host_health",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "ACL for Impala ApplicationMaster clients. The ACL is a comma-separated list of user and group names. The user and group list is separated by a blank. For e.g. \"alice,bob users,wheel\". A special value of \"*\" means all users are allowed. These take effect only if security is enabled.",
"display_name": "Client ACLs",
"name": "llama_am_server_thrift_client_acl",
"value": "*"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>llama-site.xml</strong> for this role only.",
"display_name": "Impala Llama ApplicationMaster Advanced Configuration Snippet (Safety Valve) for llama-site.xml",
"name": "llama_config_valve",
"value": null
},
{
"desc": "When computing the overall Impala Llama ApplicationMaster health, consider the host's health.",
"display_name": "Impala Llama ApplicationMaster Host Health Test",
"name": "llama_host_health_enabled",
"value": "true"
},
{
"desc": "Port on which the Llama ApplicationMaster listens to HTTP requests.",
"display_name": "Llama HTTP Port",
"name": "llama_http_port",
"value": "15001"
},
{
"desc": "Maximum number of retries for a client notification. After the maximum number of client notification retries has been reached without success the client is considered lost and all its reservations are released. A successful client notification resets the retries count.",
"display_name": "Maximum Client Notification Retries",
"name": "llama_am_server_thrift_client_notifier_max_retries",
"value": "5"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Llama Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Llama Log Directory",
"name": "role_config_suppression_llama_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Llama ApplicationMaster Advanced Configuration Snippet (Safety Valve) for llama-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Impala Llama ApplicationMaster Advanced Configuration Snippet (Safety Valve) for llama-site.xml",
"name": "role_config_suppression_llama_config_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role-Specific Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Role-Specific Kerberos Principal",
"name": "role_config_suppression_kerberos_role_princ_name",
"value": "false"
},
{
"desc": "Enter an XML string that will be inserted verbatim into the Fair Scheduler allocations file. Overrides the configuration set using the Pools configuration UI. This configuration only has effect on Impala versions 1.3 or greater.",
"display_name": "Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)",
"name": "llama_fair_scheduler_safety_valve",
"value": null
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Llama ApplicationMaster Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Llama ApplicationMaster Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Client notification retry interval, in milliseconds.",
"display_name": "Client Notification Retry Interval",
"name": "llama_am_server_thrift_client_notifier_retry_interval_ms",
"value": "5000"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Enables the health test that the Impala Llama ApplicationMaster's process state is consistent with the role configuration",
"display_name": "Impala Llama ApplicationMaster Process Health Test",
"name": "llama_scm_health_enabled",
"value": "true"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The group that the Llama processes should run as.",
"display_name": "Llama System Group",
"name": "process_groupname",
"value": "llama"
},
{
"desc": "Timeout policy for resources being cached.",
"display_name": "Resource Caching Idle Timeout",
"name": "llama_am_cache_eviction_timeout_policy_idle_timeout_ms",
"value": "30000"
},
{
"desc": "Port on which the Llama ApplicationMaster listens to administrative requests on its administrative Thrift interface.",
"display_name": "Llama Thrift Admin Port",
"name": "llama_am_server_thrift_admin_address",
"value": "15002"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Impala Llama ApplicationMaster in Bytes",
"name": "llama_java_heapsize",
"value": "268435456"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Administrative Interface ACLs parameter.",
"display_name": "Suppress Parameter Validation: Administrative Interface ACLs",
"name": "role_config_suppression_llama_am_server_thrift_admin_acl",
"value": "false"
},
{
"desc": "Maximum amount of time the backed off reservations will be in 'backed off' state. The actual amount of time is a random value between the minimum and the maximum.",
"display_name": "Anti-Deadlock Maximum Delay",
"name": "llama_am_gang_anti_deadlock_max_delay_ms",
"value": "30000"
},
{
"desc": "Queues Llama ApplicationMaster should connect to at start up.",
"display_name": "Core Queues",
"name": "llama_am_core_queues",
"value": ""
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Maximum number of threads used by the Llama ApplicationMaster auxiliary service for serving client requests.",
"display_name": "Thrift Server Maximum Threads",
"name": "llama_am_server_thrift_server_max_threads",
"value": "50"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Impala Llama ApplicationMaster Environment Advanced Configuration Snippet (Safety Valve)",
"name": "LLAMA_role_env_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Interval of time without any new allocation that will trigger the Impala ApplicationMaster anti-deadlock logic.",
"display_name": "Anti-Deadlock No Allocation Limit Interval",
"name": "llama_am_gang_anti_deadlock_no_allocation_limit_ms",
"value": "30000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "Time in milliseconds after which Llama will discard its AM for a queue that has been empty of reservations. Does not apply to queues specified with the Core Queues property.",
"display_name": "Queue Expiration Age",
"name": "llama_am_queue_expire_ms",
"value": "300000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client ACLs parameter.",
"display_name": "Suppress Parameter Validation: Client ACLs",
"name": "role_config_suppression_llama_am_server_thrift_client_acl",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Llama ApplicationMaster Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Llama ApplicationMaster Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_llama_role_env_safety_valve",
"value": "false"
},
{
"desc": "Minimum number of threads used by the Llama ApplicationMaster auxiliary service for serving client requests.",
"display_name": "Thrift Server Minimum Threads",
"name": "llama_am_server_thrift_server_min_threads",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "llama_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Core Queues parameter.",
"display_name": "Suppress Parameter Validation: Core Queues",
"name": "role_config_suppression_llama_am_core_queues",
"value": "false"
},
{
"desc": "If enabled, the Impala Llama ApplicationMaster binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind Impala Llama ApplicationMaster to Wildcard Address",
"name": "llama_bind_wildcard",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The maximum size, in megabytes, per log file for Impala Llama ApplicationMaster logs. Typically used by log4j or logback.",
"display_name": "Impala Llama ApplicationMaster Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Minimum amount of time the backed off reservations will be in 'backed off' state. The actual amount of time is a random value between the minimum and the maximum.",
"display_name": "Anti-Deadlock Minimum Delay",
"name": "llama_am_gang_anti_deadlock_min_delay_ms",
"value": "10000"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Percentage of resources that will be backed off by the Impala ApplicationMaster anti-deadlock logic. Random reservations will be backed off until the percentage of backed off resources reaches this percentage.",
"display_name": "Anti-Deadlock Backoff Percentage",
"name": "llama_am_gang_anti_deadlock_backoff_percent",
"value": "30"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Llama Server",
"name": "llama_java_opts",
"value": ""
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Llama ApplicationMaster heartbeat interval, in milliseconds. On each heartbeat the ApplicationMaster submits new reservations to YARN ResourceManager and gets updates from it.",
"display_name": "AM Heartbeat Interval",
"name": "llama_am_server_thrift_client_notifier_heartbeat_ms",
"value": "5000"
},
{
"desc": "The user that the Llama process should run as.",
"display_name": "Llama System User",
"name": "process_username",
"value": "llama"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Port on which the Llama ApplicationMaster serves its Thrift interface.",
"display_name": "Llama Thrift Port",
"name": "llama_port",
"value": "15000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Fair Scheduler XML Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_llama_fair_scheduler_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Impala Llama ApplicationMaster Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_llama_unexpected_exits",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_llama_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Llama System User parameter.",
"display_name": "Suppress Parameter Validation: Llama System User",
"name": "role_config_suppression_process_username",
"value": "false"
},
{
"desc": "Whether to break resource requests into smaller requests of standard size before the resource cache. The sizes are taken from Yarn settings Container Memory Increment and Container Virtual CPU Cores Increment.",
"display_name": "Enable Resource Cache Normalization",
"name": "llama_am_resource_normalizing_enabled",
"value": "true"
},
{
"desc": "The maximum number of rolled log files to keep for Impala Llama ApplicationMaster logs. Typically used by log4j or logback.",
"display_name": "Impala Llama ApplicationMaster Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Socket timeout, in milliseconds, used by the Llama ApplicationMaster auxiliary service for all its server and client Thrift connections.",
"display_name": "Thrift Transport Timeout",
"name": "llama_am_server_thrift_transport_timeout_ms",
"value": "60000"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_llama_swap_memory_usage",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "ACL for Impala ApplicationMaster admins. The ACL is a comma-separated list of user and group names. The user and group list is separated by a blank. For e.g. \"alice,bob users,wheel\". A special value of \"*\" means all users are allowed. These take effect only if security is enabled.",
"display_name": "Administrative Interface ACLs",
"name": "llama_am_server_thrift_admin_acl",
"value": "*"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Llama Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Llama Server",
"name": "role_config_suppression_llama_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Directory where Llama will place its log files.",
"display_name": "Llama Log Directory",
"name": "llama_log_dir",
"value": "/var/log/impala-llama"
},
{
"desc": "The minimum log level for Impala Llama ApplicationMaster logs",
"display_name": "Impala Llama ApplicationMaster Logging Threshold",
"name": "log_threshold",
"value": "INFO"
}
]

View File

@ -1,596 +0,0 @@
[
{
"desc": "Whether to suppress the results of the Impala Llama ApplicationMaster Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala Llama ApplicationMaster Health",
"name": "service_health_suppression_impala_llamas_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Pattern parameter.",
"display_name": "Suppress Parameter Validation: LDAP Pattern",
"name": "service_config_suppression_ldap_bind_pattern",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Bypass Hive Metastore Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Bypass Hive Metastore Validator",
"name": "service_config_suppression_impala_bypass_hms_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Short-Circuit Read Enabled Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Short-Circuit Read Enabled Validator",
"name": "service_config_suppression_short_circuit_read_validator",
"value": "false"
},
{
"desc": "The user that this Impala's processes should run as (except Llama, which has its own user).",
"display_name": "Impala System User (except Llama)",
"name": "process_username",
"value": "impala"
},
{
"desc": "<p>\nEvent filters are defined in a JSON object like the following:\n</p>\n\n<pre>\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n</pre>\n\n<p>\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n</p>\n\n<p>\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n</p>\n\n<p>\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n</p>\n\n<p>\nThe following is the list of fields that can be filtered for Impala events:\n</p>\n\n<ul>\n <li>userName: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>operation: the Impala operation being performed.</li>\n <li>databaseName: the databaseName for the operation.</li>\n <li>tableName: the tableName for the operation.</li>\n</ul>\n",
"display_name": "Audit Event Filter",
"name": "navigator_audit_event_filter",
"value": null
},
{
"desc": "When set, this value is appended to all usernames before authenticating with the LDAP server. For example, if this parameter is set to \"my.domain.com\", and the user authenticating to the Impala daemon is \"mike\", then \"mike@my.domain.com\" is passed to the LDAP server. If this field is unset, the username remains unaltered before being passed to the LDAP server. This parameter is mutually exclusive with LDAP BaseDN and LDAP Pattern.",
"display_name": "LDAP Domain",
"name": "ldap_domain",
"value": null
},
{
"desc": "Name of the HDFS service that this Impala service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "Maximum amount of time (in milliseconds) that a request waits to be admitted before timing out. Must be a positive integer.",
"display_name": "Admission Control Queue Timeout",
"name": "admission_control_queue_timeout",
"value": "60000"
},
{
"desc": "Use Dynamic Resource Pools to configure Impala admission control or RM for this Impala service. These features are only supported in Impala 1.3 or higher deployments.",
"display_name": "Enable Dynamic Resource Pools",
"name": "admission_control_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the HDFS Local Path Access User Validator configuration validator.",
"display_name": "Suppress Configuration Validator: HDFS Local Path Access User Validator",
"name": "service_config_suppression_local_path_access_user_validator",
"value": "false"
},
{
"desc": "Use Sentry to enable role-based, fine-grained authorization. This configuration enables Sentry using policy files. To enable Sentry using Sentry service instead, add Sentry service as a dependency to Impala service. <strong>Sentry service provides concurrent and secure access to authorization policy metadata and is the recommended option for enabling Sentry. </strong> Sentry is supported only on Impala 1.1 or later deployments.",
"display_name": "Enable Sentry Authorization using Policy Files",
"name": "sentry_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Impala Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_impalad_sentry_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to Impala Daemon command-line flags. Applies to all roles in this service. Key names should begin with a hyphen(-). <strong>For example</strong>: -log_filename=foo.log",
"display_name": "Impala Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "impala_cmd_args_safety_valve",
"value": null
},
{
"desc": "Timeout in milliseconds for all HBase RPCs made by Impala. Overrides configuration in HBase service.",
"display_name": "HBase RPC Timeout",
"name": "hbase_rpc_timeout",
"value": "3000"
},
{
"desc": "Use debug build of Impala binaries when starting roles. Useful when performing diagnostic activities to get more information in the stacktrace or core dump.",
"display_name": "Use Debug Build",
"name": "use_debug_build",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Proxy User Configuration parameter.",
"display_name": "Suppress Parameter Validation: Proxy User Configuration",
"name": "service_config_suppression_impala_authorized_proxy_user_config",
"value": "false"
},
{
"desc": "Action to take when the audit event queue is full. Drop the event or shutdown the affected process.",
"display_name": "Audit Queue Policy",
"name": "navigator_audit_queue_policy",
"value": "DROP"
},
{
"desc": "Local path to the private key that matches the certificate specified in the Certificate for Clients. This file must be in PEM format, and is required if the SSL/TLS Certificate for Clients is supplied.",
"display_name": "SSL/TLS Private Key for Clients",
"name": "ssl_private_key",
"value": null
},
{
"desc": "Whether to suppress the results of the Impala Catalog Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala Catalog Server Health",
"name": "service_health_suppression_impala_catalogserver_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the LDAP Configuration Validator configuration validator.",
"display_name": "Suppress Configuration Validator: LDAP Configuration Validator",
"name": "service_config_suppression_impala_ldap_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala System Group (except Llama) parameter.",
"display_name": "Suppress Parameter Validation: Impala System Group (except Llama)",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Assignment Locality heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Assignment Locality",
"name": "service_health_suppression_impala_assignment_locality",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Daemon Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Daemon Count Validator",
"name": "service_config_suppression_impalad_count_validator",
"value": "false"
},
{
"desc": "Specifies the set of authorized proxy users (users who can impersonate other users during authorization) and whom they are allowed to impersonate. Input is a semicolon-separated list of key=value pairs of authorized proxy users to the user(s) they can impersonate. These users are specified as a comma separated list of short usernames, or '*' to indicate all users. For example: joe=alice,bob;hue=*;admin=*. Only valid when Sentry is enabled.",
"display_name": "Proxy User Configuration",
"name": "impala_authorized_proxy_user_config",
"value": "hue=*"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Catalog Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Catalog Server Count Validator",
"name": "service_config_suppression_catalogserver_count_validator",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Configures the maximum number of concurrently running queries for admission control when using a single pool. -1 indicates no limit and 0 indicates all incoming requests will be rejected. Ignored when Dynamic Resource Pools for Admission Control is enabled.",
"display_name": "Single Pool Max Running Queries",
"name": "admission_control_single_pool_max_requests",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_impala_service_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Impala StateStore Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala StateStore Health",
"name": "service_health_suppression_impala_statestore_health",
"value": "false"
},
{
"desc": "Name of YARN service to use for resource management integration between Impala and YARN. This service dependency and the existence of a Llama role is required for using said integration.",
"display_name": "YARN Service for Resource Management",
"name": "yarn_service",
"value": null
},
{
"desc": "Name of the Sentry service that this Impala service instance depends on. If selected, Impala uses this Sentry service to look up authorization privileges. Before enabling Sentry, read the requirements and configuration steps in <a class=\"bold\" href=\"http://tiny.cloudera.com/sentry-service-cm5\" target=\"_blank\">Setting Up The Sentry Service<i class=\"externalLink\"></i></a>.",
"display_name": "Sentry Service",
"name": "sentry_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties parameter.",
"display_name": "Suppress Parameter Validation: Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties",
"name": "service_config_suppression_navigator_lineage_client_config_safety_valve",
"value": "false"
},
{
"desc": "Time in seconds before Impala Daemon or Catalog Server times out with the StateStore.",
"display_name": "StateStoreSubscriber Timeout",
"name": "statestore_subscriber_timeout",
"value": "30"
},
{
"desc": "Configures the max memory of running queries for admission control when using a single pool. -1 or 0 indicates no limit. Ignored when Dynamic Resource Pools for Admission Control is enabled.",
"display_name": "Single Pool Mem Limit",
"name": "admission_control_single_pool_mem_limit",
"value": "-1"
},
{
"desc": "Maximum number of HBase client retries for Impala. Used as a maximum for all operations such as fetching of the root region from the root RegionServer, getting a cell's value, and starting a row update. Overrides configuration in the HBase service.",
"display_name": "Maximum HBase Client Retries",
"name": "hbase_client_retries_number",
"value": "3"
},
{
"desc": "Controls the aggregate metrics generated for Impala queries. The structure is a JSON list of the attributes to aggregate and the entities to aggregate to. For example, if the attributeName is 'hdfs_bytes_read' and the aggregationTargets is ['USER'] then the Service Monitor will create the metric 'impala_query_hdfs_bytes_read_rate' and, every ten minutes, will record the total hdfs bytes read for each user across all their Impala queries. By default it will also record the number of queries issues ('num_impala_queries_rate') for both users and pool. For a full list of the supported attributes see the Impala search page. Note that the valid aggregation targets are USER, YARN_POOL, and IMPALA (the service), and that these aggregate metrics can be viewed on both the reports and charts search pages.",
"display_name": "Impala Query Aggregates",
"name": "impala_query_aggregates",
"value": "[\n {\n \"attributeName\": \"hdfs_bytes_read\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"hdfs_bytes_written\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"thread_cpu_time\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"bytes_streamed\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"cm_cpu_milliseconds\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n },\n { \"attributeName\": \"query_duration\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"IMPALA\", \"CLUSTER\"]\n }\n]\n"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Query Aggregates parameter.",
"display_name": "Suppress Parameter Validation: Impala Query Aggregates",
"name": "service_config_suppression_impala_query_aggregates",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Impala Llama ApplicationMaster health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Impala Llama ApplicationMasters falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Impala Llama ApplicationMasters falls below the critical threshold.",
"display_name": "Healthy Impala Llama ApplicationMaster Monitoring Thresholds",
"name": "impala_llamas_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala System User (except Llama) parameter.",
"display_name": "Suppress Parameter Validation: Impala System User (except Llama)",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Impala Daemon Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Impala Daemon Health",
"name": "service_health_suppression_impala_impalads_healthy",
"value": "false"
},
{
"desc": "Local path to the X509 certificate that will identify the Impala daemon to clients during SSL/TLS connections. This file must be in PEM format.",
"display_name": "SSL/TLS Certificate for Clients",
"name": "ssl_server_certificate",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS CA Certificate parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS CA Certificate",
"name": "service_config_suppression_ssl_client_ca_certificate",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Private Key for Clients parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Private Key for Clients",
"name": "service_config_suppression_ssl_private_key",
"value": "false"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "true"
},
{
"desc": "A list specifying the rules to run to determine which Fair Scheduler configuration to use. Typically edited using the Rules configuration UI. This configuration only has effect on Impala versions 1.3 or greater.",
"display_name": "Fair Scheduler Configuration Rules",
"name": "impala_schedule_rules",
"value": "[]"
},
{
"desc": "When checked, LDAP-based authentication for users is enabled. Usernames and passwords are transmitted in the clear unless encryption is turned on. To encrypt the network traffic from the Impala daemon to the LDAP server, use either an ldaps:// URI or select 'Enable LDAP TLS'. To encrypt network traffic from clients to the Impala daemon, specify 'Enable TLS/SSL for Impala Client Services'.",
"display_name": "Enable LDAP Authentication",
"name": "enable_ldap_auth",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala StateStore Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala StateStore Count Validator",
"name": "service_config_suppression_statestore_count_validator",
"value": "false"
},
{
"desc": "Use Impala Admission Control to throttle Impala requests. Unless 'Enable Dynamic Resource Pools' is enabled, Impala uses a single, default pool that is configured using the Single Pool configurations below. These features are only supported in Impala 1.3 or higher deployments.",
"display_name": "Enable Impala Admission Control",
"name": "all_admission_control_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Domain parameter.",
"display_name": "Suppress Parameter Validation: LDAP Domain",
"name": "service_config_suppression_ldap_domain",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP BaseDN parameter.",
"display_name": "Suppress Parameter Validation: LDAP BaseDN",
"name": "service_config_suppression_ldap_basedn",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Filter parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Filter",
"name": "service_config_suppression_navigator_audit_event_filter",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Impala Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "impalad_sentry_safety_valve",
"value": null
},
{
"desc": "Configures the maximum number of queued queries for admission control when using a single pool. -1 or 0 disables queuing, i.e. incoming requests are rejected if they can not be executed immediately. Ignored when Dynamic Resource Pools for Admission Control is enabled.",
"display_name": "Single Pool Max Queued Queries",
"name": "admission_control_single_pool_max_queued",
"value": "200"
},
{
"desc": "The health test thresholds of the overall Impala Daemon health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Impala Daemons falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Impala Daemons falls below the critical threshold.",
"display_name": "Healthy Impala Daemon Monitoring Thresholds",
"name": "impala_impalads_healthy_thresholds",
"value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the Enable HDFS Block Metadata API Configuration Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Enable HDFS Block Metadata API Configuration Validator",
"name": "service_config_suppression_impala_hdfs_dfs_datanode_hdfs_blocks_metadata_enabled_set_validator",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Impala Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "impala_service_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala Command Line Argument Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_impala_cmd_args_safety_valve",
"value": "false"
},
{
"desc": "Number of minutes between reestablishing our ticket with the Kerberos server.",
"display_name": "Kerberos Re-init Interval",
"name": "kerberos_reinit_interval",
"value": "60"
},
{
"desc": "If true, attempts to establish a TLS (Transport Layer Security) connection with the LDAP server. Only supported in Impala 1.4 or CDH 5.1 or higher. Not required when using an LDAP URI with prefix ldaps://, because that already specifies TLS.",
"display_name": "Enable LDAP TLS",
"name": "enable_ldap_tls",
"value": "false"
},
{
"desc": "When set, this parameter allows arbitrary mapping from usernames into a Distinguished Name (DN). The string specified must have a placeholder named \"#UID\" inside it, and that #UID is replaced with the username. For example, you could mimic the behavior of LDAP BaseDN by specifying \"uid=#UID,ou=People,dc=cloudera,dc=com\". When the username of \"mike\" comes in, it replaces the #UID and the result is \"uid=mike,ou=People,dc=cloudera,dc=com\". This option should be used when more control over the DN is needed. This parameter is mutually exclusive with LDAP Domain and LDAP BaseDN.",
"display_name": "LDAP Pattern",
"name": "ldap_bind_pattern",
"value": null
},
{
"desc": "The URI of the LDAP server to use if LDAP is enabled. The URI must be prefixed with ldap:// or ldaps://. The URI can optionally specify the port, for example: ldap://ldap_server.example.com:389. ldaps:// is only supported in Impala 1.4 or CDH 5.1 or higher, and usually requires that you specify a port.",
"display_name": "LDAP URI",
"name": "impala_ldap_uri",
"value": null
},
{
"desc": "The health test thresholds for the assignment locality health test. Specified as a percentage of total assignments.",
"display_name": "Assignment Locality Ratio Thresholds",
"name": "impala_assignment_locality_thresholds",
"value": "{\"critical\":\"5.0\",\"warning\":\"80.0\"}"
},
{
"desc": "Controls which queries a non-admin user can see in the queries list view",
"display_name": "Non-Admin Users Query List Visibility Settings",
"name": "user_query_list_settings",
"value": "ALL"
},
{
"desc": "The location on disk of the certificate, in PEM format, used to confirm the authenticity of SSL/TLS servers that the Impala daemons might connect to.",
"display_name": "SSL/TLS CA Certificate",
"name": "ssl_client_ca_certificate",
"value": null
},
{
"desc": "Used to generate a core dump to get more information about an Impala crash. Unless otherwise configured systemwide using /proc/sys/kernel/core_pattern, the dump is generated in the 'current directory' of the Impala process (usually a subdirectory of the /var/run/cloudera-scm-agent/process directory). The core file can be very large.",
"display_name": "Enable Core Dump",
"name": "enable_core_dump",
"value": "false"
},
{
"desc": "Enable HDFS short-circuit read. This allows a client colocated with the DataNode to read HDFS file blocks directly. This gives a performance boost to distributed clients that are aware of locality.",
"display_name": "Enable HDFS Short-Circuit Read",
"name": "dfs_client_read_shortcircuit",
"value": "true"
},
{
"desc": "Password protecting the SSL/TLS Private Key for Clients. Leave blank if there is no password.",
"display_name": "SSL/TLS Private Key Password for Clients",
"name": "ssl_private_key_password",
"value": null
},
{
"desc": "The minimum number of assignments that must occur during the test time period before the threshold values will be checked. Until this number of assignments have been observed in the test time period the health test will be disabled.",
"display_name": "Assignment Locality Minimum Assignments",
"name": "impala_assignment_locality_minimum",
"value": "10"
},
{
"desc": "Enable collection of lineage from the service's roles.",
"display_name": "Enable Lineage Collection",
"name": "navigator_lineage_enabled",
"value": "true"
},
{
"desc": "JSON representation of all the configurations that the Fair Scheduler can take on across all schedules. Typically edited using the Pools configuration UI. This configuration only has effect on Impala versions 1.3 or greater.",
"display_name": "Fair Scheduler Allocations",
"name": "impala_scheduled_allocations",
"value": "{\"defaultMinSharePreemptionTimeout\":null,\"defaultQueueSchedulingPolicy\":null,\"fairSharePreemptionTimeout\":null,\"queueMaxAMShareDefault\":null,\"queueMaxAppsDefault\":null,\"queuePlacementRules\":null,\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"root\",\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"default\",\"queues\":[],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxAMShare\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxAMShare\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"userMaxAppsDefault\":null,\"users\":[]}"
},
{
"desc": "When computing the overall IMPALA health, consider Impala Catalog Server's health",
"display_name": "Impala Catalog Server Role Health Test",
"name": "impala_catalogserver_health_enabled",
"value": "true"
},
{
"desc": "Name of the HBase service that this Impala service instance depends on",
"display_name": "HBase Service",
"name": "hbase_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Short-Circuit Read Permissions Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Short-Circuit Read Permissions Validator",
"name": "service_config_suppression_short_circuit_reads_data_directory_permissions_validator",
"value": "false"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>userName: treats the field value as a userName, and ignores any\n host-specific data. 
This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can be used to compare Impala events:\n</p>\n\n<ul>\n <li>operation: the Impala operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n <li>databaseName: the database affected by the operation.</li>\n <li>tableName: the table affected by the operation.</li>\n <li>objectType: the type of object affected by the operation.</li>\n <li>privilege: the privilege associated with the operation.</li>\n</ul>\n\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": null
},
{
"desc": "Name of the Hive service that this Impala service instance depends on",
"display_name": "Hive Service",
"name": "hive_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Llama ApplicationMaster Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Llama ApplicationMaster Count Validator",
"name": "service_config_suppression_llama_count_validator",
"value": "false"
},
{
"desc": "When set, this parameter is used to convert the username into the LDAP Distinguished Name (DN), so that the resulting DN looks like uid=username,X. For example, if this parameter is set to \"ou=People,dc=cloudera,dc=com\", and the username passed in is \"mike\", the resulting authentication passed to the LDAP server looks like \"uid=mike,ou=People,dc=cloudera,dc=com\". This parameter is frequently useful when authenticating against an OpenLDAP server. This parameter is mutually exclusive with LDAP Domain and LDAP Pattern.",
"display_name": "LDAP BaseDN",
"name": "ldap_baseDN",
"value": null
},
{
"desc": "Name of the ZooKeeper service to use for leader election and fencing when Llama is configured for high availability. This service dependency is required when more than one Llama role is present.",
"display_name": "ZooKeeper Service for Llama HA",
"name": "zookeeper_service",
"value": null
},
{
"desc": "The time period over which to compute the assignment locality ratio. Specified in minutes.",
"display_name": "Assignment Locality Monitoring Period",
"name": "impala_assignment_locality_window",
"value": "15"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "impala"
},
{
"desc": "The group that this Impala's processes should run as (except Llama, which has its own group).",
"display_name": "Impala System Group (except Llama)",
"name": "process_groupname",
"value": "impala"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Certificate for Clients parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Certificate for Clients",
"name": "service_config_suppression_ssl_server_certificate",
"value": "false"
},
{
"desc": "Controls which queries admin users can see in the queries list view",
"display_name": "Admin Users Query List Visibility Settings",
"name": "admin_query_list_settings",
"value": "ALL"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Encrypt communication between clients (like ODBC, JDBC, and the Impala shell) and the Impala daemon using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Impala Client Services",
"name": "client_services_ssl_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Private Key Password for Clients parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Private Key Password for Clients",
"name": "service_config_suppression_ssl_private_key_password",
"value": "false"
},
{
"desc": "When computing the overall IMPALA health, consider Impala StateStore's health",
"display_name": "Impala StateStore Role Health Test",
"name": "impala_statestore_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP URI parameter.",
"display_name": "Suppress Parameter Validation: LDAP URI",
"name": "service_config_suppression_impala_ldap_uri",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Sentry Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Sentry Validator",
"name": "service_config_suppression_impala_sentry_validator",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.lineage.client.properties</strong>.",
"display_name": "Impala Client Advanced Configuration Snippet (Safety Valve) for navigator.lineage.client.properties",
"name": "navigator_lineage_client_config_safety_valve",
"value": null
}
]

View File

@ -1,350 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SSL/TLS Certificate for Statestore Webserver parameter.",
"display_name": "Suppress Parameter Validation: SSL/TLS Certificate for Statestore Webserver",
"name": "role_config_suppression_webserver_certificate_file",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_statestore_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "statestore_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.",
"display_name": "Health Check Startup Tolerance",
"name": "statestore_startup_tolerance",
"value": "5"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be added (verbatim) to StateStore command line flags.",
"display_name": "Statestore Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "statestore_cmd_args_safety_valve",
"value": null
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "statestore_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; 1 means buffer WARNING only, ...)",
"display_name": "StateStore Log Buffer Level",
"name": "logbuflevel",
"value": "0"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Impala StateStore Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Impala StateStore Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_statestore_role_env_safety_valve",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "When computing the overall Impala StateStore health, consider the host's health.",
"display_name": "Impala StateStore Host Health Test",
"name": "statestore_host_health_enabled",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_statestore_scm_health",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Username for Statestore webserver authentication.",
"display_name": "Statestore Web Server Username",
"name": "webserver_htpassword_user",
"value": null
},
{
"desc": "Password for Statestore webserver authentication.",
"display_name": "Statestore Web Server User Password",
"name": "webserver_htpassword_password",
"value": null
},
{
"desc": "Maximum number of tasks allowed to be pending at the thread manager underlying the StateStore Thrift server (0 allows infinitely many pending tasks)",
"display_name": "Maximum StateStore Pending Tasks",
"name": "state_store_pending_task_count_max",
"value": "0"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_statestore_web_metric_collection",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Port where StateStore debug web server runs.",
"display_name": "StateStore HTTP Server Port",
"name": "statestore_webserver_port",
"value": "25010"
},
{
"desc": "Directory where StateStore will place its log files.",
"display_name": "StateStore Log Directory",
"name": "log_dir",
"value": "/var/log/statestore"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_statestore_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_statestore_unexpected_exits",
"value": "false"
},
{
"desc": "Port where StateStoreService is exported.",
"display_name": "StateStore Service Port",
"name": "state_store_port",
"value": "24000"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Statestore Command Line Argument Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Statestore Command Line Argument Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_statestore_cmd_args_safety_valve",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the StateStore Core Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: StateStore Core Dump Directory",
"name": "role_config_suppression_core_dump_dir",
"value": "false"
},
{
"desc": "Number of worker threads for the thread manager underlying the StateStore Thrift server.",
"display_name": "StateStore Worker Threads",
"name": "state_store_num_server_worker_threads",
"value": "4"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the StateStore Log Directory parameter.",
"display_name": "Suppress Parameter Validation: StateStore Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Resident Set Size heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Resident Set Size",
"name": "role_health_suppression_statestore_memory_rss_health",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Statestore Web Server Username parameter.",
"display_name": "Suppress Parameter Validation: Statestore Web Server Username",
"name": "role_config_suppression_webserver_htpassword_user",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_statestore_file_descriptor",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Verbose logging level for the GLog logger. These messages are always logged at 'INFO' log level, so this setting has no effect if Logging Threshold is set to 'WARN' or above. ",
"display_name": "StateStore Verbose Log Level",
"name": "log_verbose_level",
"value": "1"
},
{
"desc": "The health test thresholds on the resident size of the process.",
"display_name": "Resident Set Size Thresholds",
"name": "process_resident_set_size_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The maximum size, in megabytes, per log file for Impala StateStore logs. Typically used by log4j or logback.",
"display_name": "Impala StateStore Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Enables the health test that the Impala StateStore's process state is consistent with the role configuration",
"display_name": "Impala StateStore Process Health Test",
"name": "statestore_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_statestore_log_directory_free_space",
"value": "false"
},
{
"desc": "Directory where a StateStore core dump is placed.",
"display_name": "StateStore Core Dump Directory",
"name": "core_dump_dir",
"value": "/var/log/statestore"
},
{
"desc": "Local path to the certificate presented by the StateStore debug webserver. This file must be in .pem format. If empty, webserver SSL/TLS support is not enabled.",
"display_name": "SSL/TLS Certificate for Statestore Webserver",
"name": "webserver_certificate_file",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Statestore Web Server User Password parameter.",
"display_name": "Suppress Parameter Validation: Statestore Web Server User Password",
"name": "role_config_suppression_webserver_htpassword_password",
"value": "false"
},
{
"desc": "Enable/Disable StateStore web server. This web server contains useful information about StateStore daemon.",
"display_name": "Enable StateStore Web Server",
"name": "statestore_enable_webserver",
"value": "true"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Impala StateStore Environment Advanced Configuration Snippet (Safety Valve)",
"name": "STATESTORE_role_env_safety_valve",
"value": null
},
{
"desc": "The minimum log level for Impala StateStore logs",
"display_name": "Impala StateStore Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The number of log files that are kept for each severity level before all older log files are removed. The number has to be greater than 1 to keep at least the current log file open. If set to 0, all log files are retained and log rotation is effectively disabled.",
"display_name": "StateStore Maximum Log Files",
"name": "max_log_files",
"value": "10"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "statestore_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_statestore_swap_memory_usage",
"value": "false"
}
]

View File

@ -1,476 +0,0 @@
[
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The port to give out to producers, consumers, and other brokers to use in establishing connections. This only needs to be set if this port is different from the port the server should bind to.",
"display_name": "Advertised Port",
"name": "advertised.port",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Additional Broker Java Options parameter.",
"display_name": "Suppress Parameter Validation: Additional Broker Java Options",
"name": "role_config_suppression_broker_java_opts",
"value": "false"
},
{
    "desc": "If set, this is the hostname given out to producers, consumers, and other brokers to use in establishing connections. Never set this property at the group level; it should always be overridden on instance level.",
"display_name": "Advertised Host",
"name": "advertised.host.name",
"value": null
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HTTP Metric Report Host parameter.",
"display_name": "Suppress Parameter Validation: HTTP Metric Report Host",
"name": "role_config_suppression_kafka.http.metrics.host",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_broker_scm_health",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka Broker might connect to. This is used when Kafka Broker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Port the HTTP metric reporter listens on.",
"display_name": "HTTP Metric Report Port",
"name": "kafka.http.metrics.port",
"value": "24042"
},
{
"desc": "Kafka broker port.",
"display_name": "TCP Port",
"name": "port",
"value": "9092"
},
{
"desc": "The log for a topic partition is stored as a directory of segment files. This setting controls the size to which a segment file can grow before a new segment is rolled over in the log. This value should be larger than message.max.bytes.",
"display_name": "Segment File Size",
"name": "log.segment.bytes",
"value": "1073741824"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka-monitoring.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "kafka-monitoring.properties_role_safety_valve",
"value": null
},
{
"desc": "The frequency, in milliseconds, that the log cleaner checks whether any log segment is eligible for deletion, per retention policies.",
"display_name": "Data Retention Check Interval",
"name": "log.retention.check.interval.ms",
"value": "300000"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Advertised Host parameter.",
"display_name": "Suppress Parameter Validation: Advertised Host",
"name": "role_config_suppression_advertised.host.name",
"value": "false"
},
{
"desc": "The minimum log level for Kafka Broker logs",
"display_name": "Kafka Broker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "When computing the overall Kafka Broker health, consider the host's health.",
"display_name": "Kafka Broker Host Health Test",
"name": "kafka_broker_host_health_enabled",
"value": "true"
},
{
"desc": "Maximum size for the Java process heap memory. Passed to Java -Xmx. Measured in megabytes. Kafka does not generally require setting large heap sizes. It is better to let the file system cache utilize the available memory.",
"display_name": "Java Heap Size of Broker",
"name": "broker_max_heap_size",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_broker_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.",
"display_name": "Number of I/O Threads",
"name": "num.io.threads",
"value": "8"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Protocol to be used for inter-broker communication.",
"display_name": "Inter Broker Protocol",
"name": "security.inter.broker.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "role_config_suppression_ssl.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "kafka.properties_role_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). Secondary to the log.retention.ms property. The special value of -1 is interpreted as unlimited. This property is deprecated in Kafka 1.4.0. Use log.retention.ms.",
"display_name": "Data Retention Hours",
"name": "log.retention.hours",
"value": "168"
},
{
"desc": "Kafka broker secure port.",
"display_name": "TLS/SSL Port",
"name": "ssl_port",
"value": "9093"
},
{
"desc": "The amount of data to retain in the log for each topic-partition. This is the limit per partition: multiply by the number of partitions to get the total data retained for the topic. The special value of -1 is interpreted as unlimited. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded.",
"display_name": "Data Retention Size",
"name": "log.retention.bytes",
"value": "-1"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Host the HTTP metric reporter binds to.",
"display_name": "HTTP Metric Report Host",
"name": "kafka.http.metrics.host",
"value": "0.0.0.0"
},
{
"desc": "The maximum size, in megabytes, per log file for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "role_config_suppression_kafka-monitoring.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_broker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_BROKER_role_env_safety_valve",
"value": null
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9393"
},
{
"desc": "The log directory for log files of the role Kafka Broker.",
"display_name": "Kafka Broker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "The maximum time before a new log segment is rolled out. This property is used in Cloudera Kafka 1.4.0 and later in place of log.roll.hours.",
"display_name": "Data Log Roll Time",
"name": "log.roll.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "role_config_suppression_kafka.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka Broker is acting as a TLS/SSL server.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
    "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_broker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Data Directories parameter.",
"display_name": "Suppress Parameter Validation: Data Directories",
"name": "role_config_suppression_log.dirs",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded. The special value of -1 is interpreted as unlimited. This property is used in Kafka 1.4.0 and later in place of log.retention.hours.",
"display_name": "Data Retention Time",
"name": "log.retention.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). This property is deprecated in Cloudera Kafka 1.4.0; use log.roll.ms.",
"display_name": "Data Log Roll Hours",
"name": "log.roll.hours",
"value": "168"
},
{
"desc": "These arguments are passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags are passed here.",
"display_name": "Additional Broker Java Options",
"name": "broker_java_opts",
"value": "-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka Broker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
    "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_broker_host_health",
"value": "false"
},
{
"desc": "A list of one or more directories in which Kafka data is stored. Each new partition created is placed in the directory that currently has the fewest partitions. Each directory should be on its own separate drive.",
"display_name": "Data Directories",
"name": "log.dirs",
"value": "/var/local/kafka/data"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Segment File Size parameter.",
"display_name": "Suppress Parameter Validation: Segment File Size",
"name": "role_config_suppression_log.segment.bytes",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Broker ID parameter.",
"display_name": "Suppress Parameter Validation: Broker ID",
"name": "role_config_suppression_broker.id",
"value": "false"
},
{
"desc": "The password for the Kafka Broker JKS keystore file.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
    "desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_broker_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_broker_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "ssl.properties_role_safety_valve",
"value": null
},
{
"desc": "Encrypt communication between clients and Kafka Broker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka Broker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "ID uniquely identifying each broker. Never set this property at the group level; it should always be overridden on instance level.",
"display_name": "Broker ID",
"name": "broker.id",
"value": null
},
{
"desc": "Maximum number of connections allowed from each IP address.",
"display_name": "Maximum Connections per IP Address",
"name": "max.connections.per.ip",
"value": null
},
{
"desc": "Enables the health test that the Kafka Broker's process state is consistent with the role configuration",
"display_name": "Kafka Broker Process Health Test",
"name": "kafka_broker_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of I/O Threads parameter.",
"display_name": "Suppress Parameter Validation: Number of I/O Threads",
"name": "role_config_suppression_num.io.threads",
"value": "false"
},
{
"desc": "The password for the Kafka Broker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of Broker parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of Broker",
"name": "role_config_suppression_broker_max_heap_size",
"value": "false"
},
{
"desc": "Client authentication mode for SSL connections. Default is none, could be set to \"required\", i.e., client authentication is required or to \"requested\", i.e., client authentication is requested and client without certificates can still connect.",
"display_name": "SSL Client Authentication",
"name": "ssl.client.auth",
"value": "none"
},
{
"desc": "Authenticate a SASL connection with zookeeper, if Kerberos authentication is enabled. It also allows a broker to set SASL ACL on zookeeper nodes which locks these nodes down so that only kafka broker can modify.",
"display_name": "Authenticate Zookeeper Connection",
"name": "authenticate.zookeeper.connection",
"value": "true"
}
]

View File

@ -1,482 +0,0 @@
[
{
"desc": "The maximum size, in megabytes, per log file for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Stop the entire mirror maker when a send failure occurs.",
"display_name": "Abort on Send Failure",
"name": "abort.on.send.failure",
"value": "true"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "Maximum number of bytes that can be buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Queue Size",
"name": "queue.byte.size",
"value": "100000000"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
    "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_mirror_maker_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_mirror_maker_role_env_safety_valve",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Only required if source Kafka cluster requires client authentication.",
"display_name": "Source Kafka Cluster's Client Auth",
"name": "source.ssl.client.auth",
"value": "false"
},
{
"desc": "When computing the overall Kafka MirrorMaker health, consider the host's health.",
"display_name": "Kafka MirrorMaker Host Health Test",
"name": "kafka_mirror_maker_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "role_config_suppression_ssl_client.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka MirrorMaker logs. Typically used by log4j or logback.",
"display_name": "Kafka MirrorMaker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Blacklist parameter.",
"display_name": "Suppress Parameter Validation: Topic Blacklist",
"name": "role_config_suppression_blacklist",
"value": "false"
},
{
"desc": "The minimum log level for Kafka MirrorMaker logs",
"display_name": "Kafka MirrorMaker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Offset commit interval in milliseconds.",
"display_name": "Offset Commit Interval",
"name": "offset.commit.interval.ms",
"value": "60000"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_producers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "mirror_maker_producers.properties_role_safety_valve",
"value": null
},
{
    "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_mirror_maker_host_health",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_mirror_maker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "role_config_suppression_ssl_server.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker JKS keystore file.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Destination Broker List parameter.",
"display_name": "Suppress Parameter Validation: Destination Broker List",
"name": "role_config_suppression_bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "Number of producer instances. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Number of Producers",
"name": "num.producers",
"value": "1"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_client.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_client.properties",
"name": "ssl_client.properties_role_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka MirrorMaker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_MIRROR_MAKER_role_env_safety_valve",
"value": null
},
{
"desc": "Name of the consumer group used by MirrorMaker. When multiple role instances are configured with the same topics and same group ID, the role instances load-balance replication for the topics. When multiple role instances are configured with the same topics but different group ID, each role instance replicates all the events for those topics - this can be used to replicate the source cluster into multiple destination clusters.",
"display_name": "Consumer Group ID",
"name": "group.id",
"value": "cloudera_mirrormaker"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_mirror_maker_swap_memory_usage",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Consumer Group ID parameter.",
"display_name": "Suppress Parameter Validation: Consumer Group ID",
"name": "role_config_suppression_group.id",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener Arguments",
"name": "role_config_suppression_consumer.rebalance.listener.args",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka MirrorMaker might connect to. This is used when Kafka MirrorMaker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Enables the health test that the Kafka MirrorMaker's process state is consistent with the role configuration",
"display_name": "Kafka MirrorMaker Process Health Test",
"name": "kafka_mirror_maker_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of messages that are buffered between producer and consumer. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Message Queue Size",
"name": "queue.size",
"value": "10000"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "A consumer rebalance listener of type ConsumerRebalanceListener to be invoked when MirrorMaker's consumer rebalances.",
"display_name": "MirrorMaker Consumer Rebalance Listener",
"name": "consumer.rebalance.listener",
"value": ""
},
{
"desc": "Run with MirrorMaker settings that eliminate potential loss of data. This impacts performance, but is highly recommended. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Avoid Data Loss",
"name": "no.data.loss",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_producers.properties",
"name": "role_config_suppression_mirror_maker_producers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mirror_maker_consumers.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "mirror_maker_consumers.properties_role_safety_valve",
"value": null
},
{
"desc": "Protocol to be used for communication with source kafka cluster.",
"display_name": "Source Kafka Cluster's Security Protocol",
"name": "source.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Consumer Rebalance Listener parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Consumer Rebalance Listener",
"name": "role_config_suppression_consumer.rebalance.listener",
"value": "false"
},
{
"desc": "Protocol to be used for communication with destination kafka cluster.",
"display_name": "Destination Kafka Cluster's Security Protocol",
"name": "destination.security.protocol",
"value": "PLAINTEXT"
},
{
"desc": "Regular expression that represents a set of topics to mirror. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whitelist is used.",
"display_name": "Topic Whitelist",
"name": "whitelist",
"value": ""
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_mirror_maker_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Topic Whitelist parameter.",
"display_name": "Suppress Parameter Validation: Topic Whitelist",
"name": "role_config_suppression_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9394"
},
{
"desc": "Arguments used by MirrorMaker message handler.",
"display_name": "MirrorMaker Message Handler Arguments",
"name": "message.handler.args",
"value": ""
},
{
"desc": "List of brokers on destination cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Destination Broker List",
"name": "bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Source Broker List parameter.",
"display_name": "Suppress Parameter Validation: Source Broker List",
"name": "role_config_suppression_source.bootstrap.servers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_mirror_maker_file_descriptor",
"value": "false"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka MirrorMaker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler",
"name": "role_config_suppression_message.handler",
"value": "false"
},
{
"desc": "Only required if destination Kafka cluster requires client authentication.",
"display_name": "Destination Kafka Cluster's Client Auth",
"name": "destination.ssl.client.auth",
"value": "false"
},
{
"desc": "A MirrorMaker message handler of type MirrorMakerMessageHandler that will process every record in-between producer and consumer.",
"display_name": "MirrorMaker Message Handler",
"name": "message.handler",
"value": ""
},
{
"desc": "List of brokers on source cluster. This should be more than one, for high availability, but there's no need to list all brokers.",
"display_name": "Source Broker List",
"name": "source.bootstrap.servers",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for mirror_maker_consumers.properties",
"name": "role_config_suppression_mirror_maker_consumers.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl_server.properties</strong> for this role only.",
"display_name": "Kafka MirrorMaker Advanced Configuration Snippet (Safety Valve) for ssl_server.properties",
"name": "ssl_server.properties_role_safety_valve",
"value": null
},
{
"desc": "Number of consumer threads.",
"display_name": "Number of Consumer Threads",
"name": "num.streams",
"value": "1"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka MirrorMaker is acting as a TLS/SSL server.",
"display_name": "Kafka MirrorMaker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "Encrypt communication between clients and Kafka MirrorMaker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka MirrorMaker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Regular expression that represents a set of topics to avoid mirroring. Note that whitelist and blacklist parameters are mutually exclusive. If both are defined, only the whitelist is used. WARNING: Does not work with Kafka 2.0 or later.",
"display_name": "Topic Blacklist",
"name": "blacklist",
"value": ""
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Arguments used by MirrorMaker consumer rebalance listener.",
"display_name": "MirrorMaker Consumer Rebalance Listener Arguments",
"name": "consumer.rebalance.listener.args",
"value": ""
},
{
"desc": "The log directory for log files of the role Kafka MirrorMaker.",
"display_name": "Kafka MirrorMaker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MirrorMaker Message Handler Arguments parameter.",
"display_name": "Suppress Parameter Validation: MirrorMaker Message Handler Arguments",
"name": "role_config_suppression_message.handler.args",
"value": "false"
},
{
"desc": "The password for the Kafka MirrorMaker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka MirrorMaker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
}
]

View File

@ -1,374 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Controlled Shutdown Maximum Attempts parameter.",
"display_name": "Suppress Parameter Validation: Controlled Shutdown Maximum Attempts",
"name": "service_config_suppression_controlled.shutdown.max.retries",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_kafka_service_env_safety_valve",
"value": "false"
},
{
"desc": "The default number of partitions for automatically created topics.",
"display_name": "Default Number of Partitions",
"name": "num.partitions",
"value": "1"
},
{
"desc": "The amount of time to retain delete messages for log compacted topics. Once a consumer has seen an original message you need to ensure it also sees the delete message. If you removed the delete message too quickly, this might not happen. As a result there is a configurable delete retention time.",
"display_name": "Log Compaction Delete Record Retention Time",
"name": "log.cleaner.delete.retention.ms",
"value": "604800000"
},
{
"desc": "Enables auto creation of topics on the server. If this is set to true, then attempts to produce, consume, or fetch metadata for a non-existent topic automatically create the topic with the default replication factor and number of partitions.",
"display_name": "Topic Auto Creation",
"name": "auto.create.topics.enable",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Number of threads used to replicate messages from leaders. Increasing this value increases the degree of I/O parallelism in the follower broker.",
"display_name": "Number of Replica Fetchers",
"name": "num.replica.fetchers",
"value": "1"
},
{
"desc": "Enables Kafka monitoring.",
"display_name": "Enable Kafka Monitoring (Note: Requires Kafka-1.3.0 parcel or higher)",
"name": "monitoring.enabled",
"value": "true"
},
{
"desc": "If automatic leader rebalancing is enabled, the controller tries to balance leadership for partitions among the brokers by periodically returning leadership for each partition to the preferred replica, if it is available.",
"display_name": "Enable Automatic Leader Rebalancing",
"name": "auto.leader.rebalance.enable",
"value": "true"
},
{
"desc": "Number of unsuccessful controlled shutdown attempts before executing an unclean shutdown. For example, the default value of 3 means that the system will attempt a controlled shutdown 3 times before executing an unclean shutdown.",
"display_name": "Controlled Shutdown Maximum Attempts",
"name": "controlled.shutdown.max.retries",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "The number of partitions for the offset commit topic. Since changing this after deployment is currently unsupported, we recommend using a higher setting for production (for example, 100-200).",
"display_name": "Offset Commit Topic Number of Partitions",
"name": "offsets.topic.num.partitions",
"value": "50"
},
{
"desc": "If a follower has not sent any fetch requests, nor has it consumed up to the leader's log end offset during this time, the leader removes the follower from the ISR set.",
"display_name": "Allowed Replica Time Lag",
"name": "replica.lag.time.max.ms",
"value": "10000"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Replica Maximum Fetch Size parameter.",
"display_name": "Suppress Parameter Validation: Replica Maximum Fetch Size",
"name": "service_config_suppression_replica.fetch.max.bytes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The replication factor for the offset commit topic. A higher setting is recommended in order to ensure higher availability (for example, 3 or 4) . If the offsets topic is created when there are fewer brokers than the replication factor, then the offsets topic is created with fewer replicas.",
"display_name": "Offset Commit Topic Replication Factor",
"name": "offsets.topic.replication.factor",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka Broker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka Broker Count Validator",
"name": "service_config_suppression_kafka_broker_count_validator",
"value": "false"
},
{
"desc": "Controls how frequently the log cleaner will attempt to clean the log. This ratio bounds the maximum space wasted in the log by duplicates. For example, at 0.5 at most 50% of the log could be duplicates. A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log.",
"display_name": "Log Cleaner Clean Ratio",
"name": "log.cleaner.min.cleanable.ratio",
"value": "0.5"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "kafka"
},
{
"desc": "Enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so might result in data loss.",
"display_name": "Enable Unclean Leader Election",
"name": "unclean.leader.election.enable",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Default Replication Factor",
"name": "service_config_suppression_default.replication.factor",
"value": "false"
},
{
"desc": "Enables controlled shutdown of the broker. If enabled, the broker moves all leaders on it to other brokers before shutting itself down. This reduces the unavailability window during shutdown.",
"display_name": "Enable Controlled Shutdown",
"name": "controlled.shutdown.enable",
"value": "true"
},
{
"desc": "The frequency with which to check for leader imbalance.",
"display_name": "Leader Imbalance Check Interval",
"name": "leader.imbalance.check.interval.seconds",
"value": "300"
},
{
"desc": "The maximum number of bytes to fetch for each partition in fetch requests replicas send to the leader. This value should be larger than message.max.bytes.",
"display_name": "Replica Maximum Fetch Size",
"name": "replica.fetch.max.bytes",
"value": "1048576"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Session Timeout parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Session Timeout",
"name": "service_config_suppression_zookeeper.session.timeout.ms",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of Replica Fetchers parameter.",
"display_name": "Suppress Parameter Validation: Number of Replica Fetchers",
"name": "service_config_suppression_num.replica.fetchers",
"value": "false"
},
{
"desc": "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Producer Quota",
"name": "quota.producer.default",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_service_env_safety_valve",
"value": null
},
{
"desc": "The maximum size of a message that the server can receive. It is important that this property be in sync with the maximum fetch size the consumers use, or else an unruly producer could publish messages too large for consumers to consume.",
"display_name": "Maximum Message Size",
"name": "message.max.bytes",
"value": "1000000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Allowed Per Broker parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Allowed Per Broker",
"name": "service_config_suppression_leader.imbalance.per.broker.percentage",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Enables the log cleaner to compact topics with cleanup.policy=compact on this cluster.",
"display_name": "Enable Log Compaction",
"name": "log.cleaner.enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Minimum Number of Replicas in ISR parameter.",
"display_name": "Suppress Parameter Validation: Minimum Number of Replicas in ISR",
"name": "service_config_suppression_min.insync.replicas",
"value": "false"
},
{
"desc": "ZNode in ZooKeeper that should be used as a root for this Kafka cluster.",
"display_name": "ZooKeeper Root",
"name": "zookeeper.chroot",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka MirrorMaker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka MirrorMaker Count Validator",
"name": "service_config_suppression_kafka_mirror_maker_count_validator",
"value": "false"
},
{
"desc": "The number of messages written to a log partition before triggering an fsync on the log. Setting this lower syncs data to disk more often, but has a major impact on performance. We recommend use of replication for durability rather than depending on single-server fsync; however, this setting can be used to be extra certain. If used in conjunction with log.flush.interval.ms, the log is flushed when either criteria is met.",
"display_name": "Log Flush Message Interval",
"name": "log.flush.interval.messages",
"value": null
},
{
"desc": "The number of background threads to use for log cleaning.",
"display_name": "Number of Log Cleaner Threads",
"name": "log.cleaner.threads",
"value": "1"
},
{
"desc": "Enable Kerberos authentication for this KAFKA service.",
"display_name": "Enable Kerberos Authentication",
"name": "kerberos.auth.enable",
"value": "false"
},
{
"desc": "List of metric reporter class names. HTTP reporter is included by default.",
"display_name": "List of Metric Reporters",
"name": "kafka.metrics.reporters",
"value": "nl.techop.kafka.KafkaHttpMetricsReporter"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Root parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Root",
"name": "service_config_suppression_zookeeper.chroot",
"value": "false"
},
{
"desc": "The frequency, in ms, with which the log flusher checks whether any log is eligible to be flushed to disk.",
"display_name": "Log Flush Scheduler Interval",
"name": "log.flush.scheduler.interval.ms",
"value": null
},
{
"desc": "The minimum number of replicas in the in-sync replica needed to satisfy a produce request where required.acks=-1 (that is, all).",
"display_name": "Minimum Number of Replicas in ISR",
"name": "min.insync.replicas",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Replication Factor",
"name": "service_config_suppression_offsets.topic.replication.factor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Check Interval parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Check Interval",
"name": "service_config_suppression_leader.imbalance.check.interval.seconds",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "If the server fails to send a heartbeat to ZooKeeper within this period of time, it is considered dead. If set too low, ZooKeeper might falsely consider a server dead; if set too high, ZooKeeper might take too long to recognize a dead server.",
"display_name": "ZooKeeper Session Timeout",
"name": "zookeeper.session.timeout.ms",
"value": "6000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Maximum Message Size parameter.",
"display_name": "Suppress Parameter Validation: Maximum Message Size",
"name": "service_config_suppression_message.max.bytes",
"value": "false"
},
{
"desc": "Enables topic deletion using admin tools. When delete topic is disabled, deleting topics through the admin tools has no effect.",
"display_name": "Enable Delete Topic",
"name": "delete.topic.enable",
"value": "true"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "If a replica falls more than this number of messages behind the leader, the leader removes the follower from the ISR and treats it as dead. This property is deprecated in Kafka 1.4.0; higher versions use only replica.lag.time.max.ms.",
"display_name": "Allowed Replica Message Lag",
"name": "replica.lag.max.messages",
"value": "4000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Default Number of Partitions",
"name": "service_config_suppression_num.partitions",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Kafka service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The total memory used for log deduplication across all cleaner threads. This memory is statically allocated and will not cause GC problems.",
"display_name": "Log Cleaner Deduplication Buffer Size",
"name": "log.cleaner.dedupe.buffer.size",
"value": "134217728"
},
{
"desc": "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Consumer Quota",
"name": "quota.consumer.default",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Number of Partitions",
"name": "service_config_suppression_offsets.topic.num.partitions",
"value": "false"
},
{
"desc": "The default replication factor for automatically created topics.",
"display_name": "Default Replication Factor",
"name": "default.replication.factor",
"value": "1"
},
{
"desc": "The maximum time between fsync calls on the log. If used in conjuction with log.flush.interval.messages, the log is flushed when either criteria is met.",
"display_name": "Log Flush Time Interval",
"name": "log.flush.interval.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the List of Metric Reporters parameter.",
"display_name": "Suppress Parameter Validation: List of Metric Reporters",
"name": "service_config_suppression_kafka.metrics.reporters",
"value": "false"
},
{
"desc": "The percentage of leader imbalance allowed per broker. The controller rebalances leadership if this ratio goes above the configured value per broker.",
"display_name": "Leader Imbalance Allowed Per Broker",
"name": "leader.imbalance.per.broker.percentage",
"value": "10"
}
]

View File

@ -1,320 +0,0 @@
[
{
"desc": "Allows the oozie superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Oozie Proxy User Groups",
"name": "hadoop_kms_proxyuser_oozie_groups",
"value": "*"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kms_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Allows the mapred superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Mapred Proxy User Groups",
"name": "hadoop_kms_proxyuser_mapred_groups",
"value": "*"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the mapred user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Mapred Proxy User Hosts",
"name": "hadoop_kms_proxyuser_mapred_hosts",
"value": "*"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the httpfs user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "HttpFS Proxy User Hosts",
"name": "hadoop_kms_proxyuser_httpfs_hosts",
"value": "*"
},
{
"desc": "Maximum number of threads used to handle KMS requests.",
"display_name": "KMS Max Threads",
"name": "kms_max_threads",
"value": "250"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the oozie user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Oozie Proxy User Hosts",
"name": "hadoop_kms_proxyuser_oozie_hosts",
"value": "*"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the yarn user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "YARN Proxy User Hosts",
"name": "hadoop_kms_proxyuser_yarn_hosts",
"value": "*"
},
{
"desc": "Enables the health test that the Key Management Server's process state is consistent with the role configuration",
"display_name": "Key Management Server Process Health Test",
"name": "kms_scm_health_enabled",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "The password for the Key Management Server JKS keystore file.",
"display_name": "Key Management Server TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "Allows the hive superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Hive Proxy User Groups",
"name": "hadoop_kms_proxyuser_hive_groups",
"value": "*"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the HTTP user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "HTTP Proxy User Hosts",
"name": "hadoop_kms_proxyuser_HTTP_hosts",
"value": "*"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the hdfs user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "HDFS Proxy User Hosts",
"name": "hadoop_kms_proxyuser_hdfs_hosts",
"value": "*"
},
{
"desc": "Allows the hdfs superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "HDFS Proxy User Groups",
"name": "hadoop_kms_proxyuser_hdfs_groups",
"value": "*"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>kms-acls.xml</strong> for this role only.",
"display_name": "Key Management Server Advanced Configuration Snippet (Safety Valve) for kms-acls.xml",
"name": "kms-acls.xml_role_safety_valve",
"value": null
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the hue user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Hue Proxy User Hosts",
"name": "hadoop_kms_proxyuser_hue_hosts",
"value": "*"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Encrypt communication between clients and Key Management Server using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Key Management Server",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "The log directory for log files of the role Key Management Server.",
"display_name": "Key Management Server Log Directory",
"name": "log_dir",
"value": "/var/log/hadoop-kms"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the hive user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Hive Proxy User Hosts",
"name": "hadoop_kms_proxyuser_hive_hosts",
"value": "*"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Key Management Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Key Management Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KMS_role_env_safety_valve",
"value": null
},
{
"desc": "Port used by clients to interact with the KMS.",
"display_name": "KMS HTTP Port",
"name": "kms_http_port",
"value": "16000"
},
{
"desc": "Allows the yarn superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "YARN Proxy User Groups",
"name": "hadoop_kms_proxyuser_yarn_groups",
"value": "*"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Allows the hue superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Hue Proxy User Groups",
"name": "hadoop_kms_proxyuser_hue_groups",
"value": "*"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Comma-delimited list of hosts where you want to allow the flume user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Flume Proxy User Hosts",
"name": "hadoop_kms_proxyuser_flume_hosts",
"value": "*"
},
{
"desc": "The maximum size, in megabytes, per log file for Key Management Server logs. Typically used by log4j or logback.",
"display_name": "Key Management Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "The password for the JavaKeyStoreProvider keystore file.",
"display_name": "JavaKeyStoreProvider Password",
"name": "hadoop_security_keystore_javaKeyStoreProvider_password",
"value": null
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "When computing the overall Key Management Server health, consider the host's health.",
"display_name": "Key Management Server Host Health Test",
"name": "kms_host_health_enabled",
"value": "true"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has all of the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger will not be evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here may lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file-descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change in the future and, as a result, backward compatibility is not guaranteed between releases at this time.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "A comma-separated list of users (no spaces) for whom to disallow access to key material. These users can still fetch key metadata and create encrypted encryption keys, but are unable to do any other KMS operations. Typically, HDFS superusers will be specified here.",
"display_name": "KMS Blacklist Users",
"name": "kms_blacklist_users",
"value": ""
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Key Management Server is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Key Management Server TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "Allows the httpfs superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "HttpFS Proxy User Groups",
"name": "hadoop_kms_proxyuser_httpfs_groups",
"value": "*"
},
{
"desc": "Directory where configuration and binaries are staged before starting KMS. Does not normally need to be modified.",
"display_name": "KMS Staging Directory",
"name": "kms_staging_dir",
"value": "/var/lib/hadoop-kms"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>core-site.xml</strong> for this role only.",
"display_name": "Key Management Server Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "core-site.xml_role_safety_valve",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for Key Management Server logs. Typically used by log4j or logback.",
"display_name": "Key Management Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Allows the flume superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Flume Proxy User Groups",
"name": "hadoop_kms_proxyuser_flume_groups",
"value": "*"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Allows the HTTP superuser to impersonate any members of a comma-delimited list of groups. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "HTTP Proxy User Groups",
"name": "hadoop_kms_proxyuser_HTTP_groups",
"value": "*"
},
{
"desc": "Directory of the keystore file kms.keystore used by JavaKeyStoreProvider that backs the KMS.",
"display_name": "JavaKeyStoreProvider Directory",
"name": "hadoop_security_key_provider_dir",
"value": "/var/lib/kms"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>kms-site.xml</strong> for this role only.",
"display_name": "Key Management Server Advanced Configuration Snippet (Safety Valve) for kms-site.xml",
"name": "kms-site.xml_role_safety_valve",
"value": null
},
{
"desc": "The minimum log level for Key Management Server logs",
"display_name": "Key Management Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Maximum heap size of the KMS.",
"display_name": "KMS Heap Size",
"name": "kms_heap_size",
"value": "1073741824"
},
{
"desc": "Port used to access the KMS' embedded Tomcat admin console.",
"display_name": "KMS Admin Port",
"name": "kms_admin_port",
"value": "16001"
}
]

View File

@ -1,50 +0,0 @@
[
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "kms"
},
{
"desc": "Authentication type for the KMS. Can either be \"simple\" or \"kerberos\".",
"display_name": "Authentication Type",
"name": "hadoop_kms_authentication_type",
"value": "simple"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "kms"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has all of the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger will not be evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here may lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file-descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change in the future and, as a result, backward compatibility is not guaranteed between releases at this time.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Java KeyStore KMS Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KMS_service_env_safety_valve",
"value": null
}
]

View File

@ -1,356 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Kerberos principal used by the Lily HBase Indexer roles.",
"display_name": "Role-Specific Kerberos Principal",
"name": "kerberos_role_princ_name",
"value": "hbase"
},
{
"desc": "The group that the HBase Indexer process should run as.",
"display_name": "System Group",
"name": "hbase_indexer_process_groupname",
"value": "hbase"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role-Specific Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Role-Specific Kerberos Principal",
"name": "role_config_suppression_kerberos_role_princ_name",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Lily HBase Indexer",
"name": "hbase_indexer_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Indexer Log Directory parameter.",
"display_name": "Suppress Parameter Validation: HBase Indexer Log Directory",
"name": "role_config_suppression_hbase_indexer_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "HTTP port used by HBase Indexer.",
"display_name": "HBase Indexer HTTP Port",
"name": "hbase_indexer_http_port",
"value": "11060"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "hbase_indexer_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_hbase_indexer_swap_memory_usage",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_hbase_indexer_host_health",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Lily HBase Indexer Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Lily HBase Indexer Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "The user that the HBase Indexer process should run as.",
"display_name": "System User",
"name": "hbase_indexer_process_username",
"value": "hbase"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "role_config_suppression_hbase_indexer_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Lily HBase Indexer parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Lily HBase Indexer",
"name": "role_config_suppression_hbase_indexer_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_hbase_indexer_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_hbase_indexer_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_hbase_indexer_unexpected_exits",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>hbase-indexer-site.xml</strong> for this role only.",
"display_name": "Lily HBase Indexer Advanced Configuration Snippet (Safety Valve) for hbase-indexer-site.xml",
"name": "hbase_indexer_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Lily HBase Indexer Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Lily HBase Indexer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "HBASE_INDEXER_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "role_config_suppression_hbase_indexer_process_username",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for Lily HBase Indexer logs. Typically used by log4j or logback.",
"display_name": "Lily HBase Indexer Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Directory where HBase Indexer will place its log files.",
"display_name": "HBase Indexer Log Directory",
"name": "hbase_indexer_log_dir",
"value": "/var/log/hbase-solr"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_hbase_indexer_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Lily HBase Indexer Advanced Configuration Snippet (Safety Valve) for hbase-indexer-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Lily HBase Indexer Advanced Configuration Snippet (Safety Valve) for hbase-indexer-site.xml",
"name": "role_config_suppression_hbase_indexer_config_safety_valve",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "When computing the overall Lily HBase Indexer health, consider the host's health.",
"display_name": "Lily HBase Indexer Host Health Test",
"name": "hbase_indexer_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Lily HBase Indexer Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Lily HBase Indexer Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hbase_indexer_role_env_safety_valve",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_hbase_indexer_log_directory_free_space",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Lily HBase Indexer logs. Typically used by log4j or logback.",
"display_name": "Lily HBase Indexer Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Enables the health test that the Lily HBase Indexer's process state is consistent with the role configuration",
"display_name": "Lily HBase Indexer Process Health Test",
"name": "hbase_indexer_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for Lily HBase Indexer logs",
"display_name": "Lily HBase Indexer Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Lily HBase Indexer in Bytes",
"name": "hbase_indexer_java_heapsize",
"value": "1073741824"
}
]

View File

@ -1,188 +0,0 @@
[
{
"desc": "Name of the HBase service that this Key-Value Store Indexer service instance depends on",
"display_name": "HBase Service",
"name": "hbase_service",
"value": null
},
{
"desc": "Whether to suppress the results of the Lily HBase Indexer Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Lily HBase Indexer Health",
"name": "service_health_suppression_ks_indexer_hbase_indexers_healthy",
"value": "false"
},
{
"desc": "The password for the HBase Indexer TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "HBase Indexer TLS/SSL Certificate Trust Store Password",
"name": "keystore_indexer_truststore_password",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Key-Value Store Indexer Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Key-Value Store Indexer Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_keystore_indexer_sentry_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Key-Value Store Indexer Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Key-Value Store Indexer Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_ks_indexer_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Lily HBase Indexer Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Lily HBase Indexer Count Validator",
"name": "service_config_suppression_hbase_indexer_count_validator",
"value": "false"
},
{
"desc": "Name of the Solr service that this Key-Value Store Indexer service instance depends on",
"display_name": "Solr Service",
"name": "solr_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Indexer TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: HBase Indexer TLS/SSL Certificate Trust Store Password",
"name": "service_config_suppression_keystore_indexer_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Morphlines File parameter.",
"display_name": "Suppress Parameter Validation: Morphlines File",
"name": "service_config_suppression_morphlines_conf_file",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Grok Dictionary File parameter.",
"display_name": "Suppress Parameter Validation: Grok Dictionary File",
"name": "service_config_suppression_grok_dictionary_conf_file",
"value": "false"
},
{
"desc": "Use Sentry to enable role-based, fine-grained authorization. This configuration enables Sentry using policy files. Sentry is supported only on CDH 5.4 or later deployments and requires authentication to be turned on for HBase Indexer.",
"display_name": "Enable Sentry Authorization using Policy Files",
"name": "keystore_indexer_sentry_enabled",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Lily HBase Indexer health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Lily HBase Indexers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Lily HBase Indexers falls below the critical threshold.",
"display_name": "Healthy Lily HBase Indexer Monitoring Thresholds",
"name": "ks_indexer_indexers_healthy_thresholds",
"value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the Impala Sentry Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Impala Sentry Validator",
"name": "service_config_suppression_keystore_indexer_sentry_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Custom Mime-types File parameter.",
"display_name": "Suppress Parameter Validation: Custom Mime-types File",
"name": "service_config_suppression_custom_mimetypes_file",
"value": "false"
},
{
"desc": "Text that goes into morphlines.conf file used by HBase Indexers. The text goes verbatim into the config file except that $ZK_HOST is replaced by the ZooKeeper quorum of the Solr service.",
"display_name": "Morphlines File",
"name": "morphlines_conf_file",
"value": "SOLR_LOCATOR : {\n # Name of solr collection\n collection : collection\n \n # ZooKeeper ensemble\n zkHost : \"$ZK_HOST\" \n}\n\n\nmorphlines : [\n{\nid : morphline\nimportCommands : [\"org.kitesdk.**\", \"com.ngdata.**\"]\n\ncommands : [ \n {\n extractHBaseCells {\n mappings : [\n {\n inputColumn : \"data:*\"\n outputField : \"data\" \n type : string \n source : value\n }\n ]\n }\n }\n\n\n { logDebug { format : \"output record: {}\", args : [\"@{}\"] } }\n]\n}\n]\n"
},
{
"desc": "Authentication mechanism used by HBase Indexer.",
"display_name": "HBase Indexer Secure Authentication",
"name": "hbase_indexer_security_authentication",
"value": "simple"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Key-Value Store Indexer Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "keystore_indexer_sentry_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Text that goes verbatim into grok-dictionary.conf file used by HBase Indexers.",
"display_name": "Grok Dictionary File",
"name": "grok_dictionary_conf_file",
"value": "USERNAME [a-zA-Z0-9._-]+\nUSER %{USERNAME}\nINT (?:[+-]?(?:[0-9]+))\nBASE10NUM (?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\\.[0-9]+)?)|(?:\\.[0-9]+)))\nNUMBER (?:%{BASE10NUM})\nBASE16NUM (?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))\nBASE16FLOAT \\b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\\.[0-9A-Fa-f]*)?)|(?:\\.[0-9A-Fa-f]+)))\\b\n\nPOSINT \\b(?:[1-9][0-9]*)\\b\nNONNEGINT \\b(?:[0-9]+)\\b\nWORD \\b\\w+\\b\nNOTSPACE \\S+\nSPACE \\s*\nDATA .*?\nGREEDYDATA .*\n#QUOTEDSTRING (?:(?<!\\\\)(?:\"(?:\\\\.|[^\\\\\"])*\"|(?:'(?:\\\\.|[^\\\\'])*')|(?:`(?:\\\\.|[^\\\\`])*`)))\nQUOTEDSTRING (?>(?<!\\\\)(?>\"(?>\\\\.|[^\\\\\"]+)+\"|\"\"|(?>'(?>\\\\.|[^\\\\']+)+')|''|(?>`(?>\\\\.|[^\\\\`]+)+`)|``))\nUUID [A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}\n\n# Networking\nMAC (?:%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC})\nCISCOMAC (?:(?:[A-Fa-f0-9]{4}\\.){2}[A-Fa-f0-9]{4})\nWINDOWSMAC (?:(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2})\nCOMMONMAC (?:(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2})\nIP (?<![0-9])(?:(?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2}))(?![0-9])\nHOSTNAME \\b(?:[0-9A-Za-z][0-9A-Za-z-]{0,62})(?:\\.(?:[0-9A-Za-z][0-9A-Za-z-]{0,62}))*(\\.?|\\b)\nHOST %{HOSTNAME}\nIPORHOST (?:%{HOSTNAME}|%{IP})\n#HOSTPORT (?:%{IPORHOST=~/\\./}:%{POSINT}) # WH\n\n# paths\nPATH (?:%{UNIXPATH}|%{WINPATH})\nUNIXPATH (?>/(?>[\\w_%!$@:.,-]+|\\\\.)*)+\n#UNIXPATH (?<![\\w\\/])(?:/[^\\/\\s?*]*)+\nLINUXTTY (?>/dev/pts/%{NONNEGINT})\nBSDTTY (?>/dev/tty[pq][a-z0-9])\nTTY (?:%{BSDTTY}|%{LINUXTTY})\nWINPATH (?>[A-Za-z]+:|\\\\)(?:\\\\[^\\\\?*]*)+\nURIPROTO [A-Za-z]+(\\+[A-Za-z+]+)?\nURIHOST %{IPORHOST}(?::%{POSINT:port})?\n# uripath comes loosely from RFC1738, but mostly from what Firefox\n# doesn't turn into %XX\nURIPATH (?:/[A-Za-z0-9$.+!*'(){},~:;=#%_\\-]*)+\n#URIPARAM \\?(?:[A-Za-z0-9]+(?:=(?:[^&]*))?(?:&(?:[A-Za-z0-9]+(?:=(?:[^&]*))?)?)*)?\nURIPARAM 
\\?[A-Za-z0-9$.+!*'|(){},~#%&/=:;_?\\-\\[\\]]*\nURIPATHPARAM %{URIPATH}(?:%{URIPARAM})?\nURI %{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST})?(?:%{URIPATHPARAM})?\n\n# Months: January, Feb, 3, 03, 12, December\nMONTH \\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\\b\nMONTHNUM (?:0?[1-9]|1[0-2])\nMONTHDAY (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])\n\n# Days: Monday, Tue, Thu, etc...\nDAY (?:Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)?)\n\n# Years?\nYEAR (?>\\d\\d){1,2}\n# Time: HH:MM:SS\n#TIME \\d{2}:\\d{2}(?::\\d{2}(?:\\.\\d+)?)?\n# I'm still on the fence about using grok to perform the time match,\n# since it's probably slower.\n# TIME %{POSINT<24}:%{POSINT<60}(?::%{POSINT<60}(?:\\.%{POSINT})?)?\nHOUR (?:2[0123]|[01]?[0-9])\nMINUTE (?:[0-5][0-9])\n# '60' is a leap second in most time standards and thus is valid.\nSECOND (?:(?:[0-5][0-9]|60)(?:[:.,][0-9]+)?)\nTIME (?!<[0-9])%{HOUR}:%{MINUTE}(?::%{SECOND})(?![0-9])\n# datestamp is YYYY/MM/DD-HH:MM:SS.UUUU (or something like it)\nDATE_US %{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR}\nDATE_EU %{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR}\nISO8601_TIMEZONE (?:Z|[+-]%{HOUR}(?::?%{MINUTE}))\nISO8601_SECOND (?:%{SECOND}|60)\nTIMESTAMP_ISO8601 %{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}?\nDATE %{DATE_US}|%{DATE_EU}\nDATESTAMP %{DATE}[- ]%{TIME}\nTZ (?:[PMCE][SD]T)\nDATESTAMP_RFC822 %{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{TIME} %{TZ}\nDATESTAMP_OTHER %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR}\n\n# Syslog Dates: Month Day HH:MM:SS\nSYSLOGTIMESTAMP %{MONTH} +%{MONTHDAY} %{TIME}\nPROG (?:[\\w._/%-]+)\nSYSLOGPROG %{PROG:program}(?:\\[%{POSINT:pid}\\])?\nSYSLOGHOST %{IPORHOST}\nSYSLOGFACILITY <%{NONNEGINT:facility}.%{NONNEGINT:priority}>\nHTTPDATE %{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT}\n\n# Shortcuts\nQS %{QUOTEDSTRING}\n\n# Log 
formats\nSYSLOGBASE %{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource} %{SYSLOGPROG}:\nCOMBINEDAPACHELOG %{IPORHOST:clientip} %{USER:ident} %{USER:auth} \\[%{HTTPDATE:timestamp}\\] \"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:response} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent}\n\n# Log Levels\nLOGLEVEL ([T|t]race|TRACE|[D|d]ebug|DEBUG|[N|n]otice|NOTICE|[I|i]nfo|INFO|[W|w]arn?(?:ing)?|WARN?(?:ING)?|[E|e]rr?(?:or)?|ERR?(?:OR)?|[C|c]rit?(?:ical)?|CRIT?(?:ICAL)?|[F|f]atal|FATAL|[S|s]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)?)"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Global Policy File parameter.",
"display_name": "Suppress Parameter Validation: Sentry Global Policy File",
"name": "service_config_suppression_keystore_indexer_provider_resource",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Key-Value Store Indexer Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "ks_indexer_env_safety_valve",
"value": null
},
{
"desc": "Text that goes verbatim into custom-mimetypes.xml file used by HBase Indexers.",
"display_name": "Custom Mime-types File",
"name": "custom_mimetypes_file",
"value": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor\n license agreements. See the NOTICE file distributed with this work for additional\n information regarding copyright ownership. The ASF licenses this file to\n You under the Apache License, Version 2.0 (the \"License\"); you may not use\n this file except in compliance with the License. You may obtain a copy of\n the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required\n by applicable law or agreed to in writing, software distributed under the\n License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS\n OF ANY KIND, either express or implied. See the License for the specific\n language governing permissions and limitations under the License. -->\n\n<mime-info>\n\n <mime-type type=\"text/space-separated-values\">\n <glob pattern=\"*.ssv\"/>\n </mime-type>\n\n <mime-type type=\"avro/binary\">\n <magic priority=\"50\">\n <match value=\"0x4f626a01\" type=\"string\" offset=\"0\"/> \n </magic>\n <glob pattern=\"*.avro\"/>\n </mime-type>\n\n <mime-type type=\"mytwittertest/json+delimited+length\">\n <magic priority=\"50\">\n <match value=\"[0-9]+(\\r)?\\n\\\\{&quot;\" type=\"regex\" offset=\"0:16\"/> \n </magic>\n </mime-type>\n \n <mime-type type=\"application/hadoop-sequence-file\">\n <magic priority=\"50\">\n <match value=\"SEQ[\\0-\\6]\" type=\"regex\" offset=\"0\"/>\n </magic>\n </mime-type>\n \n</mime-info>"
},
{
"desc": "The class to use in Sentry authorization for user to group mapping. Sentry authorization may be configured to use either Hadoop groups or local groups defined in the policy file. When configured with Hadoop groups, Sentry will ask the HDFS Namenode for group mapping for a given user to determine authorization access.",
"display_name": "Sentry User to Group Mapping Class",
"name": "keystore_indexer_sentry_provider",
"value": "org.apache.sentry.provider.file.HadoopGroupResourceAuthorizationProvider"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Indexer TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: HBase Indexer TLS/SSL Certificate Trust Store File",
"name": "service_config_suppression_keystore_indexer_truststore_file",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that HBase Indexer might connect to. This is used when HBase Indexer is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "HBase Indexer TLS/SSL Certificate Trust Store File",
"name": "keystore_indexer_truststore_file",
"value": null
},
{
"desc": "HDFS path to the global policy file for Sentry authorization. This should be a relative path (and not a full HDFS URL). The global policy file must be in Sentry policy file format.",
"display_name": "Sentry Global Policy File",
"name": "keystore_indexer_provider_resource",
"value": "/user/hbaseindexer/sentry/sentry-provider.ini"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
}
]

View File

@ -1,157 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<configuration>
<property>
<name>mapreduce.job.split.metainfo.maxsize</name>
<value>10000000</value>
</property>
<property>
<name>mapreduce.job.counters.max</name>
<value>120</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.compress</name>
<value>false</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.compress.type</name>
<value>BLOCK</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.compress.codec</name>
<value>org.apache.hadoop.io.compress.DefaultCodec</value>
</property>
<property>
<name>mapreduce.map.output.compress.codec</name>
<value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<property>
<name>mapreduce.map.output.compress</name>
<value>true</value>
</property>
<property>
<name>zlib.compress.level</name>
<value>DEFAULT_COMPRESSION</value>
</property>
<property>
<name>mapreduce.task.io.sort.factor</name>
<value>64</value>
</property>
<property>
<name>mapreduce.map.sort.spill.percent</name>
<value>0.8</value>
</property>
<property>
<name>mapreduce.reduce.shuffle.parallelcopies</name>
<value>10</value>
</property>
<property>
<name>mapreduce.task.timeout</name>
<value>600000</value>
</property>
<property>
<name>mapreduce.client.submit.file.replication</name>
<value>10</value>
</property>
<property>
<name>mapreduce.job.reduces</name>
<value>1</value>
</property>
<property>
<name>mapreduce.task.io.sort.mb</name>
<value>256</value>
</property>
<property>
<name>mapreduce.map.speculative</name>
<value>false</value>
</property>
<property>
<name>mapreduce.reduce.speculative</name>
<value>false</value>
</property>
<property>
<name>mapreduce.job.reduce.slowstart.completedmaps</name>
<value>0.8</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>test-master-001.novalocal:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>test-master-001.novalocal:19888</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.https.address</name>
<value>test-master-001.novalocal:19890</value>
</property>
<property>
<name>mapreduce.jobhistory.admin.address</name>
<value>test-master-001.novalocal:10033</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/user</value>
</property>
<property>
<name>yarn.app.mapreduce.am.resource.mb</name>
<value>1024</value>
</property>
<property>
<name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
<value>1</value>
</property>
<property>
<name>mapreduce.job.ubertask.enable</name>
<value>false</value>
</property>
<property>
<name>yarn.app.mapreduce.am.command-opts</name>
<value>-Djava.net.preferIPv4Stack=true -Xmx825955249</value>
</property>
<property>
<name>mapreduce.map.java.opts</name>
<value>-Djava.net.preferIPv4Stack=true -Xmx825955249</value>
</property>
<property>
<name>mapreduce.reduce.java.opts</name>
<value>-Djava.net.preferIPv4Stack=true -Xmx825955249</value>
</property>
<property>
<name>yarn.app.mapreduce.am.admin.user.env</name>
<value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
</property>
<property>
<name>mapreduce.map.memory.mb</name>
<value>1024</value>
</property>
<property>
<name>mapreduce.map.cpu.vcores</name>
<value>1</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>1024</value>
</property>
<property>
<name>mapreduce.reduce.cpu.vcores</name>
<value>1</value>
</property>
<property>
<name>mapreduce.application.classpath</name>
<value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH</value>
</property>
<property>
<name>mapreduce.admin.user.env</name>
<value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
</property>
<property>
<name>mapreduce.shuffle.max.connections</name>
<value>80</value>
</property>
</configuration>

View File

@ -1,662 +0,0 @@
[
{
"desc": "The period over which to compute the moving average of the callable queue size.",
"display_name": "Oozie Server Callable Queue Monitoring Period",
"name": "oozie_server_callable_queue_window",
"value": "5"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average extra time the pause monitor spent paused. Specified as a percentage of elapsed wall clock time.",
"display_name": "Pause Duration Thresholds",
"name": "oozie_server_pause_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Comma-separated list of ActionService executor extension classes. Only action types with associated executors can be used in workflows. For CDH 5.4 and higher, this parameter is used only to specify additional classes for workflows. All executor extension classes included in that release will be added automatically and do not need to be specified.",
"display_name": "Oozie ActionService Executor Extension Classes",
"name": "oozie_executor_extension_classes",
"value": ""
},
{
"desc": "Comma-separated list of Oozie plug-ins to be activated. If one plugin cannot be loaded, all the plugins are ignored.",
"display_name": "Oozie Server Plugins",
"name": "oozie_plugins_list",
"value": ""
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Database Password parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Database Password",
"name": "role_config_suppression_oozie_database_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Plugins parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Plugins",
"name": "role_config_suppression_oozie_plugins_list",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_oozie_server_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Oozie TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_oozie_https_keystore_password",
"value": "false"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "oozie_server_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Oozie Server",
"name": "oozie_java_opts",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Email Action SMTP Host parameter.",
"display_name": "Suppress Parameter Validation: Oozie Email Action SMTP Host",
"name": "role_config_suppression_oozie_email_smtp_host",
"value": "false"
},
{
"desc": "Completed workflow jobs older than this value, in days, will be purged by the PurgeService.",
"display_name": "Days to Keep Completed Workflow Jobs",
"name": "purgeservice_older_than",
"value": "30"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_oozie_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Oozie Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie SchemaService Workflow Extension Schemas parameter.",
"display_name": "Suppress Parameter Validation: Oozie SchemaService Workflow Extension Schemas",
"name": "role_config_suppression_oozie_workflow_extension_schemas",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The SMTP server port to use for Oozie email action",
"display_name": "Oozie Email Action SMTP Port",
"name": "oozie_email_smtp_prt",
"value": "25"
},
{
"desc": "Whether to suppress the results of the Callable Queue Size health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Callable Queue Size",
"name": "role_health_suppression_oozie_server_callablequeue_size_health",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Data Directory parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Data Directory",
"name": "role_config_suppression_oozie_data_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_oozie_server_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_oozie_server_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Email Action From Address parameter.",
"display_name": "Suppress Parameter Validation: Oozie Email Action From Address",
"name": "role_config_suppression_oozie_email_from_address",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Log Directory",
"name": "role_config_suppression_oozie_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Enable SMTP authentication for Oozie email action",
"display_name": "Oozie Email Action SMTP Authentication Enabled",
"name": "oozie_email_smtp_auth",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Oozie TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_oozie_https_keystore_file",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Advanced Configuration Snippet (Safety Valve) for oozie-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Advanced Configuration Snippet (Safety Valve) for oozie-site.xml",
"name": "role_config_suppression_oozie_config_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Enables the health test that the Oozie Server's process state is consistent with the role configuration",
"display_name": "Oozie Server Process Health Test",
"name": "oozie_server_scm_health_enabled",
"value": "true"
},
{
"desc": "SMTP password for Oozie email action",
"display_name": "Oozie Email Action SMTP Authentication Password",
"name": "oozie_email_smtp_password",
"value": null
},
{
"desc": "Maximum concurrency for a given callable type. Each command is a callable type: submit, start, run, etc. Each action type is a callable type: MapReduce, SSH, sub-workflow, etc. All commands that use action executors (action-start, action-end. etc.) use the action type as the callable type.",
"display_name": "Maximum concurrency for a given callable type",
"name": "oozie_service_callablequeueservice_callable_concurrency",
"value": "10"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "If true, enables the Oozie Server web console. ExtJS 2.2 zip archive must be extracted to /var/lib/oozie on the same host as the Oozie Server.",
"display_name": "Enable Oozie Server Web Console",
"name": "oozie_web_console",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "oozie_server_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Username for connecting to the database used by Oozie Server. Does not apply if you are using Derby as the database type.",
"display_name": "Oozie Server Database User",
"name": "oozie_database_user",
"value": "sa"
},
{
"desc": "Whether to suppress the results of the Pause Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Pause Duration",
"name": "role_health_suppression_oozie_server_pause_duration",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_oozie_server_file_descriptor",
"value": "false"
},
{
"desc": "The period to review when computing the moving average of extra time the pause monitor spent paused.",
"display_name": "Pause Duration Monitoring Period",
"name": "oozie_server_pause_duration_window",
"value": "5"
},
{
"desc": "The from address to be used for mailing all emails for Oozie email action",
"display_name": "Oozie Email Action From Address",
"name": "oozie_email_from_address",
"value": "oozie@localhost"
},
{
"desc": "Port of Oozie Server",
"display_name": "Oozie HTTP Port",
"name": "oozie_http_port",
"value": "11000"
},
{
"desc": "Whether to suppress the results of the Oozie Server Shared Library Check health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Oozie Server Shared Library Check",
"name": "role_health_suppression_oozie_server_shared_lib_version_health",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Directory where Oozie Server will place its log files.",
"display_name": "Oozie Server Log Directory",
"name": "oozie_log_dir",
"value": "/var/log/oozie"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Oozie Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Oozie Server",
"name": "role_config_suppression_oozie_java_opts",
"value": "false"
},
{
"desc": "Completed bundle jobs older than this value, in days, will be purged by the PurgeService.",
"display_name": "Days to Keep Completed Bundle Jobs",
"name": "purgeservice_bundle_older_than",
"value": "7"
},
{
"desc": "Maximum callable queue size",
"display_name": "Maximum Callable Queue Size",
"name": "oozie_service_callablequeueservice_queue_size",
"value": "10000"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "The password for the Oozie JKS keystore file.",
"display_name": "Oozie TLS/SSL Server JKS Keystore File Password",
"name": "oozie_https_keystore_password",
"value": null
},
{
"desc": "Completed coordinator jobs older than this value, in days, will be purged by the PurgeService.",
"display_name": "Days to Keep Completed Coordinator Jobs",
"name": "purgeservice_coord_older_than",
"value": "7"
},
{
"desc": "Workflow Status metrics collection interval.",
"display_name": "Workflow Status Metrics Collection Interval",
"name": "oozie_job_metric_collection_interval",
"value": "1"
},
{
"desc": "The minimum log level for Oozie Server logs",
"display_name": "Oozie Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_oozie_server_web_metric_collection",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Database Name parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Database Name",
"name": "role_config_suppression_oozie_database_name",
"value": "false"
},
{
"desc": "When computing the overall Oozie Server health, consider the host's health.",
"display_name": "Oozie Server Host Health Test",
"name": "oozie_server_host_health_enabled",
"value": "true"
},
{
"desc": "Password for connecting to the database used by Oozie Server. Does not apply if you are using Derby as the database type.",
"display_name": "Oozie Server Database Password",
"name": "oozie_database_password",
"value": ""
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie ActionService Executor Extension Classes parameter.",
"display_name": "Suppress Parameter Validation: Oozie ActionService Executor Extension Classes",
"name": "role_config_suppression_oozie_executor_extension_classes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds of the weighted average size of the Oozie Server callable queue over a recent period. See also Oozie Server Callable Queue Monitoring Period.",
"display_name": "Oozie Server Callable Queue Monitoring Threshold",
"name": "oozie_server_callable_queue_threshold",
"value": "{\"critical\":\"95.0\",\"warning\":\"80.0\"}"
},
{
"desc": "Directory where the Oozie Server places its data. Only applicable when using Derby as the database type.",
"display_name": "Oozie Server Data Directory",
"name": "oozie_data_dir",
"value": "/var/lib/oozie/data"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_oozie_server_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Oozie is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Oozie TLS/SSL Server JKS Keystore File Location",
"name": "oozie_https_keystore_file",
"value": "/var/lib/oozie/.keystore"
},
{
"desc": "Comma-separated list of SchemaService workflow extension schemas for additional action types. From CDH 5.4 and higher, this parameter is used only to specify additional schemas for workflows. All schemas included in that release will be added automatically and do not need to be specified.",
"display_name": "Oozie SchemaService Workflow Extension Schemas",
"name": "oozie_workflow_extension_schemas",
"value": ""
},
{
"desc": "Port of the Oozie Server when using TLS/SSL.",
"display_name": "Oozie HTTPS Port",
"name": "oozie_https_port",
"value": "11443"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to use the Codehale-based metrics for instrumentation. Enabling this disables the 'instrumentation' REST endpoint and enables the 'metrics' REST endpoint (&lt;hostname:port&gt;/v2/admin/metrics).",
"display_name": "Enable The Metrics Instrumentation Service",
"name": "oozie_use_metric_instrumentation",
"value": "true"
},
{
"desc": "Type of the database used by Oozie Server.",
"display_name": "Oozie Server Database Type",
"name": "oozie_database_type",
"value": "derby"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Database Host parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Database Host",
"name": "role_config_suppression_oozie_database_host",
"value": "false"
},
{
"desc": "Whether to purge completed workflows and their corresponding coordinator actions of long-running coordinator jobs if the completed workflow jobs are older than the value specified in oozie.service.PurgeService.older.than.",
"display_name": "Enable Purge for Long-Running Coordinator Jobs",
"name": "purgeservice_purge_old_coord_action",
"value": "true"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Oozie TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_oozie_https_truststore_password",
"value": "false"
},
{
"desc": "The SMTP server host to use for Oozie email action",
"display_name": "Oozie Email Action SMTP Host",
"name": "oozie_email_smtp_host",
"value": "localhost"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_oozie_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Oozie TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_oozie_https_truststore_file",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Email Action SMTP Authentication Password parameter.",
"display_name": "Suppress Parameter Validation: Oozie Email Action SMTP Authentication Password",
"name": "role_config_suppression_oozie_email_smtp_password",
"value": "false"
},
{
"desc": "SMTP username for Oozie email action",
"display_name": "Oozie Email Action SMTP Authentication Username",
"name": "oozie_email_smtp_username",
"value": null
},
{
"desc": "The password for the Oozie TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Oozie TLS/SSL Certificate Trust Store Password",
"name": "oozie_https_truststore_password",
"value": null
},
{
"desc": "Number of threads used for executing callables",
"display_name": "Number Threads For Executing Callables",
"name": "oozie_service_callablequeueservice_threads",
"value": "50"
},
{
"desc": "The maximum number of rolled log files to keep for Oozie Server logs. Typically used by log4j or logback.",
"display_name": "Oozie Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "720"
},
{
"desc": "The admin port Oozie server runs.",
"display_name": "Oozie Admin Port",
"name": "oozie_admin_port",
"value": "11001"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Server Database User parameter.",
"display_name": "Suppress Parameter Validation: Oozie Server Database User",
"name": "role_config_suppression_oozie_database_user",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Oozie might connect to. This is used when Oozie is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Oozie TLS/SSL Certificate Trust Store File",
"name": "oozie_https_truststore_file",
"value": null
},
{
"desc": "If true, enables version check for Oozie Server and installed shared libraries.",
"display_name": "Enable Oozie Server Shared Libraries Version Check",
"name": "oozie_server_shared_lib_version_check_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "oozie_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Name of the database used by Oozie Server.",
"display_name": "Oozie Server Database Name",
"name": "oozie_database_name",
"value": "oozie"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_oozie_server_swap_memory_usage",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>oozie-site.xml</strong> for this role only.",
"display_name": "Oozie Server Advanced Configuration Snippet (Safety Valve) for oozie-site.xml",
"name": "oozie_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Email Action SMTP Authentication Username parameter.",
"display_name": "Suppress Parameter Validation: Oozie Email Action SMTP Authentication Username",
"name": "role_config_suppression_oozie_email_smtp_username",
"value": "false"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Oozie Server in Bytes",
"name": "oozie_java_heapsize",
"value": "1073741824"
},
{
"desc": "Hostname of the database used by Oozie Server. If the port is non-default for your database type, use host:port notation. Does not apply if you are using Derby as the database type.",
"display_name": "Oozie Server Database Host",
"name": "oozie_database_host",
"value": "localhost"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Oozie Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "OOZIE_SERVER_role_env_safety_valve",
"value": null
}
]

View File

@ -1,236 +0,0 @@
[
{
"desc": "Namespace used by this Oozie service in ZooKeeper when High Availability is enabled.",
"display_name": "ZooKeeper Namespace",
"name": "oozie_zookeeper_namespace",
"value": "oozie"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "oozie"
},
{
"desc": "The timeout in seconds used for the Oozie Upload ShareLib command. When the value is zero, there is no timeout for the command.",
"display_name": "Oozie Upload ShareLib Command Timeout",
"name": "oozie_upload_sharelib_cmd_timeout",
"value": "270"
},
{
"desc": "URL of the JMS Broker used by the Oozie service if JMS integration is enabled.",
"display_name": "JMS Broker",
"name": "oozie_jms_broker",
"value": "tcp://localhost:61616"
},
{
"desc": "Encrypt communication between clients and Oozie using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Oozie",
"name": "oozie_use_ssl",
"value": "false"
},
{
"desc": "List of event listeners used by the Oozie service. Listeners needed for JMS or SLA integration are automatically emitted if they are enabled.",
"display_name": "Oozie Event Listeners",
"name": "oozie_event_listeners",
"value": ""
},
{
"desc": "Name of the ZooKeeper service that this Oozie service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress the results of the Oozie Server Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Oozie Server Health",
"name": "service_health_suppression_oozie_oozie_servers_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Oozie Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_oozie_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JMS Broker parameter.",
"display_name": "Suppress Parameter Validation: JMS Broker",
"name": "service_config_suppression_oozie_jms_broker",
"value": "false"
},
{
"desc": "Whether to configure Oozie properties needed for SLA integration",
"display_name": "Enable SLA Integration",
"name": "oozie_use_sla",
"value": "false"
},
{
"desc": "Coordinator Job Lookup trigger command is scheduled at this interval (in seconds).",
"display_name": "Coordinator Job Lookup Interval",
"name": "oozie_service_coord_lookup_interval",
"value": "300"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "oozie"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ShareLib Root Directory parameter.",
"display_name": "Suppress Parameter Validation: ShareLib Root Directory",
"name": "service_config_suppression_oozie_sharelib_rootdir",
"value": "false"
},
{
"desc": "Root of the directory in HDFS where the Oozie ShareLibs are stored. The libraries are stored in the share/lib subdirectory under the specified root directory.",
"display_name": "ShareLib Root Directory",
"name": "oozie_sharelib_rootdir",
"value": "/user/oozie"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Event Listeners parameter.",
"display_name": "Suppress Parameter Validation: Oozie Event Listeners",
"name": "service_config_suppression_oozie_event_listeners",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Load Balancer parameter.",
"display_name": "Suppress Parameter Validation: Oozie Load Balancer",
"name": "service_config_suppression_oozie_load_balancer",
"value": "false"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "oozie"
},
{
"desc": "Whether to configure Oozie properties needed for JMS integration",
"display_name": "Enable JMS Integration",
"name": "oozie_use_jms",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Oozie Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "oozie_env_safety_valve",
"value": null
},
{
"desc": "A list of credential class mappings for CredentialsProvider.",
"display_name": "Oozie Credential Classes",
"name": "oozie_credential_classes",
"value": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hbase=org.apache.oozie.action.hadoop.HbaseCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Address of the load balancer used if Oozie HA is enabled. Should be specified in host:port format.",
"display_name": "Oozie Load Balancer",
"name": "oozie_load_balancer",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Name of the Spark on Yarn service that this Oozie service instance depends on",
"display_name": "Spark on Yarn Service",
"name": "spark_on_yarn_service",
"value": null
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "The health test thresholds of the overall Oozie Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Oozie Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Oozie Servers falls below the critical threshold.",
"display_name": "Healthy Oozie Server Monitoring Thresholds",
"name": "oozie_servers_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Oozie Credential Classes parameter.",
"display_name": "Suppress Parameter Validation: Oozie Credential Classes",
"name": "service_config_suppression_oozie_credential_classes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Namespace parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Namespace",
"name": "service_config_suppression_oozie_zookeeper_namespace",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Name of the Hive service that this Oozie service instance depends on. This is used to configure Oozie HCat integration.",
"display_name": "Hive Service",
"name": "hive_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the Oozie Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Oozie Server Count Validator",
"name": "service_config_suppression_oozie_server_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Service to run MapReduce jobs against",
"display_name": "MapReduce Service",
"name": "mapreduce_yarn_service",
"value": null
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Use ACLs on Znode while a secure ZooKeeper is used for Oozie High Availability. <b>Note:</b> This config is not emitted if ZooKeeper is not secure.",
"display_name": "Use ACLs on Znode",
"name": "oozie_zk_secure",
"value": "true"
}
]

View File

@ -1,45 +0,0 @@
[
"dfs_block_size",
"dfs_umaskmode",
"dfs_webhdfs_enabled",
"dfs_permissions",
"dfs_replication",
"io_compression_codecs",
"io_sort_mb",
"dfs_datanode_du_reserved",
"dfs_datanode_failed_volumes_tolerated",
"dfs_name_dir_restore",
"fs_trash_interval",
"dfs_safemode_min_datanodes",
"dfs_safemode_extension",
"dfs_access_time_precision",
"yarn_acl_enable",
"yarn_admin_acl",
"yarn_log_aggregation_enable",
"yarn_log_aggregation_retain_seconds",
"mapreduce_jobhistory_max_age_ms",
"mapreduce_jobhistory_cleaner_interval",
"mapreduce_reduce_memory_mb",
"mapreduce_reduce_java_opts",
"mapreduce_map_memory_mb",
"mapreduce_map_java_opts",
"yarn_nodemanager_container_manager_thread_count",
"yarn_nodemanager_delete_thread_count",
"yarn_nodemanager_heartbeat_interval_ms",
"yarn_nodemanager_localizer_cache_cleanup_interval_ms",
"yarn_nodemanager_localizer_client_thread_count",
"yarn_nodemanager_localizer_cache_target_size_mb",
"yarn_nodemanager_localizer_fetch_thread_count",
"yarn_nodemanager_log_retain_seconds",
"yarn_nodemanager_resource_memory_mb",
"yarn_resourcemanager_client_thread_count",
"yarn_resourcemanager_scheduler_client_thread_count",
"yarn_resourcemanager_admin_client_thread_count",
"yarn_resourcemanager_amliveliness_monitor_interval_ms",
"yarn_am_liveness_monitor_expiry_interval_ms",
"yarn_resourcemanager_am_max_retries",
"yarn_scheduler_minimum_allocation_mb",
"yarn_scheduler_maximum_allocation_mb",
"yarn_app_mapreduce_am_command_opts",
"yarn_app_mapreduce_am_resource_mb"
]

View File

@ -1,16 +0,0 @@
<property>
<name>sentry.service.client.server.rpc-port</name>
<value>3893</value>
</property>
<property>
<name>sentry.service.client.server.rpc-address</name>
<value>hostname</value>
</property>
<property>
<name>sentry.service.client.server.rpc-connection-timeout</name>
<value>200000</value>
</property>
<property>
<name>sentry.service.security.mode</name>
<value>none</value>
</property>

View File

@ -1,308 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "When computing the overall Sentry Server health, consider the host's health.",
"display_name": "Sentry Server Host Health Test",
"name": "sentry_server_host_health_enabled",
"value": "true"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_sentry_server_unexpected_exits",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Sentry Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "sentry_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Sentry Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Sentry Server",
"name": "role_config_suppression_sentry_server_java_opts",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Log Directory",
"name": "role_config_suppression_sentry_server_log_dir",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Sentry Server",
"name": "sentry_server_java_opts",
"value": ""
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_sentry_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Sentry Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Sentry Server in Bytes",
"name": "sentry_server_java_heapsize",
"value": "1073741824"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_sentry_env_safety_valve",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for Sentry Server logs. Typically used by log4j or logback.",
"display_name": "Sentry Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_sentry_server_host_health",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_sentry_server_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "RPC port number of Sentry Server.",
"display_name": "Sentry Server RPC Port",
"name": "sentry_service_server_rpc_port",
"value": "8038"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_sentry_server_log_directory_free_space",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Directory where Sentry Server will place its log files.",
"display_name": "Sentry Server Log Directory",
"name": "sentry_server_log_dir",
"value": "/var/log/sentry"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The maximum number of rolled log files to keep for Sentry Server logs. Typically used by log4j or logback.",
"display_name": "Sentry Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "sentry_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_sentry_server_swap_memory_usage",
"value": "false"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The minimum log level for Sentry Server logs",
"display_name": "Sentry Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_sentry_server_scm_health",
"value": "false"
},
{
"desc": "Enables the health test that the Sentry Server's process state is consistent with the role configuration",
"display_name": "Sentry Server Process Health Test",
"name": "sentry_server_scm_health_enabled",
"value": "true"
}
]

View File

@ -1,278 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the Sentry Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Sentry Server Count Validator",
"name": "service_config_suppression_sentry_server_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Database Password parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Database Password",
"name": "service_config_suppression_sentry_server_database_password",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Sentry Server Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Sentry Server Health",
"name": "service_health_suppression_sentry_sentry_servers_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: Sentry Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "Password for Sentry Server database.",
"display_name": "Sentry Server Database Password",
"name": "sentry_server_database_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Filter parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Filter",
"name": "service_config_suppression_navigator_audit_event_filter",
"value": "false"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>userName: treats the field value as a userName, and ignores any\n host-specific data. This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can be used to compare Sentry events:\n</p>\n\n<ul>\n <li>operation: the Sentry operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n</ul>\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": null
},
{
"desc": "List of users allowed to connect to the Sentry Server. These are usually service users such as hive and impala, and the list does not usually need to include end users.",
"display_name": "Allowed Connecting Users",
"name": "sentry_service_allow_connect",
"value": "hive,impala,hue,hdfs"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "sentry"
},
{
"desc": "<p>\nEvent filters are defined in a JSON object like the following:\n</p>\n\n<pre>\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n</pre>\n\n<p>\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n</p>\n\n<p>\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n</p>\n\n<p>\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n</p>\n\n<p>\nThe following is the list of fields that can be filtered for Sentry events:\n</p>\n\n<ul>\n <li>operation: the Sentry operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n</ul>\n",
"display_name": "Audit Event Filter",
"name": "navigator_audit_event_filter",
"value": null
},
{
"desc": "User for Sentry Server database.",
"display_name": "Sentry Server Database User",
"name": "sentry_server_database_user",
"value": "sentry"
},
{
"desc": "Name of the HDFS service that this Sentry service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "Type of Sentry Server database.",
"display_name": "Sentry Server Database Type",
"name": "sentry_server_database_type",
"value": "mysql"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Database Host parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Database Host",
"name": "service_config_suppression_sentry_server_database_host",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Sentry Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SENTRY_service_env_safety_valve",
"value": null
},
{
"desc": "Name of the ZooKeeper service that this Sentry service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Maximum size of audit log file in MB before it is rolled over.",
"display_name": "Maximum Audit Log File Size",
"name": "navigator_audit_log_max_file_size",
"value": "100"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Allowed Connecting Users parameter.",
"display_name": "Suppress Parameter Validation: Allowed Connecting Users",
"name": "service_config_suppression_sentry_service_allow_connect",
"value": "false"
},
{
"desc": "Host name of Sentry Server database.",
"display_name": "Sentry Server Database Host",
"name": "sentry_server_database_host",
"value": "localhost"
},
{
"desc": "The health test thresholds of the overall Sentry Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Sentry Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Sentry Servers falls below the critical threshold.",
"display_name": "Healthy Sentry Server Monitoring Thresholds",
"name": "sentry_sentry_server_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Path to the directory where audit logs will be written. The directory will be created if it doesn't exist.",
"display_name": "Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/sentry/audit"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "sentry"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sentry Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_sentry_service_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Database Name parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Database Name",
"name": "service_config_suppression_sentry_server_database_name",
"value": "false"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "sentry"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Sentry Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "sentry_server_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Sentry Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_sentry_server_config_safety_valve",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Name of Sentry Server database.",
"display_name": "Sentry Server Database Name",
"name": "sentry_server_database_name",
"value": "sentry"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "Action to take when the audit event queue is full. Drop the event or shutdown the affected process.",
"display_name": "Audit Queue Policy",
"name": "navigator_audit_queue_policy",
"value": "DROP"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Admin Groups parameter.",
"display_name": "Suppress Parameter Validation: Admin Groups",
"name": "service_config_suppression_sentry_service_admin_group",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Audit Log Directory",
"name": "service_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Port number of Sentry Server database.",
"display_name": "Sentry Server Database Port",
"name": "sentry_server_database_port",
"value": "3306"
},
{
"desc": "If an end user is in one of these admin groups, that user has administrative privileges on the Sentry Server.",
"display_name": "Admin Groups",
"name": "sentry_service_admin_group",
"value": "hive,impala,hue"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "Sentry Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "true"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Server Database User parameter.",
"display_name": "Suppress Parameter Validation: Sentry Server Database User",
"name": "service_config_suppression_sentry_server_database_user",
"value": "false"
}
]

View File

@ -1,32 +0,0 @@
[
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/solr"
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "90"
}
]

View File

@ -1,404 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "solr"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the SOLR Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties parameter.",
"display_name": "Suppress Parameter Validation: SOLR Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "service_config_suppression_navigator_client_config_safety_valve",
"value": "false"
},
{
"desc": "<p>\nEvent filters are defined in a JSON object like the following:\n</p>\n\n<pre>\n{\n \"defaultAction\" : (\"accept\", \"discard\"),\n \"rules\" : [\n {\n \"action\" : (\"accept\", \"discard\"),\n \"fields\" : [\n {\n \"name\" : \"fieldName\",\n \"match\" : \"regex\"\n }\n ]\n }\n ]\n}\n</pre>\n\n<p>\nA filter has a default action and a list of rules, in order of precedence.\nEach rule defines an action, and a list of fields to match against the\naudit event.\n</p>\n\n<p>\nA rule is \"accepted\" if all the listed field entries match the audit\nevent. At that point, the action declared by the rule is taken.\n</p>\n\n<p>\nIf no rules match the event, the default action is taken. Actions\ndefault to \"accept\" if not defined in the JSON object.\n</p>\n\n<p>\nThe following is the list of fields that can be filtered for Solr events:\n</p>\n\n<ul>\n <li>operation: the Solr operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n</ul>\n",
"display_name": "Audit Event Filter",
"name": "navigator_audit_event_filter",
"value": null
},
{
"desc": "Choose the authentication mechanism used by Solr.",
"display_name": "Solr Secure Authentication",
"name": "solr_security_authentication",
"value": "simple"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Filter parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Filter",
"name": "service_config_suppression_navigator_audit_event_filter",
"value": "false"
},
{
"desc": "Maximum size of audit log file in MB before it is rolled over.",
"display_name": "Maximum Audit Log File Size",
"name": "navigator_audit_log_max_file_size",
"value": "100"
},
{
"desc": "<p>\nConfigures the rules for event tracking and coalescing. This feature is\nused to define equivalency between different audit events. When\nevents match, according to a set of configurable parameters, only one\nentry in the audit list is generated for all the matching events.\n</p>\n\n<p>\nTracking works by keeping a reference to events when they first appear,\nand comparing other incoming events against the \"tracked\" events according\nto the rules defined here.\n</p>\n\n<p>Event trackers are defined in a JSON object like the following:</p>\n\n<pre>\n{\n \"timeToLive\" : [integer],\n \"fields\" : [\n {\n \"type\" : [string],\n \"name\" : [string]\n }\n ]\n}\n</pre>\n\n<p>\nWhere:\n</p>\n\n<ul>\n <li>timeToLive: maximum amount of time an event will be tracked, in\n milliseconds. Must be provided. This defines how long, since it's\n first seen, an event will be tracked. A value of 0 disables tracking.</li>\n\n <li>fields: list of fields to compare when matching events against\n tracked events.</li>\n</ul>\n\n<p>\nEach field has an evaluator type associated with it. The evaluator defines\nhow the field data is to be compared. The following evaluators are\navailable:\n</p>\n\n<ul>\n <li>value: uses the field value for comparison.</li>\n\n <li>userName: treats the field value as a userName, and ignores any\n host-specific data. This is useful for environment using Kerberos,\n so that only the principal name and realm are compared.</li>\n</ul>\n\n<p>\nThe following is the list of fields that can be used to compare Solr events:\n</p>\n\n<ul>\n <li>operation: the Solr operation being performed.</li>\n <li>username: the user performing the action.</li>\n <li>ipAddress: the IP from where the request originated.</li>\n <li>allowed: whether the operation was allowed or denied.</li>\n</ul>\n",
"display_name": "Audit Event Tracker",
"name": "navigator_event_tracker",
"value": null
},
{
"desc": "Action to take when the audit event queue is full. Drop the event or shutdown the affected process.",
"display_name": "Audit Queue Policy",
"name": "navigator_audit_queue_policy",
"value": "DROP"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>navigator.client.properties</strong>.",
"display_name": "SOLR Client Advanced Configuration Snippet (Safety Valve) for navigator.client.properties",
"name": "navigator_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Solr Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "service_config_suppression_solr_sentry_safety_valve",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Solr might connect to. This is used when Solr is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Solr TLS/SSL Certificate Trust Store File",
"name": "solr_https_truststore_file",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP URI parameter.",
"display_name": "Suppress Parameter Validation: LDAP URI",
"name": "service_config_suppression_solr_ldap_uri",
"value": "false"
},
{
"desc": "When set, this value is appended to all usernames before authenticating with the LDAP server. For example, if this parameter is set to \"my.domain.com\", and the user authenticating to the Solr daemon is \"mark\", then \"mark@my.domain.com\" is passed to the LDAP server. If this field is not set, the username remains unaltered before being passed to the LDAP server. This parameter is mutually exclusive with LDAP BaseDN.",
"display_name": "LDAP Domain",
"name": "ldap_domain",
"value": null
},
{
"desc": "The password for the Solr TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Solr TLS/SSL Certificate Trust Store Password",
"name": "solr_https_truststore_password",
"value": null
},
{
"desc": "The password for the Solr JKS keystore file.",
"display_name": "Solr TLS/SSL Server JKS Keystore File Password",
"name": "solr_https_keystore_password",
"value": null
},
{
"desc": "Name of the HDFS service that this Search service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "If Solr does not respond on its web URL within this time interval, the Catalina process is killed.",
"display_name": "Solrd Watchdog Timeout",
"name": "solrd_watchdog_timeout",
"value": "30"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Solr Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_solr_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Data Directory parameter.",
"display_name": "Suppress Parameter Validation: HDFS Data Directory",
"name": "service_config_suppression_hdfs_data_dir",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Search service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Solr TLS/SSL Certificate Trust Store File",
"name": "service_config_suppression_solr_https_truststore_file",
"value": "false"
},
{
"desc": "If true, attempts to establish a TLS (Transport Layer Security) connection with the LDAP server.",
"display_name": "Enable LDAP TLS",
"name": "solr_ldap_enable_starttls",
"value": "false"
},
{
"desc": "HDFS directory used for storage by this Solr service.",
"display_name": "HDFS Data Directory",
"name": "hdfs_data_dir",
"value": "/solr"
},
{
"desc": "Whether to suppress configuration warnings produced by the LDAP Secure URI and Start TLS Validator configuration validator.",
"display_name": "Suppress Configuration Validator: LDAP Secure URI and Start TLS Validator",
"name": "service_config_suppression_solr_ldaps_or_tls_validator",
"value": "false"
},
{
"desc": "HDFS path to the global policy file for Sentry authorization. This should be a relative path (and not a full HDFS URL). The global policy file must be in Sentry policy file format.",
"display_name": "Sentry Global Policy File",
"name": "sentry_solr_provider_resource",
"value": "/user/solr/sentry/sentry-provider.ini"
},
{
"desc": "Path to the directory where audit logs will be written. The directory will be created if it doesn't exist.",
"display_name": "Audit Log Directory",
"name": "audit_event_log_dir",
"value": "/var/log/solr/audit"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Solr TLS/SSL Server JKS Keystore File Password",
"name": "service_config_suppression_solr_https_keystore_password",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hdfs-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Solr Service Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "solr_hdfs_site_safety_valve",
"value": null
},
{
"desc": "The health test thresholds of the overall Solr Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Solr Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Solr Servers falls below the critical threshold.",
"display_name": "Healthy Solr Server Monitoring Thresholds",
"name": "solr_solr_servers_healthy_thresholds",
"value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sentry Global Policy File parameter.",
"display_name": "Suppress Parameter Validation: Sentry Global Policy File",
"name": "service_config_suppression_sentry_solr_provider_resource",
"value": "false"
},
{
"desc": "When checked, LDAP-based authentication for users is enabled.",
"display_name": "Enable LDAP",
"name": "solr_enable_ldap_auth",
"value": "false"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "solr"
},
{
"desc": "Whether to suppress configuration warnings produced by the Solr Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Solr Server Count Validator",
"name": "service_config_suppression_solr_server_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Service Advanced Configuration Snippet (Safety Valve) for core-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Solr Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "service_config_suppression_solr_core_site_safety_valve",
"value": "false"
},
{
"desc": "The class to use in Sentry authorization for user to group mapping. Sentry authorization may be configured to use either Hadoop groups or local groups defined in the policy file. When configured with Hadoop groups, Sentry will ask the HDFS Namenode for group mapping for a given user to determine authorization access.",
"display_name": "Sentry User to Group Mapping Class",
"name": "solr_sentry_provider",
"value": "org.apache.sentry.provider.file.HadoopGroupResourceAuthorizationProvider"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>core-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Solr Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "solr_core_site_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Solr TLS/SSL Certificate Trust Store Password",
"name": "service_config_suppression_solr_https_truststore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Solr Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "solr_env_safety_valve",
"value": null
},
{
"desc": "Use Sentry to enable role-based, fine-grained authorization. Sentry is supported only on Search 1.1 or later and CDH 5 or later deployments and requires authentication to be turned on for Solr.</a>.",
"display_name": "Enable Sentry Authorization",
"name": "solr_sentry_enabled",
"value": "false"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "solr"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>sentry-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Solr Service Advanced Configuration Snippet (Safety Valve) for sentry-site.xml",
"name": "solr_sentry_safety_valve",
"value": null
},
{
"desc": "Enable the background watchdog thread that can kill Catalina process if Solr is not responsive.",
"display_name": "Enable Solrd Watchdog",
"name": "solrd_enable_watchdog",
"value": "true"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Audit Log Directory",
"name": "service_config_suppression_audit_event_log_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Service Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml parameter.",
"display_name": "Suppress Parameter Validation: Solr Service Advanced Configuration Snippet (Safety Valve) for hdfs-site.xml",
"name": "service_config_suppression_solr_hdfs_site_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Audit Event Tracker parameter.",
"display_name": "Suppress Parameter Validation: Audit Event Tracker",
"name": "service_config_suppression_navigator_event_tracker",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Solr Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Solr Server Health",
"name": "service_health_suppression_solr_solr_servers_healthy",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Gateway Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Gateway Count Validator",
"name": "service_config_suppression_gateway_count_validator",
"value": "false"
},
{
"desc": "The URI of the LDAP server to use if LDAP authentication is enabled. The URI must be prefixed with ldap:// or ldaps://. Usernames and passwords are transmitted in the clear unless an \"ldaps://\" URI is specified (or LDAP TLS is enabled). The URI can optionally specify the port; for example, ldaps://ldap_server.example.com:636.",
"display_name": "LDAP URI",
"name": "solr_ldap_uri",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Znode parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Znode",
"name": "service_config_suppression_zookeeper_znode",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Solr is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Solr TLS/SSL Server JKS Keystore File Location",
"name": "solr_https_keystore_file",
"value": null
},
{
"desc": "ZooKeeper znode used to store information about this Solr service.",
"display_name": "ZooKeeper Znode",
"name": "zookeeper_znode",
"value": "/solr"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP BaseDN parameter.",
"display_name": "Suppress Parameter Validation: LDAP BaseDN",
"name": "service_config_suppression_solr_ldap_basedn",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Solr TLS/SSL Server JKS Keystore File Location",
"name": "service_config_suppression_solr_https_keystore_file",
"value": "false"
},
{
"desc": "Enable collection of audit events from the service's roles.",
"display_name": "Enable Audit Collection",
"name": "navigator_audit_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the LDAP TLS Validator configuration validator.",
"display_name": "Suppress Configuration Validator: LDAP TLS Validator",
"name": "service_config_suppression_solr_ldap_tls_validator",
"value": "false"
},
{
"desc": "Encrypt communication between clients and Solr using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)). Additional manual steps must be performed; see <a class=\"bold\" href=\"http://tiny.cloudera.com/solr-ssl-5.4\" target=\"_blank\">Enabling TLS/SSL for Solr<i class=\"externalLink\"></i></a>.",
"display_name": "Enable TLS/SSL for Solr",
"name": "solr_use_ssl",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "When set, this parameter is used to convert the username to the LDAP Distinguished Name (DN), so that the resulting DN looks like uid=username,X. For example, if this parameter is set to \"ou=People,dc=example,dc=com\", and the username passed in is \"mark\", the resulting authentication passed to the LDAP server looks like \"uid=mark,ou=People,dc=example,dc=com\". This parameter is generally most useful when authenticating against an OpenLDAP server. This parameter is mutually exclusive with LDAP Domain.",
"display_name": "LDAP BaseDN",
"name": "solr_ldap_basedn",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the LDAP Domain parameter.",
"display_name": "Suppress Parameter Validation: LDAP Domain",
"name": "service_config_suppression_ldap_domain",
"value": "false"
}
]

View File

@ -1,440 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_solr_server_web_metric_collection",
"value": "false"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Solr Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Solr Server",
"name": "role_config_suppression_solr_java_opts",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Enable caching of HDFS blocks in Solr. There is one block cache per Solr collection. configured to use off-heap memory, Maximum Off-Heap Memory must be set high enough to account for all block caches.",
"display_name": "HDFS Block Cache",
"name": "solr_hdfs_blockcache_enabled",
"value": "true"
},
{
"desc": "When computing the overall Solr Server health, consider the host's health.",
"display_name": "Solr Server Host Health Test",
"name": "solr_server_host_health_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Admin port of the Solr Server.",
"display_name": "Solr Admin Port",
"name": "solr_admin_port",
"value": "8984"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "solr_server_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Solr Server in Bytes",
"name": "solr_java_heapsize",
"value": "1073741824"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Solr Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Solr Server API Liveness heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Solr Server API Liveness",
"name": "role_health_suppression_solr_core_status_collection_health",
"value": "false"
},
{
"desc": "The maximum number of request processing threads to be created by Solr server, which determines the maximum number of simultaneous requests that can be handled.",
"display_name": "Solr Max Connector Threads",
"name": "solr_max_connector_thread",
"value": "10000"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_solr_server_swap_memory_usage",
"value": "false"
},
{
"desc": "Enables the health test that the Solr Server's process state is consistent with the role configuration",
"display_name": "Solr Server Process Health Test",
"name": "solr_server_scm_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_solr_server_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the Solr Server API request.",
"display_name": "Solr Server API Liveness Request Duration",
"name": "solr_core_status_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Solr Server Log Directory",
"name": "role_config_suppression_solr_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of Solr Server in Bytes parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of Solr Server in Bytes",
"name": "role_config_suppression_solr_java_heapsize",
"value": "false"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Directory where Solr Server will place its log files.",
"display_name": "Solr Server Log Directory",
"name": "solr_log_dir",
"value": "/var/log/solr"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Solr Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Solr Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SOLR_SERVER_role_env_safety_valve",
"value": null
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather status of Solr Cores from the Solr Server with a simple API request.",
"display_name": "Solr Server API Liveness",
"name": "solr_core_status_collection_health_enabled",
"value": "true"
},
{
"desc": "Use off-heap memory when caching HDFS blocks in Solr.",
"display_name": "HDFS Block Cache Off-Heap Memory",
"name": "solr_hdfs_blockcache_direct_memory_allocation",
"value": "true"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "HTTPS port of Solr Server.",
"display_name": "Solr HTTPS port",
"name": "solr_https_port",
"value": "8985"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Data Directory parameter.",
"display_name": "Suppress Parameter Validation: Solr Data Directory",
"name": "role_config_suppression_solr_data_dir",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for Solr Server logs. Typically used by log4j or logback.",
"display_name": "Solr Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the GC Duration heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_solr_server_gc_duration",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Directory on local file system where Solr Server keeps the configurations for collections.",
"display_name": "Solr Data Directory",
"name": "solr_data_dir",
"value": "/var/lib/solr"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "solr_server_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Solr Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_solr_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Number of slabs per block cache. The size of the cache is 8 KB (the block size) times the number of blocks per slab times the number of slabs.",
"display_name": "HDFS Block Cache Number of Slabs",
"name": "solr_hdfs_blockcache_slab_count",
"value": "1"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_solr_server_unexpected_exits",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for Solr Server logs. Typically used by log4j or logback.",
"display_name": "Solr Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Number of blocks per cache slab. The size of the cache is 8 KB (the block size) times the number of blocks per slab times the number of slabs.",
"display_name": "HDFS Block Cache Blocks per Slab",
"name": "solr_hdfs_blockcache_blocksperbank",
"value": "16384"
},
{
"desc": "Address of the load balancer, specified in host:port format.",
"display_name": "Solr Load Balancer",
"name": "solr_load_balancer",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_solr_server_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_solr_server_log_directory_free_space",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "solr_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_solr_server_file_descriptor",
"value": "false"
},
{
"desc": "Maximum amount of off-heap memory in bytes that may be allocated by the Java process. Passed to Java -XX:MaxDirectMemorySize. If unset, defaults to the size of the heap.",
"display_name": "Java Direct Memory Size of Solr Server in Bytes",
"name": "solr_java_direct_memory_size",
"value": "1073741824"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "solr_server_gc_duration_window",
"value": "5"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "solr_server_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Solr Server",
"name": "solr_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "The minimum log level for Solr Server logs",
"display_name": "Solr Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_solr_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "HTTP port of Solr Server.",
"display_name": "Solr HTTP Port",
"name": "solr_http_port",
"value": "8983"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Solr Load Balancer parameter.",
"display_name": "Suppress Parameter Validation: Solr Load Balancer",
"name": "role_config_suppression_solr_load_balancer",
"value": "false"
}
]

View File

@ -1,158 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Data Serializer parameter.",
"display_name": "Suppress Parameter Validation: Spark Data Serializer",
"name": "role_config_suppression_spark_data_serializer",
"value": "false"
},
{
"desc": "Python library paths to add to PySpark applications.",
"display_name": "Extra Python Path",
"name": "spark_python_path",
"value": ""
},
{
"desc": "When dynamic allocation is enabled, time after which idle executors will be stopped.",
"display_name": "Executor Idle Timeout",
"name": "spark_dynamic_allocation_idle_timeout",
"value": "60"
},
{
"desc": "Enable Usage of External Shuffle Service. The External Shuffle Service is not robust to NodeManager restarts and so is not recommended for production use.",
"display_name": "Enable Shuffle Service",
"name": "spark_shuffle_service_enabled",
"value": "true"
},
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/spark"
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "51"
},
{
"desc": "Enable dynamic allocation of executors in Spark applications.",
"display_name": "Enable Dynamic Allocation",
"name": "spark_dynamic_allocation_enabled",
"value": "true"
},
{
"desc": "Which deploy mode to use by default. Can be overridden by users when launching applications.",
"display_name": "Default Application Deploy Mode",
"name": "spark_deploy_mode",
"value": "client"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>spark-conf/spark-env.sh</strong>.",
"display_name": "Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "spark-conf/spark-env.sh_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "When dynamic allocation is enabled, timeout before requesting new executors when there are backlogged tasks.",
"display_name": "Scheduler Backlog Timeout",
"name": "spark_dynamic_allocation_scheduler_backlog_timeout",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Extra Python Path parameter.",
"display_name": "Suppress Parameter Validation: Extra Python Path",
"name": "role_config_suppression_spark_python_path",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>spark-conf/spark-defaults.conf</strong>.",
"display_name": "Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-defaults.conf",
"name": "spark-conf/spark-defaults.conf_client_config_safety_valve",
"value": null
},
{
"desc": "When dynamic allocation is enabled, timeout before requesting new executors after the initial backlog timeout has already expired. By default this is the same value as the initial backlog timeout.",
"display_name": "Sustained Scheduler Backlog Timeout",
"name": "spark_dynamic_allocation_sustained_scheduler_backlog_timeout",
"value": null
},
{
"desc": "When dynamic allocation is enabled, maximum number of executors to allocate. By default, Spark relies on YARN to control the maximum number of executors for the application.",
"display_name": "Maximum Executor Count",
"name": "spark_dynamic_allocation_max_executors",
"value": null
},
{
"desc": "When dynamic allocation is enabled, time after which idle executors with cached RDDs blocks will be stopped. By default, they're never stopped. This configuration is only available starting in CDH 5.5.",
"display_name": "Caching Executor Idle Timeout",
"name": "spark_dynamic_allocation_cached_idle_timeout",
"value": null
},
{
"desc": "Write Spark application history logs to HDFS.",
"display_name": "Enable History",
"name": "spark_history_enabled",
"value": "true"
},
{
"desc": "When dynamic allocation is enabled, minimum number of executors to keep alive while the application is running.",
"display_name": "Minimum Executor Count",
"name": "spark_dynamic_allocation_min_executors",
"value": "0"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-defaults.conf parameter.",
"display_name": "Suppress Parameter Validation: Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-defaults.conf",
"name": "role_config_suppression_spark-conf/spark-defaults.conf_client_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/log4j.properties parameter.",
"display_name": "Suppress Parameter Validation: Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/log4j.properties",
"name": "role_config_suppression_spark-conf/log4j.properties_client_config_safety_valve",
"value": "false"
},
{
"desc": "Name of class implementing org.apache.spark.serializer.Serializer to use in Spark applications.",
"display_name": "Spark Data Serializer",
"name": "spark_data_serializer",
"value": "org.apache.spark.serializer.KryoSerializer"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>spark-conf/log4j.properties</strong>.",
"display_name": "Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/log4j.properties",
"name": "spark-conf/log4j.properties_client_config_safety_valve",
"value": null
},
{
"desc": "When dynamic allocation is enabled, number of executors to allocate when the application starts. By default, this is the same value as the minimum number of executors.",
"display_name": "Initial Executor Count",
"name": "spark_dynamic_allocation_initial_executors",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh parameter.",
"display_name": "Suppress Parameter Validation: Spark Client Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "role_config_suppression_spark-conf/spark-env.sh_client_config_safety_valve",
"value": "false"
}
]

View File

@ -1,152 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Spark Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_spark_on_yarn_service_env_safety_valve",
"value": "false"
},
{
"desc": "Enable whether the Spark communication protocols do authentication using a shared secret.",
"display_name": "Spark Authentication",
"name": "spark_authenticate",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "spark"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark History Location (HDFS) parameter.",
"display_name": "Suppress Parameter Validation: Spark History Location (HDFS)",
"name": "service_config_suppression_spark_history_log_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>spark-conf/spark-env.sh</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Spark Service Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "spark-conf/spark-env.sh_service_safety_valve",
"value": null
},
{
"desc": "The location of Spark application history logs in HDFS. Changing this value will not move existing logs to the new location.",
"display_name": "Spark History Location (HDFS)",
"name": "spark_history_log_dir",
"value": "/user/spark/applicationHistory"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "spark"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark JAR Location (HDFS) parameter.",
"display_name": "Suppress Parameter Validation: Spark JAR Location (HDFS)",
"name": "service_config_suppression_spark_jar_hdfs_path",
"value": "false"
},
{
"desc": "The location of the Spark JAR in HDFS. If left blank, Cloudera Manager will use the Spark JAR installed on the cluster nodes.",
"display_name": "Spark JAR Location (HDFS)",
"name": "spark_jar_hdfs_path",
"value": ""
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "spark"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Spark Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SPARK_ON_YARN_service_env_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the History Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: History Server Count Validator",
"name": "service_config_suppression_spark_yarn_history_server_count_validator",
"value": "false"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Gateway Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Gateway Count Validator",
"name": "service_config_suppression_gateway_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Name of the YARN (MR2 Included) service that this Spark service instance depends on",
"display_name": "YARN (MR2 Included) Service",
"name": "yarn_service",
"value": null
},
{
"desc": "The port the Spark Shuffle Service listens for fetch requests.",
"display_name": "Spark Shuffle Service Port",
"name": "spark_shuffle_service_port",
"value": "7337"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Spark Service Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh parameter.",
"display_name": "Suppress Parameter Validation: Spark Service Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "service_config_suppression_spark-conf/spark-env.sh_service_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
}
]

View File

@ -1,236 +0,0 @@
[
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "History Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SPARK_YARN_HISTORY_SERVER_role_env_safety_valve",
"value": null
},
{
"desc": "The port of the history server WebUI",
"display_name": "History Server WebUI Port",
"name": "history_server_web_port",
"value": "18088"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "How often to poll HDFS for new applications.",
"display_name": "HDFS Polling Interval",
"name": "history_server_fs_poll_interval",
"value": "10"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the History Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: History Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_spark_on_yarn_spark_yarn_history_server_file_descriptor",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Max number of application UIs to keep in the History Server's memory. All applications will still be available, but may take longer to load if they're not in memory.",
"display_name": "Retained App Count",
"name": "history_server_retained_apps",
"value": "50"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The log directory for log files of the role History Server.",
"display_name": "History Server Log Directory",
"name": "log_dir",
"value": "/var/log/spark"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the History Server Advanced Configuration Snippet (Safety Valve) for spark-history-server.conf parameter.",
"display_name": "Suppress Parameter Validation: History Server Advanced Configuration Snippet (Safety Valve) for spark-history-server.conf",
"name": "role_config_suppression_spark-history-server.conf_role_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "spark_yarn_history_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "History Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Maximum size for the Java process heap memory. Passed to Java -Xmx. Measured in bytes.",
"display_name": "Java Heap Size of History Server in Bytes",
"name": "history_server_max_heapsize",
"value": "268435456"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_spark_on_yarn_spark_yarn_history_server_host_health",
"value": "false"
},
{
"desc": "When computing the overall History Server health, consider the host's health.",
"display_name": "History Server Host Health Test",
"name": "spark_yarn_history_server_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_spark_on_yarn_spark_yarn_history_server_unexpected_exits",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The maximum size, in megabytes, per log file for History Server logs. Typically used by log4j or logback.",
"display_name": "History Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the History Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: History Server Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_spark_on_yarn_spark_yarn_history_server_swap_memory_usage",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>spark-history-server.conf</strong> for this role only.",
"display_name": "History Server Advanced Configuration Snippet (Safety Valve) for spark-history-server.conf",
"name": "spark-history-server.conf_role_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the History Server Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh parameter.",
"display_name": "Suppress Parameter Validation: History Server Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "role_config_suppression_spark-conf/spark-env.sh_role_safety_valve",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for History Server logs. Typically used by log4j or logback.",
"display_name": "History Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Enables the health test that the History Server's process state is consistent with the role configuration",
"display_name": "History Server Process Health Test",
"name": "spark_yarn_history_server_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the History Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: History Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_spark_yarn_history_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>spark-conf/spark-env.sh</strong> for this role only.",
"display_name": "History Server Advanced Configuration Snippet (Safety Valve) for spark-conf/spark-env.sh",
"name": "spark-conf/spark-env.sh_role_safety_valve",
"value": null
},
{
"desc": "The minimum log level for History Server logs",
"display_name": "History Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_spark_on_yarn_spark_yarn_history_server_scm_health",
"value": "false"
}
]

View File

@ -1,110 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_sqoop_env_safety_valve",
"value": "false"
},
{
"desc": "When computing the overall SQOOP health, consider Sqoop 2 Server's health",
"display_name": "Sqoop 2 Server Role Health Test",
"name": "sqoop_sqoop_server_health_enabled",
"value": "true"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "sqoop"
},
{
"desc": "Whether to suppress the results of the Sqoop 2 Server Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Sqoop 2 Server Health",
"name": "service_health_suppression_sqoop_sqoop_server_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Sqoop 2 Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Sqoop 2 Server Count Validator",
"name": "service_config_suppression_sqoop_server_count_validator",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Sqoop 2 Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "sqoop_env_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "sqoop2"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "sqoop2"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "MapReduce jobs are run against this service.",
"display_name": "MapReduce Service",
"name": "mapreduce_yarn_service",
"value": null
}
]

View File

@ -1,398 +0,0 @@
[
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Directory where Sqoop 2 Server will place its log files.",
"display_name": "Sqoop 2 Server Log Directory",
"name": "sqoop_log_dir",
"value": "/var/log/sqoop2"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Password for Sqoop repository database. Does not apply if you are using Derby as the database type.",
"display_name": "Sqoop Repository Database Password",
"name": "sqoop_repository_database_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_sqoop_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Server Advanced Configuration Snippet (Safety Valve) for sqoop.properties parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Server Advanced Configuration Snippet (Safety Valve) for sqoop.properties",
"name": "role_config_suppression_sqoop_config_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Admin port of Sqoop 2 Server. (<strong>Note:</strong> This configuration only applies from CDH 4.3 onwards.)",
"display_name": "Sqoop 2 Admin Port",
"name": "sqoop_admin_port",
"value": "8005"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Maximum number of clients allowed to connect to the Sqoop 2 Server.",
"display_name": "Maximum Client Connections",
"name": "max_client_connections",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>sqoop.properties</strong> for this role only.",
"display_name": "Sqoop 2 Server Advanced Configuration Snippet (Safety Valve) for sqoop.properties",
"name": "sqoop_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_sqoop_server_unexpected_exits",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part of the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "HTTP port of Sqoop 2 Server. (<strong>Note:</strong> This configuration only applies from CDH 4.3 onwards.)",
"display_name": "Sqoop 2 HTTP Port",
"name": "sqoop_http_port",
"value": "12000"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_sqoop_server_file_descriptor",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Sqoop 2 Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SQOOP_SERVER_role_env_safety_valve",
"value": null
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "sqoop_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_sqoop_server_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_sqoop_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop Repository Database User parameter.",
"display_name": "Suppress Parameter Validation: Sqoop Repository Database User",
"name": "role_config_suppression_sqoop_repository_database_user",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_sqoop_server_scm_health",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Sqoop 2 Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Type of Sqoop repository database.",
"display_name": "Sqoop Repository Database Type",
"name": "sqoop_repository_database_type",
"value": "derby"
},
{
"desc": "User for Sqoop repository database. Does not apply if you are using Derby as the database type.",
"display_name": "Sqoop Repository Database User",
"name": "sqoop_repository_database_user",
"value": "sa"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop Repository Database Password parameter.",
"display_name": "Suppress Parameter Validation: Sqoop Repository Database Password",
"name": "role_config_suppression_sqoop_repository_database_password",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Name of Sqoop repository database. Does not apply if you are using Derby as the database type.",
"display_name": "Sqoop Repository Database Name",
"name": "sqoop_repository_database_name",
"value": "sqoop"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Server Log Directory",
"name": "role_config_suppression_sqoop_log_dir",
"value": "false"
},
{
"desc": "The maximum size, in megabytes, per log file for Sqoop 2 Server logs. Typically used by log4j or logback.",
"display_name": "Sqoop 2 Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Sqoop 2 Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Sqoop 2 Server",
"name": "role_config_suppression_sqoop_java_opts",
"value": "false"
},
{
"desc": "Directory where the Sqoop 2 Server places its metastore data. This is used only when Sqoop Repository Database Type is Derby.",
"display_name": "Sqoop 2 Server Metastore Directory",
"name": "sqoop_metastore_data_dir",
"value": "/var/lib/sqoop2"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop 2 Server Metastore Directory parameter.",
"display_name": "Suppress Parameter Validation: Sqoop 2 Server Metastore Directory",
"name": "role_config_suppression_sqoop_metastore_data_dir",
"value": "false"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Sqoop 2 Server",
"name": "sqoop_java_opts",
"value": ""
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop Repository Database Host parameter.",
"display_name": "Suppress Parameter Validation: Sqoop Repository Database Host",
"name": "role_config_suppression_sqoop_repository_database_host",
"value": "false"
},
{
"desc": "When computing the overall Sqoop 2 Server health, consider the host's health.",
"display_name": "Sqoop 2 Server Host Health Test",
"name": "sqoop_server_host_health_enabled",
"value": "true"
},
{
"desc": "The maximum number of rolled log files to keep for Sqoop 2 Server logs. Typically used by log4j or logback.",
"display_name": "Sqoop 2 Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Enables the health test that the Sqoop 2 Server's process state is consistent with the role configuration",
"display_name": "Sqoop 2 Server Process Health Test",
"name": "sqoop_server_scm_health_enabled",
"value": "true"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of Sqoop 2 Server in Bytes",
"name": "sqoop_java_heapsize",
"value": "1073741824"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_sqoop_server_log_directory_free_space",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Host name of Sqoop repository database. If the port is non-default for your database type, use host:port notation. Does not apply if you are using Derby as the database type.",
"display_name": "Sqoop Repository Database Host",
"name": "sqoop_repository_database_host",
"value": "localhost"
},
{
"desc": "The minimum log level for Sqoop 2 Server logs",
"display_name": "Sqoop 2 Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Sqoop Repository Database Name parameter.",
"display_name": "Suppress Parameter Validation: Sqoop Repository Database Name",
"name": "role_config_suppression_sqoop_repository_database_name",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_sqoop_server_host_health",
"value": "false"
}
]

View File

@ -1,482 +0,0 @@
[
{
"desc": "For MapReduce job outputs that are compressed, specify the compression codec to use. Will be part of generated client configuration.",
"display_name": "Compression Codec of MapReduce Job Output",
"name": "mapred_output_compression_codec",
"value": "org.apache.hadoop.io.compress.DefaultCodec"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>mapred-site.xml</strong>.",
"display_name": "MapReduce Client Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "mapreduce_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Reduce Task Java Opts Base parameter.",
"display_name": "Suppress Parameter Validation: Reduce Task Java Opts Base",
"name": "role_config_suppression_mapreduce_reduce_java_opts",
"value": "false"
},
{
"desc": "Whether map tasks should attempt to use the optimized native implementation of the map-side output collector. This can improve performance of many jobs that are shuffle-intensive. Experimental in CDH 5.2.",
"display_name": "Enable Optimized Map-side Output Collector",
"name": "mapreduce_enable_native_map_output_collector",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Map Task Maximum Heap Size Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Map Task Maximum Heap Size Validator",
"name": "role_config_suppression_mapreduce_map_java_opts_max_heap_mapreduce_map_memory_mb_validator",
"value": "false"
},
{
"desc": "The directory where the client configs will be deployed",
"display_name": "Deploy Directory",
"name": "client_config_root_dir",
"value": "/etc/hadoop"
},
{
"desc": "Whether to suppress configuration warnings produced by the ApplicationMaster Java Maximum Heap Size Validator configuration validator.",
"display_name": "Suppress Configuration Validator: ApplicationMaster Java Maximum Heap Size Validator",
"name": "role_config_suppression_yarn_app_mapreduce_am_max_heap_yarn_app_mapreduce_am_resource_mb_validator",
"value": "false"
},
{
"desc": "The number of streams to merge at the same time while sorting files. That is, the number of sort heads to use during the merge sort on the reducer side. This determines the number of open file handles. Merging more files in parallel reduces merge sort iterations and improves run time by eliminating disk I/O. Note that merging more files in parallel uses more memory. If 'io.sort.factor' is set too high or the maximum JVM heap is set too low, excessive garbage collection will occur. The Hadoop default is 10, but Cloudera recommends a higher value. Will be part of generated client configuration.",
"display_name": "I/O Sort Factor",
"name": "io_sort_factor",
"value": "64"
},
{
"desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
"display_name": "Alternatives Priority",
"name": "client_config_priority",
"value": "92"
},
{
"desc": "Location to store the job history files of running jobs. This is a path on the host where the JobTracker is running.",
"display_name": "Running Job History Location",
"name": "hadoop_job_history_dir",
"value": "/var/log/hadoop-mapreduce/history"
},
{
"desc": "The number of virtual CPU cores allocated for each map task of a job. This parameter has no effect prior to CDH 4.4.",
"display_name": "Map Task CPU Virtual Cores",
"name": "mapreduce_map_cpu_vcores",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Compression Codec of MapReduce Job Output parameter.",
"display_name": "Suppress Parameter Validation: Compression Codec of MapReduce Job Output",
"name": "role_config_suppression_mapred_output_compression_codec",
"value": "false"
},
{
"desc": "If enabled, multiple instances of some reduce tasks may be executed in parallel.",
"display_name": "Reduce Tasks Speculative Execution",
"name": "mapred_reduce_tasks_speculative_execution",
"value": "false"
},
{
"desc": "Limit on the number of counters allowed per job.",
"display_name": "Job Counters Limit",
"name": "mapreduce_job_counters_limit",
"value": "120"
},
{
"desc": "The application framework to run jobs with. If not set, jobs will be run with the local job runner.",
"display_name": "Application Framework",
"name": "mapreduce_framework_name",
"value": "yarn"
},
{
"desc": "Whether to suppress configuration warnings produced by the Job Submit Replication Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Job Submit Replication Validator",
"name": "role_config_suppression_mapreduce_replication_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Reduce Task Maximum Heap Size Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Reduce Task Maximum Heap Size Validator",
"name": "role_config_suppression_mapreduce_reduce_java_opts_max_heap_mapreduce_reduce_memory_mb_validator",
"value": "false"
},
{
"desc": "Base sleep time between failover attempts. Used only if RM HA is enabled.",
"display_name": "Client Failover Sleep Base Time",
"name": "client_failover_sleep_base",
"value": "100"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Running Job History Location parameter.",
"display_name": "Suppress Parameter Validation: Running Job History Location",
"name": "role_config_suppression_hadoop_job_history_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into the client configuration for <strong>yarn-site.xml</strong>.",
"display_name": "YARN Client Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "yarn_client_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Size of buffer for read and write operations of SequenceFiles.",
"display_name": "SequenceFile I/O Buffer Size",
"name": "io_file_buffer_size",
"value": "65536"
},
{
"desc": "Fraction of the number of map tasks in the job which should be completed before reduce tasks are scheduled for the job.",
"display_name": "Number of Map Tasks to Complete Before Reduce Tasks",
"name": "mapred_reduce_slowstart_completed_maps",
"value": "0.8"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Maximum Number of Attempts for MapReduce Jobs parameter.",
"display_name": "Suppress Parameter Validation: Maximum Number of Attempts for MapReduce Jobs",
"name": "role_config_suppression_mapreduce_am_max_attempts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
"display_name": "Suppress Parameter Validation: Deploy Directory",
"name": "role_config_suppression_client_config_root_dir",
"value": "false"
},
{
"desc": "The maximum heap size, in bytes, of the Java MapReduce ApplicationMaster. This number will be formatted and concatenated with 'ApplicationMaster Java Opts Base' to pass to Hadoop.",
"display_name": "ApplicationMaster Java Maximum Heap Size",
"name": "yarn_app_mapreduce_am_max_heap",
"value": "825955249"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Compression Codec of MapReduce Map Output parameter.",
"display_name": "Suppress Parameter Validation: Compression Codec of MapReduce Map Output",
"name": "role_config_suppression_mapred_map_output_compression_codec",
"value": "false"
},
{
"desc": "The replication level for submitted job files.",
"display_name": "Mapreduce Submit Replication",
"name": "mapred_submit_replication",
"value": "10"
},
{
"desc": "The total amount of memory buffer, in megabytes, to use while sorting files. Note that this memory comes out of the user JVM heap size (meaning total user JVM heap - this amount of memory = total user usable heap space. Note that Cloudera's default differs from Hadoop's default; Cloudera uses a bigger buffer by default because modern machines often have more RAM. The smallest value across all TaskTrackers will be part of generated client configuration.",
"display_name": "I/O Sort Memory Buffer (MiB)",
"name": "io_sort_mb",
"value": "256"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Client Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN Client Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "role_config_suppression_yarn_client_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "The ratio of heap size to container size for both map and reduce tasks. The heap should be smaller than the container size to allow for some overhead of the JVM.",
"display_name": "Heap to Container Size Ratio",
"name": "mapreduce_job_heap_memory_mb_ratio",
"value": "0.8"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Java Configuration Options parameter.",
"display_name": "Suppress Parameter Validation: Client Java Configuration Options",
"name": "role_config_suppression_mapreduce_client_java_opts",
"value": "false"
},
{
"desc": "The default number of parallel transfers run by reduce during the copy (shuffle) phase. This number should be between sqrt(nodes*number_of_map_slots_per_node) and nodes*number_of_map_slots_per_node/2. Will be part of generated client configuration.",
"display_name": "Default Number of Parallel Transfers During Shuffle",
"name": "mapred_reduce_parallel_copies",
"value": "10"
},
{
"desc": "The amount of physical memory, in MiB, allocated for each reduce task of a job. This parameter has no effect prior to CDH 4.4.",
"display_name": "Reduce Task Memory",
"name": "mapreduce_reduce_memory_mb",
"value": "0"
},
{
"desc": "The maximum permissible size of the split metainfo file. The JobTracker won't attempt to read split metainfo files bigger than the configured value. No limits if set to -1.",
"display_name": "JobTracker MetaInfo Maxsize",
"name": "mapreduce_jobtracker_split_metainfo_maxsize",
"value": "10000000"
},
{
"desc": "Compression level for the codec used to compress MapReduce outputs. Default compression is a balance between speed and compression ratio.",
"display_name": "Compression Level of Codecs",
"name": "zlib_compress_level",
"value": "DEFAULT_COMPRESSION"
},
{
"desc": "Whether to enable ubertask optimization, which runs \"sufficiently small\" jobs sequentially within a single JVM. \"Small\" is defined by the mapreduce.job.ubertask.maxmaps, mapreduce.job.ubertask.maxreduces, and mapreduce.job.ubertask.maxbytes settings.",
"display_name": "Enable Ubertask Optimization",
"name": "mapreduce_job_ubertask_enabled",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Gateway Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MR Application Environment parameter.",
"display_name": "Suppress Parameter Validation: MR Application Environment",
"name": "role_config_suppression_mapreduce_admin_user_env",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ApplicationMaster Environment parameter.",
"display_name": "Suppress Parameter Validation: ApplicationMaster Environment",
"name": "role_config_suppression_yarn_app_mapreduce_am_admin_user_env",
"value": "false"
},
{
"desc": "These are Java command line arguments. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Client Java Configuration Options",
"name": "mapreduce_client_java_opts",
"value": "-Djava.net.preferIPv4Stack=true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MR Application Classpath parameter.",
"display_name": "Suppress Parameter Validation: MR Application Classpath",
"name": "role_config_suppression_mapreduce_application_classpath",
"value": "false"
},
{
"desc": "The physical memory requirement, in MiB, for the ApplicationMaster.",
"display_name": "ApplicationMaster Memory",
"name": "yarn_app_mapreduce_am_resource_mb",
"value": "1024"
},
{
"desc": "For MapReduce map outputs that are compressed, specify the compression codec to use. Will be part of generated client configuration.",
"display_name": "Compression Codec of MapReduce Map Output",
"name": "mapred_map_output_compression_codec",
"value": "org.apache.hadoop.io.compress.SnappyCodec"
},
{
"desc": "The maximum Java heap size, in bytes, of the reduce processes. This number will be formatted and concatenated with 'Reduce Task Java Opts Base' to pass to Hadoop.",
"display_name": "Reduce Task Maximum Heap Size",
"name": "mapreduce_reduce_java_opts_max_heap",
"value": "0"
},
{
"desc": "Maximum size in bytes for the Java process heap memory. Passed to Java -Xmx.",
"display_name": "Client Java Heap Size in Bytes",
"name": "mapreduce_client_java_heapsize",
"value": "825955249"
},
{
"desc": "The number of milliseconds before a task will be terminated if it neither reads an input, writes an output, nor updates its status string.",
"display_name": "MapReduce Task Timeout",
"name": "mapred_task_timeout",
"value": "600000"
},
{
"desc": "The virtual CPU cores requirement, for the ApplicationMaster. This parameter has no effect prior to CDH 4.4.",
"display_name": "ApplicationMaster Virtual CPU Cores",
"name": "yarn_app_mapreduce_am_resource_cpu_vcores",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Map Task Java Opts Base parameter.",
"display_name": "Suppress Parameter Validation: Map Task Java Opts Base",
"name": "role_config_suppression_mapreduce_map_java_opts",
"value": "false"
},
{
"desc": "The amount of physical memory, in MiB, allocated for each map task of a job.",
"display_name": "Map Task Memory",
"name": "mapreduce_map_memory_mb",
"value": "0"
},
{
"desc": "Threshold for number of reduces, beyond which a job is considered too big for ubertask optimization. <strong>Note: As of CDH 5, MR2 does not support more than one reduce in an ubertask.</strong> (Zero is valid.)",
"display_name": "Ubertask Maximum Reduces",
"name": "mapreduce_job_ubertask_maxreduces",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MapReduce Client Advanced Configuration Snippet (Safety Valve) for mapred-site.xml parameter.",
"display_name": "Suppress Parameter Validation: MapReduce Client Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "role_config_suppression_mapreduce_client_config_safety_valve",
"value": "false"
},
{
"desc": "Classpaths to include for MapReduce applications. During evaluation, the string '{version}' in the value of this parameter will be replaced by the actual MapReduce version.",
"display_name": "MR Application Classpath",
"name": "mapreduce_application_classpath",
"value": "$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH"
},
{
"desc": "Java command line arguments passed to the MapReduce ApplicationMaster.",
"display_name": "ApplicationMaster Java Opts Base",
"name": "yarn_app_mapreduce_am_command_opts",
"value": "-Djava.net.preferIPv4Stack=true"
},
{
"desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
"display_name": "Enable Log Event Capture",
"name": "catch_events",
"value": "true"
},
{
"desc": "Maximum sleep time between failover attempts. Used only if RM HA is enabled.",
"display_name": "Client Failover Sleep Max Time",
"name": "client_failover_sleep_max",
"value": "2000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh parameter.",
"display_name": "Suppress Parameter Validation: Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh",
"name": "role_config_suppression_mapreduce_client_env_safety_valve",
"value": "false"
},
{
"desc": "Compress the output of MapReduce jobs. Will be part of generated client configuration.",
"display_name": "Compress MapReduce Job Output",
"name": "mapred_output_compress",
"value": "false"
},
{
"desc": "For MapReduce job outputs that are compressed as SequenceFiles, you can select one of these compression type options: NONE, RECORD or BLOCK. Cloudera recommends BLOCK. Will be part of generated client configuration.",
"display_name": "Compression Type of MapReduce Job Output",
"name": "mapred_output_compression_type",
"value": "BLOCK"
},
{
"desc": "Java opts for the map processes. The following symbol, if present, will be interpolated: @taskid@ is replaced by current TaskID. Any other occurrences of '@' will go unchanged. For example, to enable verbose gc logging to a file named for the taskid in /tmp pass a value of: \"-verbose:gc -Xloggc:/tmp/@taskid@.gc\". The configuration variable 'Map Task Memory' can be used to control the maximum memory of the map processes.",
"display_name": "Map Task Java Opts Base",
"name": "mapreduce_map_java_opts",
"value": "-Djava.net.preferIPv4Stack=true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ApplicationMaster Java Opts Base parameter.",
"display_name": "Suppress Parameter Validation: ApplicationMaster Java Opts Base",
"name": "role_config_suppression_yarn_app_mapreduce_am_command_opts",
"value": "false"
},
{
"desc": "Additional execution environment entries for map and reduce task processes.",
"display_name": "MR Application Environment",
"name": "mapreduce_admin_user_env",
"value": "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH"
},
{
"desc": "The soft limit in either the buffer or record collection buffers. When this limit is reached, a thread will begin to spill the contents to disk in the background. Note that this does not imply any chunking of data to the spill. A value less than 0.5 is not recommended. The syntax is in decimal units; the default is 80% and is formatted 0.8. Will be part of generated client configuration.",
"display_name": "I/O Sort Spill Percent",
"name": "io_sort_spill_percent",
"value": "0.8"
},
{
"desc": "The default number of reduce tasks per job. Will be part of generated client configuration.",
"display_name": "Default Number of Reduce Tasks per Job",
"name": "mapred_reduce_tasks",
"value": "1"
},
{
"desc": "Maximum allowed connections for the shuffle. Set to 0 (zero) to indicate no limit on the number of connections.",
"display_name": "Max Shuffle Connections",
"name": "mapreduce_shuffle_max_connections",
"value": "80"
},
{
"desc": "The maximum Java heap size, in bytes, of the map processes. This number will be formatted and concatenated with 'Map Task Java Opts Base' to pass to Hadoop.",
"display_name": "Map Task Maximum Heap Size",
"name": "mapreduce_map_java_opts_max_heap",
"value": "0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the I/O Sort Factor parameter.",
"display_name": "Suppress Parameter Validation: I/O Sort Factor",
"name": "role_config_suppression_io_sort_factor",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into the client configuration for <strong>hadoop-env.sh</strong>",
"display_name": "Gateway Client Environment Advanced Configuration Snippet (Safety Valve) for hadoop-env.sh",
"name": "mapreduce_client_env_safety_valve",
"value": null
},
{
"desc": "The number of virtual CPU cores for each reduce task of a job.",
"display_name": "Reduce Task CPU Virtual Cores",
"name": "mapreduce_reduce_cpu_vcores",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the Max Attempts Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Max Attempts Validator",
"name": "role_config_suppression_yarn_mapreduce_max_attempts_validator",
"value": "false"
},
{
"desc": "Environment variables for the MapReduce ApplicationMaster. These settings can be overridden in the ApplicationMaster User Environment (<strong>yarn.app.mapreduce.am.env</strong>).",
"display_name": "ApplicationMaster Environment",
"name": "yarn_app_mapreduce_am_admin_user_env",
"value": "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH"
},
{
"desc": "Java opts for the reduce processes. The following symbol, if present, will be interpolated: @taskid@ is replaced by current TaskID. Any other occurrences of '@' will go unchanged. For example, to enable verbose gc logging to a file named for the taskid in /tmp pass a value of: \"-verbose:gc -Xloggc:/tmp/@taskid@.gc\". The configuration variable 'Reduce Task Memory' can be used to control the maximum memory of the reduce processes.",
"display_name": "Reduce Task Java Opts Base",
"name": "mapreduce_reduce_java_opts",
"value": "-Djava.net.preferIPv4Stack=true"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "If enabled, uses compression on the map outputs before they are sent across the network. Will be part of generated client configuration.",
"display_name": "Use Compression on Map Outputs",
"name": "mapred_compress_map_output",
"value": "true"
},
{
"desc": "The maximum number of application attempts for MapReduce jobs. The value of this parameter overrides ApplicationMaster Maximum Attempts for MapReduce jobs.",
"display_name": "Maximum Number of Attempts for MapReduce Jobs",
"name": "mapreduce_am_max_attempts",
"value": "2"
},
{
"desc": "The minimum log level for Gateway logs",
"display_name": "Gateway Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Threshold for number of maps, beyond which a job is considered too big for ubertask optimization.",
"display_name": "Ubertask Maximum Maps",
"name": "mapreduce_job_ubertask_maxmaps",
"value": "9"
},
{
"desc": "If enabled, multiple instances of some map tasks may be executed in parallel.",
"display_name": "Map Tasks Speculative Execution",
"name": "mapred_map_tasks_speculative_execution",
"value": "false"
},
{
"desc": "Threshold for number of input bytes, beyond which a job is considered too big for ubertask optimization. If no value is specified, dfs.block.size is used as a default.",
"display_name": "Ubertask Maximum Job Size",
"name": "mapreduce_job_ubertask_maxbytes",
"value": null
}
]

View File

@ -1,464 +0,0 @@
[
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of JobHistory Server in Bytes",
"name": "mr2_jobhistory_java_heapsize",
"value": "1073741824"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for JobHistory Server",
"name": "mr2_jobhistory_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Kerberos principal used by the JobHistory Server roles.",
"display_name": "Role-Specific Kerberos Principal",
"name": "kerberos_role_princ_name",
"value": "mapred"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_jobhistory_log_directory_free_space",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Enables the health test that the JobHistory Server's process state is consistent with the role configuration",
"display_name": "JobHistory Server Process Health Test",
"name": "jobhistory_scm_health_enabled",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the GC Duration heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_jobhistory_gc_duration",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_jobhistory_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_jobhistory_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "role_config_suppression_history_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role-Specific Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Role-Specific Kerberos Principal",
"name": "role_config_suppression_kerberos_role_princ_name",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_jobhistory_host_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JobHistory Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: JobHistory Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Directory where JobHistory Server will place its log files.",
"display_name": "JobHistory Server Log Directory",
"name": "mr2_jobhistory_log_dir",
"value": "/var/log/hadoop-mapreduce"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_jobhistory_swap_memory_usage",
"value": "false"
},
{
"desc": "Time interval for history cleaner to check for files to delete. Files are only deleted if they are older than mapreduce.jobhistory.max-age-ms.",
"display_name": "Job History Files Cleaner Interval",
"name": "mapreduce_jobhistory_cleaner_interval",
"value": "86400000"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "jobhistory_gc_duration_window",
"value": "5"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "When computing the overall JobHistory Server health, consider the host's health.",
"display_name": "JobHistory Server Host Health Test",
"name": "jobhistory_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JobHistory Server Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: JobHistory Server Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "role_config_suppression_jobhistory_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the MapReduce ApplicationMaster Staging Root Directory parameter.",
"display_name": "Suppress Parameter Validation: MapReduce ApplicationMaster Staging Root Directory",
"name": "role_config_suppression_yarn_app_mapreduce_am_staging_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "JobHistory Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JobHistory Server Log Directory parameter.",
"display_name": "Suppress Parameter Validation: JobHistory Server Log Directory",
"name": "role_config_suppression_mr2_jobhistory_log_dir",
"value": "false"
},
{
"desc": "The port of the MapReduce JobHistory Server administrative interface. Together with the host name of the JobHistory role forms the address.",
"display_name": "MapReduce JobHistory Server Admin Interface Port",
"name": "mapreduce_jobhistory_admin_address",
"value": "10033"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_jobhistory_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_jobhistory_file_descriptor",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>yarn-site.xml</strong> for this role only.",
"display_name": "JobHistory Server Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "jobhistory_config_safety_valve",
"value": null
},
{
"desc": "Job history files older than this time duration will be deleted when the history cleaner runs.",
"display_name": "Job History Files Maximum Age",
"name": "mapreduce_jobhistory_max_age_ms",
"value": "604800000"
},
{
"desc": "The group that the JobHistory Server process should run as.",
"display_name": "System Group",
"name": "history_process_groupname",
"value": "hadoop"
},
{
"desc": "The port of the MapReduce JobHistory Server. Together with the hostname of the JobHistory role, forms the address.",
"display_name": "MapReduce JobHistory Server Port",
"name": "mapreduce_jobhistory_address",
"value": "10020"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "role_config_suppression_history_process_username",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "jobhistory_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The HTTP port of the MapReduce JobHistory Server web application. Together with the host name of the JobHistory role forms the address.",
"display_name": "MapReduce JobHistory Web Application HTTP Port",
"name": "mapreduce_jobhistory_webapp_address",
"value": "19888"
},
{
"desc": "The root HDFS directory of the staging area for users' MR2 jobs; for example /user. The staging directories are always named after the user.",
"display_name": "MapReduce ApplicationMaster Staging Root Directory",
"name": "yarn_app_mapreduce_am_staging_dir",
"value": "/user"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "jobhistory_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The minimum log level for JobHistory Server logs",
"display_name": "JobHistory Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The maximum size, in megabytes, per log file for JobHistory Server logs. Typically used by log4j or logback.",
"display_name": "JobHistory Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mapred-site.xml</strong> for this role only.",
"display_name": "JobHistory Server Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "jobhistory_mapred_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JobHistory Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: JobHistory Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_jobhistory_role_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "JobHistory Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "JOBHISTORY_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for JobHistory Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for JobHistory Server",
"name": "role_config_suppression_mr2_jobhistory_java_opts",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for JobHistory Server logs. Typically used by log4j or logback.",
"display_name": "JobHistory Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "If enabled, the JobHistory Server binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind JobHistory Server to Wildcard Address",
"name": "yarn_jobhistory_bind_wildcard",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The user that the JobHistory Server process should run as.",
"display_name": "System User",
"name": "history_process_username",
"value": "mapred"
},
{
"desc": "The HTTPS port of the MapReduce JobHistory Server web application. Together with the host name of the JobHistory role forms the address.",
"display_name": "MapReduce JobHistory Web Application HTTPS Port (TLS/SSL)",
"name": "mapreduce_jobhistory_webapp_https_address",
"value": "19890"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the JobHistory Server Advanced Configuration Snippet (Safety Valve) for mapred-site.xml parameter.",
"display_name": "Suppress Parameter Validation: JobHistory Server Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "role_config_suppression_jobhistory_mapred_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "jobhistory_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "jobhistory_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_jobhistory_web_metric_collection",
"value": "false"
}
]

View File

@ -1,728 +0,0 @@
[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Containers Environment Variable parameter.",
"display_name": "Suppress Parameter Validation: Containers Environment Variable",
"name": "role_config_suppression_yarn_nodemanager_admin_env",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for NodeManager parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for NodeManager",
"name": "role_config_suppression_node_manager_java_opts",
"value": "false"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Allowed System Users parameter.",
"display_name": "Suppress Parameter Validation: Allowed System Users",
"name": "role_config_suppression_container_executor_allowed_system_users",
"value": "false"
},
{
"desc": "When computing the overall NodeManager health, consider the host's health.",
"display_name": "NodeManager Host Health Test",
"name": "nodemanager_host_health_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the NodeManager's process state is consistent with the role configuration",
"display_name": "NodeManager Process Health Test",
"name": "nodemanager_scm_health_enabled",
"value": "true"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_node_manager_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Local Directories parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Local Directories",
"name": "role_config_suppression_yarn_nodemanager_local_dirs",
"value": "false"
},
{
"desc": "Whether to suppress the results of the ResourceManager Connectivity health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: ResourceManager Connectivity",
"name": "role_health_suppression_node_manager_connectivity",
"value": "false"
},
{
"desc": "Number of threads to use for localization fetching.",
"display_name": "Localizer Fetch Thread Count",
"name": "yarn_nodemanager_localizer_fetch_thread_count",
"value": "4"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "nodemanager_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Recovery Directory.",
"display_name": "NodeManager Recovery Directory Free Space Monitoring Absolute Thresholds",
"name": "nodemanager_recovery_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "NodeManager Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Time in seconds to retain user logs. Only applicable if log aggregation is disabled.",
"display_name": "Log Retain Duration",
"name": "yarn_nodemanager_log_retain_seconds",
"value": "10800"
},
{
"desc": "Number of threads container manager uses.",
"display_name": "Container Manager Thread Count",
"name": "yarn_nodemanager_container_manager_thread_count",
"value": "20"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Address where the localizer IPC is.",
"display_name": "Localizer Cache Cleanup Interval",
"name": "yarn_nodemanager_localizer_cache_cleanup_interval_ms",
"value": "600000"
},
{
"desc": "List of users banned from running containers.",
"display_name": "Banned System Users",
"name": "container_executor_banned_users",
"value": "hdfs,yarn,mapred,bin"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Container Log Directories parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Container Log Directories",
"name": "role_config_suppression_yarn_nodemanager_log_dirs",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Banned System Users parameter.",
"display_name": "Suppress Parameter Validation: Banned System Users",
"name": "role_config_suppression_container_executor_banned_users",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Local Directories.",
"display_name": "NodeManager Local Directories Free Space Monitoring Absolute Thresholds",
"name": "nodemanager_local_data_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mapred-site.xml</strong> for this role only.",
"display_name": "NodeManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "nodemanager_mapred_safety_valve",
"value": null
},
{
"desc": "Amount of physical memory, in MiB, that can be allocated for containers.",
"display_name": "Container Memory",
"name": "yarn_nodemanager_resource_memory_mb",
"value": "8192"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Comma-separated list of arguments which are to be passed to node health script when it is being launched.",
"display_name": "Healthchecker Script Arguments",
"name": "mapred_healthchecker_script_args",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_node_manager_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_node_manager_unexpected_exits",
"value": "false"
},
{
"desc": "If enabled, adds 'org.apache.hadoop.mapred.ShuffleHandler' to the NodeManager auxiliary services. This is required for MapReduce applications.",
"display_name": "Enable Shuffle Auxiliary Service",
"name": "mapreduce_aux_service",
"value": "true"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Container Log Directories. Specified as a percentage of the capacity on that filesystem. This setting is not used if a NodeManager Container Log Directories Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "NodeManager Container Log Directories Free Space Monitoring Percentage Thresholds",
"name": "nodemanager_log_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Log Directory parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Log Directory",
"name": "role_config_suppression_node_manager_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The minimum Linux user ID allowed. Used to prevent other super users.",
"display_name": "Minimum User ID",
"name": "container_executor_min_user_id",
"value": "1000"
},
{
"desc": "The maximum size, in megabytes, per log file for NodeManager logs. Typically used by log4j or logback.",
"display_name": "NodeManager Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "List of users explicitly whitelisted to be allowed to run containers. Users with IDs lower than the \"Minimum User Id\" setting may be whitelisted by using this setting.",
"display_name": "Allowed System Users",
"name": "container_executor_allowed_system_users",
"value": "nobody,impala,hive,llama"
},
{
"desc": "Directory where NodeManager will place its log files.",
"display_name": "NodeManager Log Directory",
"name": "node_manager_log_dir",
"value": "/var/log/hadoop-yarn"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Recovery Directory parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Recovery Directory",
"name": "role_config_suppression_yarn_nodemanager_recovery_dir",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "nodemanager_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "Environment variables that containers may override rather than use NodeManager's default.",
"display_name": "Containers Environment Variables Whitelist ",
"name": "yarn_nodemanager_env_whitelist",
"value": "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME"
},
{
"desc": "The address of the NodeManager IPC.",
"display_name": "NodeManager IPC Address",
"name": "yarn_nodemanager_address",
"value": "8041"
},
{
"desc": "Environment variables that should be forwarded from the NodeManager's environment to the container's.",
"display_name": "Containers Environment Variable",
"name": "yarn_nodemanager_admin_env",
"value": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the CGroups Hierarchy parameter.",
"display_name": "Suppress Parameter Validation: CGroups Hierarchy",
"name": "role_config_suppression_linux_container_executor_cgroups_hierarchy",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_node_manager_web_metric_collection",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Remote App Log Directory Suffix parameter.",
"display_name": "Suppress Parameter Validation: Remote App Log Directory Suffix",
"name": "role_config_suppression_yarn_nodemanager_remote_app_log_dir_suffix",
"value": "false"
},
{
"desc": "Number of threads used in cleanup.",
"display_name": "Cleanup Thread Count",
"name": "yarn_nodemanager_delete_thread_count",
"value": "4"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_node_manager_swap_memory_usage",
"value": "false"
},
{
"desc": "Target size of localizer cache in MB, per local directory.",
"display_name": "Localizer Cache Target Size",
"name": "yarn_nodemanager_localizer_cache_target_size_mb",
"value": "10240"
},
{
"desc": "Absolute path to the script which is periodically run by the node health monitoring service to determine if the node is healthy or not. If the value of this key is empty or the file does not exist in the location configured here, the node health monitoring service is not started.",
"display_name": "Healthchecker Script Path",
"name": "mapred_healthchecker_script_path",
"value": ""
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Recovery Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a NodeManager Recovery Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "NodeManager Recovery Directory Free Space Monitoring Percentage Thresholds",
"name": "nodemanager_recovery_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "role_config_suppression_nodemanager_config_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "nodemanager_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Healthchecker Script Path parameter.",
"display_name": "Suppress Parameter Validation: Healthchecker Script Path",
"name": "role_config_suppression_mapred_healthchecker_script_path",
"value": "false"
},
{
"desc": "The HTTPS port of the NodeManager web application.",
"display_name": "NodeManager Web Application HTTPS Port (TLS/SSL)",
"name": "nodemanager_webserver_https_port",
"value": "8044"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "nodemanager_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>yarn-site.xml</strong> for this role only.",
"display_name": "NodeManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "nodemanager_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_node_manager_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "The local filesystem directory in which the NodeManager stores state when recovery is enabled. Recovery is enabled by default.",
"display_name": "NodeManager Recovery Directory",
"name": "yarn_nodemanager_recovery_dir",
"value": "/var/lib/hadoop-yarn/yarn-nm-recovery"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the NodeManager Local Directories Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NodeManager Local Directories Free Space",
"name": "role_health_suppression_nodemanager_local_data_directories_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "HDFS directory where application logs are stored when an application completes.",
"display_name": "Remote App Log Directory",
"name": "yarn_nodemanager_remote_app_log_dir",
"value": "/tmp/logs"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Whether to suppress the results of the GC Duration heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_node_manager_gc_duration",
"value": "false"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "nodemanager_gc_duration_window",
"value": "5"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_nodemanager_role_env_safety_valve",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "Path (rooted in the cgroups hierarchy on the machine) where to place YARN-managed cgroups.",
"display_name": "CGroups Hierarchy",
"name": "linux_container_executor_cgroups_hierarchy",
"value": "/hadoop-yarn"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for NodeManager",
"name": "node_manager_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "Number of threads to handle localization requests.",
"display_name": "Localizer Client Thread Count",
"name": "yarn_nodemanager_localizer_client_thread_count",
"value": "5"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Local Directories. Specified as a percentage of the capacity on that filesystem. This setting is not used if a NodeManager Local Directories Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "NodeManager Local Directories Free Space Monitoring Percentage Thresholds",
"name": "nodemanager_local_data_directories_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the Single User Mode Overrides Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Single User Mode Overrides Validator",
"name": "role_config_suppression_single_user_mode_override_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the NodeManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml parameter.",
"display_name": "Suppress Parameter Validation: NodeManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "role_config_suppression_nodemanager_mapred_safety_valve",
"value": "false"
},
{
"desc": "The HTTP Port of the NodeManager web application.",
"display_name": "NodeManager Web Application HTTP Port",
"name": "nodemanager_webserver_port",
"value": "8042"
},
{
"desc": "Heartbeat interval to ResourceManager",
"display_name": "Heartbeat Interval",
"name": "yarn_nodemanager_heartbeat_interval_ms",
"value": "1000"
},
{
"desc": "The amount of time to wait for the NodeManager to fully start up and connect to the ResourceManager before enforcing the connectivity check.",
"display_name": "NodeManager Connectivity Tolerance at Startup",
"name": "nodemanager_connectivity_tolerance_seconds",
"value": "180"
},
{
"desc": "List of directories on the local filesystem where a NodeManager stores intermediate data files.",
"display_name": "NodeManager Local Directories",
"name": "yarn_nodemanager_local_dirs",
"value": null
},
{
"desc": "Enables the health check that verifies the NodeManager is connected to the ResourceManager",
"display_name": "NodeManager Connectivity Health Check",
"name": "nodemanager_connectivity_health_enabled",
"value": "true"
},
{
"desc": "Number of seconds after an application finishes before the NodeManager's DeletionService will delete the application's localized file and log directory. To diagnose YARN application problems, set this property's value large enough (for example, to 600 = 10 minutes) to permit examination of these directories.",
"display_name": "Localized Dir Deletion Delay",
"name": "yarn_nodemanager_delete_debug_delay_sec",
"value": "0"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Healthchecker Script Arguments parameter.",
"display_name": "Suppress Parameter Validation: Healthchecker Script Arguments",
"name": "role_config_suppression_mapred_healthchecker_script_args",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for NodeManager logs. Typically used by log4j or logback.",
"display_name": "NodeManager Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the NodeManager Container Log Directories Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NodeManager Container Log Directories Free Space",
"name": "role_health_suppression_nodemanager_log_directories_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Remote App Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Remote App Log Directory",
"name": "role_config_suppression_yarn_nodemanager_remote_app_log_dir",
"value": "false"
},
{
"desc": "List of directories on the local filesystem where a NodeManager stores container log files.",
"display_name": "NodeManager Container Log Directories",
"name": "yarn_nodemanager_log_dirs",
"value": "/var/log/hadoop-yarn/container"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "NodeManager Environment Advanced Configuration Snippet (Safety Valve)",
"name": "NODEMANAGER_role_env_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the NodeManager Recovery Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NodeManager Recovery Directory Free Space",
"name": "role_health_suppression_nodemanager_recovery_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_node_manager_log_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Address where the localizer IPC is.",
"display_name": "Localizer Port",
"name": "yarn_nodemanager_localizer_address",
"value": "8040"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Container Executor Group parameter.",
"display_name": "Suppress Parameter Validation: Container Executor Group",
"name": "role_config_suppression_container_executor_group",
"value": "false"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Containers Environment Variables Whitelist parameter.",
"display_name": "Suppress Parameter Validation: Containers Environment Variables Whitelist ",
"name": "role_config_suppression_yarn_nodemanager_env_whitelist",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The minimum log level for NodeManager logs",
"display_name": "NodeManager Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The remote log dir will be created at {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}",
"display_name": "Remote App Log Directory Suffix",
"name": "yarn_nodemanager_remote_app_log_dir_suffix",
"value": "logs"
},
{
"desc": "The system group that owns the container-executor binary. This does not need to be changed unless the ownership of the binary is explicitly changed.",
"display_name": "Container Executor Group",
"name": "container_executor_group",
"value": "yarn"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_node_manager_host_health",
"value": "false"
},
{
"desc": "Number of virtual CPU cores that can be allocated for containers. This parameter has no effect prior to CDH 4.4.",
"display_name": "Container Virtual CPU Cores",
"name": "yarn_nodemanager_resource_cpu_vcores",
"value": "8"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's NodeManager Container Log Directories.",
"display_name": "NodeManager Container Log Directories Free Space Monitoring Absolute Thresholds",
"name": "nodemanager_log_directories_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Maximum allowed threads for serving shuffle connections. Set to zero to indicate the default of 2 times the number of available processors.",
"display_name": "Max Shuffle Threads",
"name": "mapreduce_shuffle_max_threads",
"value": "80"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of NodeManager in Bytes",
"name": "node_manager_java_heapsize",
"value": "1073741824"
}
]

View File

@ -1,668 +0,0 @@
[
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "resourcemanager_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "For applications that request containers on particular racks, the minimum time in milliseconds the Fair Scheduler waits before accepting a placement on another rack. Requires Fair Scheduler continuous scheduling to be enabled. If continuous scheduling is disabled, yarn.scheduler.fair.locality.threshold.rack should be used instead.",
"display_name": "Fair Scheduler Rack Locality Delay",
"name": "yarn_scheduler_fair_locality_delay_rack_ms",
"value": "4000"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "An XML string that will be inserted verbatim into the Fair Scheduler allocations file. For CDH5, overrides the configuration set using the Pools configuration UI. For CDH4, this is the only way to configure the Fair Scheduler for YARN.",
"display_name": "Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)",
"name": "resourcemanager_fair_scheduler_configuration",
"value": null
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "The expiry interval to wait until a NodeManager is considered dead.",
"display_name": "NodeManager Monitor Expiry",
"name": "yarn_nm_liveness_monitor_expiry_interval_ms",
"value": "600000"
},
{
"desc": "The HTTP port of the ResourceManager web application.",
"display_name": "ResourceManager Web Application HTTP Port",
"name": "resourcemanager_webserver_port",
"value": "8088"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The address of the applications manager interface in the ResourceManager.",
"display_name": "ResourceManager Address",
"name": "yarn_resourcemanager_address",
"value": "8032"
},
{
"desc": "Enables the health test that the ResourceManager's process state is consistent with the role configuration",
"display_name": "ResourceManager Process Health Test",
"name": "resourcemanager_scm_health_enabled",
"value": "true"
},
{
"desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
"display_name": "Web Metric Collection",
"name": "resourcemanager_web_metric_collection_enabled",
"value": "true"
},
{
"desc": "The periodic interval that the ResourceManager will check whether ApplicationMasters is still alive.",
"display_name": "ApplicationMaster Monitor Interval",
"name": "yarn_resourcemanager_amliveliness_monitor_interval_ms",
"value": "1000"
},
{
"desc": "Number of threads used to handle the ResourceManager admin interface.",
"display_name": "Admin Client Thread Count",
"name": "yarn_resourcemanager_admin_client_thread_count",
"value": "1"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "If enabled, the ResourceManager binds to the wildcard address (\"0.0.0.0\") on all of its ports.",
"display_name": "Bind ResourceManager to Wildcard Address",
"name": "yarn_rm_bind_wildcard",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Fair Scheduler XML Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Fair Scheduler XML Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_resourcemanager_fair_scheduler_configuration",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_resourcemanager_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_resource_manager_file_descriptor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "The address of the admin interface in the ResourceManager.",
"display_name": "Administration Address",
"name": "yarn_resourcemanager_admin_address",
"value": "8033"
},
{
"desc": "The periodic interval that the ResourceManager will check whether NodeManagers are still alive.",
"display_name": "NodeManager Monitor Interval",
"name": "yarn_resourcemanager_nm_liveness_monitor_interval_ms",
"value": "1000"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_resource_manager_swap_memory_usage",
"value": "false"
},
{
"desc": "The maximum number of application attempts. This is a global setting for all ApplicationMasters. Each ApplicationMaster can specify its individual maximum through the API, but if the individual maximum is more than the global maximum, the ResourceManager overrides it.",
"display_name": "ApplicationMaster Maximum Attempts",
"name": "yarn_resourcemanager_am_max_retries",
"value": "2"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>nodes_exclude.txt</strong> for this role only.",
"display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_exclude.txt",
"name": "rm_hosts_exclude_safety_valve",
"value": null
},
{
"desc": "Enable continuous scheduling in the Fair Scheduler. When enabled, scheduling decisions are decoupled from NodeManager heartbeats, leading to faster resource allocations.",
"display_name": "Enable Fair Scheduler Continuous Scheduling",
"name": "yarn_scheduler_fair_continuous_scheduling_enabled",
"value": "true"
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for ResourceManager",
"name": "resource_manager_java_opts",
"value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
},
{
"desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Web Server Status",
"name": "role_health_suppression_resource_manager_web_metric_collection",
"value": "false"
},
{
"desc": "The periodic interval that the ResourceManager will check whether containers are still alive.",
"display_name": "Container Monitor Interval",
"name": "yarn_resourcemanager_container_liveness_monitor_interval_ms",
"value": "600000"
},
{
"desc": "The maximum size, in megabytes, per log file for ResourceManager logs. Typically used by log4j or logback.",
"display_name": "ResourceManager Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>mapred-site.xml</strong> for this role only.",
"display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "resourcemanager_mapred_safety_valve",
"value": null
},
{
"desc": "For applications that request containers on particular racks, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another rack. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. If not set, this means don't pass up any scheduling opportunities. Requires Fair Scheduler continuous scheduling to be disabled. If continuous scheduling is enabled, yarn.scheduler.fair.locality-delay-rack-ms should be used instead.",
"display_name": "Fair Scheduler Rack Locality Threshold",
"name": "resourcemanager_fair_scheduler_locality_threshold_rack",
"value": null
},
{
"desc": "The class to use as the resource scheduler. FairScheduler is only supported in CDH 4.2.1 and later.",
"display_name": "Scheduler Class",
"name": "yarn_resourcemanager_scheduler_class",
"value": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"
},
{
"desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
"display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "hadoop_metrics2_safety_valve",
"value": null
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "When enabled, any applications that were running on the cluster when the ResourceManager died will be recovered when the ResourceManager next starts. <strong>Note:</strong> If RM-HA is enabled, then this configuration is always enabled.",
"display_name": "Enable ResourceManager Recovery",
"name": "yarn_resourcemanager_recovery_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ApplicationMaster Maximum Attempts parameter.",
"display_name": "Suppress Parameter Validation: ApplicationMaster Maximum Attempts",
"name": "role_config_suppression_yarn_resourcemanager_am_max_retries",
"value": "false"
},
{
"desc": "The expiry interval to wait until an ApplicationMaster is considered dead.",
"display_name": "ApplicationMaster Monitor Expiry",
"name": "yarn_am_liveness_monitor_expiry_interval_ms",
"value": "600000"
},
{
"desc": "Enter an XML string that represents the Capacity Scheduler configuration.",
"display_name": "Capacity Scheduler Configuration Advanced Configuration Snippet (Safety Valve)",
"name": "resourcemanager_capacity_scheduler_configuration",
"value": "<?xml version=\"1.0\"?>\n<configuration>\n <property>\n <name>yarn.scheduler.capacity.root.queues</name>\n <value>default</value>\n </property>\n <property>\n <name>yarn.scheduler.capacity.root.capacity</name>\n <value>100</value>\n </property>\n <property>\n <name>yarn.scheduler.capacity.root.default.capacity</name>\n <value>100</value>\n </property>\n</configuration>\n"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_hadoop_metrics2_safety_valve",
"value": "false"
},
{
"desc": "The largest amount of physical memory, in MiB, that can be requested for a container.",
"display_name": "Container Memory Maximum",
"name": "yarn_scheduler_maximum_allocation_mb",
"value": "65536"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Log Directory parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Log Directory",
"name": "role_config_suppression_resource_manager_log_dir",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "When enabled, if a pool's minimum share is not met for some period of time, the Fair Scheduler preempts applications in other pools. Preemption guarantees that production applications are not starved while also allowing the cluster to be used for experimental and research applications. To minimize wasted computation, the Fair Scheduler preempts the most recently launched applications.",
"display_name": "Fair Scheduler Preemption",
"name": "resourcemanager_fair_scheduler_preemption",
"value": "false"
},
{
"desc": "For applications that request containers on particular nodes, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another node. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. If not set, this means don't pass up any scheduling opportunities. Requires Fair Scheduler continuous scheduling to be disabled. If continuous scheduling is enabled, yarn.scheduler.fair.locality-delay-node-ms should be used instead.",
"display_name": "Fair Scheduler Node Locality Threshold",
"name": "resourcemanager_fair_scheduler_locality_threshold_node",
"value": null
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "For applications that request containers on particular nodes, the minimum time in milliseconds the Fair Scheduler waits before accepting a placement on another node. Requires Fair Scheduler continuous scheduling to be enabled. If continuous scheduling is disabled, yarn.scheduler.fair.locality.threshold.node should be used instead.",
"display_name": "Fair Scheduler Node Locality Delay",
"name": "yarn_scheduler_fair_locality_delay_node_ms",
"value": "2000"
},
{
"desc": "The minimum log level for ResourceManager logs",
"display_name": "ResourceManager Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "The amount of time allowed after this role is started that failures of health checks that rely on communication with this role will be tolerated.",
"display_name": "Health Check Startup Tolerance",
"name": "resourcemanager_startup_tolerance_minutes",
"value": "5"
},
{
"desc": "When enabled, ResourceManager has proxy user privileges.",
"display_name": "Enable ResourceManger Proxy User Privileges",
"name": "yarn_resourcemanager_proxy_user_privileges_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "role_config_suppression_resourcemanager_config_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>nodes_allow.txt</strong> for this role only.",
"display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_allow.txt",
"name": "rm_hosts_allow_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_resource_manager_scm_health",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_resource_manager_log_directory_free_space",
"value": "false"
},
{
"desc": "The maximum number of completed applications that the ResourceManager keeps.",
"display_name": "Max Completed Applications",
"name": "yarn_resourcemanager_max_completed_applications",
"value": "10000"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>yarn-site.xml</strong> for this role only.",
"display_name": "ResourceManager Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "resourcemanager_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Capacity Scheduler Configuration Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Capacity Scheduler Configuration Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_resourcemanager_capacity_scheduler_configuration",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_allow.txt parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_allow.txt",
"name": "role_config_suppression_rm_hosts_allow_safety_valve",
"value": "false"
},
{
"desc": "The address of the scheduler interface in the ResourceManager.",
"display_name": "Scheduler Address",
"name": "yarn_resourcemanager_scheduler_address",
"value": "8030"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "resourcemanager_gc_duration_window",
"value": "5"
},
{
"desc": "The timeout for the ResourceManager session with ZooKeeper. The session expires if the ZooKeeper ensemble does not hear from the ResourceManager within the specified timeout period (no heartbeat). Session expiration is managed by the ZooKeeper ensemble, not by the ResourceManager.",
"display_name": "ZooKeeper Session Timeout",
"name": "yarn_resourcemanager_zk_timeout_ms",
"value": "60000"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_resource_manager_host_health",
"value": "false"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "resourcemanager_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "Whether to suppress the results of the GC Duration heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_resource_manager_gc_duration",
"value": "false"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "The smallest number of virtual CPU cores that can be requested for a container. If using the Capacity or FIFO scheduler (or any scheduler, prior to CDH 5), virtual core requests will be rounded up to the nearest multiple of this number. This parameter has no effect prior to CDH 4.4.",
"display_name": "Container Virtual CPU Cores Minimum",
"name": "yarn_scheduler_minimum_allocation_vcores",
"value": "1"
},
{
"desc": "If using the Fair Scheduler, memory requests will be rounded up to the nearest multiple of this number. This parameter has no effect prior to CDH 5.",
"display_name": "Container Memory Increment",
"name": "yarn_scheduler_increment_allocation_mb",
"value": "512"
},
{
"desc": "When computing the overall ResourceManager health, consider the host's health.",
"display_name": "ResourceManager Host Health Test",
"name": "resourcemanager_host_health_enabled",
"value": "true"
},
{
"desc": "When enabled, the Fair Scheduler will assign shares to individual apps based on their size, rather than providing an equal share to all apps regardless of size.",
"display_name": "Fair Scheduler Size-Based Weight",
"name": "resourcemanager_fair_scheduler_size_based_weight",
"value": "false"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The address of the resource tracker interface in the ResourceManager.",
"display_name": "Resource Tracker Address",
"name": "yarn_resourcemanager_resource_tracker_address",
"value": "8031"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_resource_manager_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_exclude.txt parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Advanced Configuration Snippet (Safety Valve) for nodes_exclude.txt",
"name": "role_config_suppression_rm_hosts_exclude_safety_valve",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "ResourceManager Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Enables multiple Fair Scheduler container assignments in one heartbeat, which improves cluster throughput when there are many small tasks to run.",
"display_name": "Fair Scheduler Assign Multiple Tasks",
"name": "resourcemanager_fair_scheduler_assign_multiple",
"value": "false"
},
{
"desc": "The health test thresholds on the duration of the metrics request to the web server.",
"display_name": "Web Metric Collection Duration",
"name": "resourcemanager_web_metric_collection_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "role_config_suppression_resourcemanager_mapred_safety_valve",
"value": "false"
},
{
"desc": "Directory where ResourceManager will place its log files.",
"display_name": "ResourceManager Log Directory",
"name": "resource_manager_log_dir",
"value": "/var/log/hadoop-yarn"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "Number of threads to handle resource tracker calls.",
"display_name": "Resource Tracker Thread Count",
"name": "yarn_resourcemanager_resource_tracker_client_thread_count",
"value": "50"
},
{
"desc": "If using the Fair Scheduler, virtual core requests will be rounded up to the nearest multiple of this number. This parameter has no effect prior to CDH 5.",
"display_name": "Container Virtual CPU Cores Increment",
"name": "yarn_scheduler_increment_allocation_vcores",
"value": "1"
},
{
"desc": "The HTTPS port of the ResourceManager web application.",
"display_name": "ResourceManager Web Application HTTPS Port (TLS/SSL)",
"name": "resourcemanager_webserver_https_port",
"value": "8090"
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of ResourceManager in Bytes",
"name": "resource_manager_java_heapsize",
"value": "1073741824"
},
{
"desc": "The number of threads used to handle applications manager requests.",
"display_name": "Client Thread Count",
"name": "yarn_resourcemanager_client_thread_count",
"value": "50"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_resource_manager_unexpected_exits",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "The maximum number of rolled log files to keep for ResourceManager logs. Typically used by log4j or logback.",
"display_name": "ResourceManager Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "ResourceManager Environment Advanced Configuration Snippet (Safety Valve)",
"name": "RESOURCEMANAGER_role_env_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for ResourceManager parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for ResourceManager",
"name": "role_config_suppression_resource_manager_java_opts",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "The number of threads used to handle requests through the scheduler interface.",
"display_name": "Scheduler Thread Count",
"name": "yarn_resourcemanager_scheduler_client_thread_count",
"value": "50"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "When set to <b>true</b>, the Fair Scheduler uses the username as the default pool name, in the event that a pool name is not specified. When set to <b>false</b>, all applications are run in a shared pool, called <b>default</b>.",
"display_name": "Fair Scheduler User As Default Queue",
"name": "resourcemanager_fair_scheduler_user_as_default_queue",
"value": "true"
},
{
"desc": "The largest number of virtual CPU cores that can be requested for a container. This parameter has no effect prior to CDH 4.4.",
"display_name": "Container Virtual CPU Cores Maximum",
"name": "yarn_scheduler_maximum_allocation_vcores",
"value": "32"
},
{
"desc": "The smallest amount of physical memory, in MiB, that can be requested for a container. If using the Capacity or FIFO scheduler (or any scheduler, prior to CDH 5), memory requests will be rounded up to the nearest multiple of this number.",
"display_name": "Container Memory Minimum",
"name": "yarn_scheduler_minimum_allocation_mb",
"value": "1024"
}
]

View File

@ -1,512 +0,0 @@
[
{
"desc": "ACL that determines which users and groups can submit and kill applications in any pool, and can issue commands on ResourceManager roles.",
"display_name": "Admin ACL",
"name": "yarn_admin_acl",
"value": "*"
},
{
"desc": "The health test thresholds of the overall NodeManager health. The check returns \"Concerning\" health if the percentage of \"Healthy\" NodeManagers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" NodeManagers falls below the critical threshold.",
"display_name": "Healthy NodeManager Monitoring Thresholds",
"name": "yarn_nodemanagers_healthy_thresholds",
"value": "{\"critical\":\"90.0\",\"warning\":\"95.0\"}"
},
{
"desc": "Name of the ZooKeeper service that this YARN service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "JSON representation of all the configurations that the Fair Scheduler can take on across all schedules. Typically edited using the Pools configuration UI.",
"display_name": "Fair Scheduler Allocations",
"name": "yarn_fs_scheduled_allocations",
"value": "{\"defaultMinSharePreemptionTimeout\":null,\"defaultQueueSchedulingPolicy\":null,\"fairSharePreemptionTimeout\":null,\"queueMaxAMShareDefault\":null,\"queueMaxAppsDefault\":null,\"queuePlacementRules\":null,\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"root\",\"queues\":[{\"aclAdministerApps\":null,\"aclSubmitApps\":null,\"minSharePreemptionTimeout\":null,\"name\":\"default\",\"queues\":[],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxAMShare\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"schedulablePropertiesList\":[{\"impalaMaxMemory\":null,\"impalaMaxQueuedQueries\":null,\"impalaMaxRunningQueries\":null,\"maxAMShare\":null,\"maxResources\":null,\"maxRunningApps\":null,\"minResources\":null,\"scheduleName\":\"default\",\"weight\":null}],\"schedulingPolicy\":null}],\"userMaxAppsDefault\":null,\"users\":[]}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop TLS/SSL Server Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Hadoop TLS/SSL Server Keystore File Location",
"name": "service_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "The frequency in which the log4j event publication appender will retry sending undelivered log events to the Event server, in seconds",
"display_name": "Log Event Retry Frequency",
"name": "log_event_retry_frequency",
"value": "30"
},
{
"desc": "Controls which applications non-admin users can see in the applications list view",
"display_name": "Non-Admin Users Applications List Visibility Settings",
"name": "user_application_list_settings",
"value": "ALL"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Replication Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HDFS Replication Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "service_config_suppression_yarn_service_replication_config_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>core-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "yarn_core_site_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the ResourceManager Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: ResourceManager Count Validator",
"name": "service_config_suppression_resourcemanager_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Application Aggregates parameter.",
"display_name": "Suppress Parameter Validation: YARN Application Aggregates",
"name": "service_config_suppression_yarn_application_aggregates",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Client Truststore File Password parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Client Truststore File Password",
"name": "service_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "Password that protects the private key contained in the server keystore used for encrypted shuffle and encrypted web UIs. Applies to all configurations of daemon roles of this service.",
"display_name": "Hadoop TLS/SSL Server Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Service Advanced Configuration Snippet (Safety Valve) for hadoop-policy.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN Service Advanced Configuration Snippet (Safety Valve) for hadoop-policy.xml",
"name": "service_config_suppression_yarn_hadoop_policy_config_safety_valve",
"value": "false"
},
{
"desc": "Entries to add to the classpaths of YARN applications.",
"display_name": "YARN Application Classpath",
"name": "yarn_application_classpath",
"value": "$HADOOP_CLIENT_CONF_DIR,$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,$HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,$HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>yarn-site.xml</strong>. Applies to all HDFS Replication jobs.",
"display_name": "HDFS Replication Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "yarn_service_replication_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HDFS Replication Advanced Configuration Snippet (Safety Valve) for mapred-site.xml parameter.",
"display_name": "Suppress Parameter Validation: HDFS Replication Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "service_config_suppression_mapreduce_service_replication_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the JobHistory Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: JobHistory Server Count Validator",
"name": "service_config_suppression_jobhistory_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Service Advanced Configuration Snippet (Safety Valve) for core-site.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN Service Advanced Configuration Snippet (Safety Valve) for core-site.xml",
"name": "service_config_suppression_yarn_core_site_safety_valve",
"value": "false"
},
{
"desc": "UNIX user that containers run as when Linux-container-executor is used in nonsecure mode.",
"display_name": "UNIX User for Nonsecure Mode with Linux Container Executor",
"name": "yarn_nodemanager_linux_container_executor_nonsecure_mode_local_user",
"value": "nobody"
},
{
"desc": "Whether YARN creates a cgroup per container, thereby isolating the CPU usage of containers. When set, <tt>yarn.nodemanager.linux-container-executor.resources-handler.class</tt> is configured to <tt>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</tt>. The host (in Cloudera Manager) must have cgroups enabled. The number of shares allocated to all YARN containers is configured by adjusting the CPU shares value of the Node Manager in the Resource Management configuration group.",
"display_name": "Use CGroups for Resource Management",
"name": "yarn_service_cgroups",
"value": "false"
},
{
"desc": "Name of the HDFS service that this YARN service instance depends on",
"display_name": "HDFS Service",
"name": "hdfs_service",
"value": null
},
{
"desc": "For advanced use only, a string to be inserted into <strong>mapred-site.xml</strong>. Applies to all HDFS Replication jobs.",
"display_name": "HDFS Replication Advanced Configuration Snippet (Safety Valve) for mapred-site.xml",
"name": "mapreduce_service_replication_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the ResourceManager Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: ResourceManager Health",
"name": "service_health_suppression_yarn_resourcemanagers_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop TLS/SSL Server Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Hadoop TLS/SSL Server Keystore File Password",
"name": "service_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the UNIX User for Nonsecure Mode with Linux Container Executor parameter.",
"display_name": "Suppress Parameter Validation: UNIX User for Nonsecure Mode with Linux Container Executor",
"name": "service_config_suppression_yarn_nodemanager_linux_container_executor_nonsecure_mode_local_user",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN (MR2 Included) Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "yarn_service_env_safety_valve",
"value": null
},
{
"desc": "The tolerance window used in YARN service tests that depend on detection of the active ResourceManager.",
"display_name": "Active ResourceManager Detection Window",
"name": "yarn_active_resourcemanager_detecton_window",
"value": "3"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>ssl-server.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml",
"name": "yarn_ssl_server_safety_valve",
"value": null
},
{
"desc": "A list specifying the rules to run to determine which Fair Scheduler configuration to use. Typically edited using the Rules configuration UI.",
"display_name": "Fair Scheduler Configuration Rules",
"name": "yarn_fs_schedule_rules",
"value": "[]"
},
{
"desc": "Whether to suppress the results of the JobHistory Server Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: JobHistory Server Health",
"name": "service_health_suppression_yarn_jobhistory_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Llama Proxy User Hosts parameter.",
"display_name": "Suppress Parameter Validation: Llama Proxy User Hosts",
"name": "service_config_suppression_llama_proxy_user_hosts_list",
"value": "false"
},
{
"desc": "Path to the keystore file containing the server certificate and private key used for encrypted shuffle and encrypted web UIs. Applies to configurations of all daemon roles of this service.",
"display_name": "Hadoop TLS/SSL Server Keystore File Location",
"name": "ssl_server_keystore_location",
"value": null
},
{
"desc": "Enables Kerberos authentication for Hadoop HTTP web consoles for all roles of this service using the SPNEGO protocol. <b>Note:</b> This is effective only if Kerberos is enabled for the HDFS service.",
"display_name": "Enable Kerberos Authentication for HTTP Web-Consoles",
"name": "hadoop_secure_web_ui",
"value": "false"
},
{
"desc": "Password for the server keystore file used for encrypted shuffle and encrypted web UIs. Applies to configurations of all daemon roles of this service.",
"display_name": "Hadoop TLS/SSL Server Keystore File Password",
"name": "ssl_server_keystore_password",
"value": null
},
{
"desc": "The amount of time after ResourceManager(s) start that the lack of an active ResourceManager will be tolerated. This is an advanced option that does not often need to be changed.",
"display_name": "ResourceManager Activation Startup Tolerance",
"name": "yarn_resourcemanager_activation_startup_tolerance",
"value": "180"
},
{
"desc": "When computing the overall YARN service health, whether to consider the active ResourceManager's health.",
"display_name": "Active ResourceManager Role Health Check",
"name": "yarn_resourcemanagers_health_enabled",
"value": "true"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "yarn"
},
{
"desc": "When set to <b>true</b>, pools specified in applications but not explicitly configured, are created at runtime with default settings. When set to <b>false</b>, applications specifying pools not explicitly configured run in a pool named <b>default</b>. This setting applies when an application explicitly specifies a pool and when the application runs in a pool named with the username associated with the application.",
"display_name": "Allow Undeclared Pools",
"name": "yarn_scheduler_fair_allow_undeclared_pools",
"value": "true"
},
{
"desc": "When computing the overall YARN health, consider JobHistory Server's health",
"display_name": "JobHistory Server Role Health Test",
"name": "yarn_jobhistoryserver_health_enabled",
"value": "true"
},
{
"desc": "Path to the truststore file used when roles of this service act as TLS/SSL clients. Overrides the cluster-wide default truststore location set in HDFS. This truststore must be in JKS format. The truststore contains certificates of trusted servers, or of Certificate Authorities trusted to identify servers. The contents of the truststore can be modified without restarting any roles. By default, changes to its contents are picked up within ten seconds. If not set, the default Java truststore is used to verify certificates.",
"display_name": "TLS/SSL Client Truststore File Location",
"name": "ssl_client_truststore_location",
"value": null
},
{
"desc": "How long to keep aggregation logs before deleting them.",
"display_name": "Log Aggregation Retention Period",
"name": "yarn_log_aggregation_retain_seconds",
"value": "604800"
},
{
"desc": "When computing the overall YARN service health, whether to consider the health of the standby ResourceManager.",
"display_name": "Standby ResourceManager Health Check",
"name": "yarn_standby_resourcemanager_health_enabled",
"value": "true"
},
{
"desc": "Cluster ID used when ResourceManager is Highly Available.",
"display_name": "ResourceManager HA Cluster ID",
"name": "yarn_rm_ha_cluster_id",
"value": "yarnRM"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ResourceManager HA Cluster ID parameter.",
"display_name": "Suppress Parameter Validation: ResourceManager HA Cluster ID",
"name": "service_config_suppression_yarn_rm_ha_cluster_id",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Admin ACL parameter.",
"display_name": "Suppress Parameter Validation: Admin ACL",
"name": "service_config_suppression_yarn_admin_acl",
"value": "false"
},
{
"desc": "Whether users and groups specified in Admin ACL should be checked for authorization to perform admin operations.",
"display_name": "Enable ResourceManager ACLs",
"name": "yarn_acl_enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the Hadoop TLS/SSL Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Hadoop TLS/SSL Validator",
"name": "service_config_suppression_hadoop_ssl_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Service MapReduce Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: YARN Service MapReduce Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_yarn_service_mapred_safety_valve",
"value": "false"
},
{
"desc": "The group that this service's processes should run as. (Except the Job History Server, which has its own group)",
"display_name": "System Group",
"name": "process_groupname",
"value": "hadoop"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>hadoop-policy.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN Service Advanced Configuration Snippet (Safety Valve) for hadoop-policy.xml",
"name": "yarn_hadoop_policy_config_safety_valve",
"value": null
},
{
"desc": "Comma-delimited list of hosts where you want to allow the Llama (AM for Impala) user to impersonate other users. The default '*' allows all hosts. To disable entirely, use a string that doesn't correspond to a host name, such as '_no_host'.",
"display_name": "Llama Proxy User Hosts",
"name": "llama_proxy_user_hosts_list",
"value": "*"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-server.xml",
"name": "service_config_suppression_yarn_ssl_server_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Application Classpath parameter.",
"display_name": "Suppress Parameter Validation: YARN Application Classpath",
"name": "service_config_suppression_yarn_application_classpath",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "Password for the TLS/SSL client truststore. Overrides the cluster-wide default truststore password set in HDFS.",
"display_name": "TLS/SSL Client Truststore File Password",
"name": "ssl_client_truststore_password",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>yarn-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN Service Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "yarn_service_config_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop TLS/SSL Server Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Hadoop TLS/SSL Server Keystore Key Password",
"name": "service_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "The user that this service's processes should run as. (Except the Job History Server, which has its own user)",
"display_name": "System User",
"name": "process_username",
"value": "yarn"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "Controls which applications an admin user can see in the applications list view",
"display_name": "Admin Users Applications List Visibility Settings",
"name": "admin_application_list_settings",
"value": "ALL"
},
{
"desc": "Whether YARN uses the Linux Container Executor both in secure (Kerberos) and insecure (not Kerberos) environments. Cgroups enforcement only works when the Linux Container Executor is used.",
"display_name": "Always Use Linux Container Executor",
"name": "yarn_service_lce_always",
"value": "false"
},
{
"desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
"display_name": "Enable Log Event Capture",
"name": "catch_events",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Comma-delimited list of groups that you want to allow the Llama (AM for Impala) user to impersonate. The default '*' allows all groups. To disable entirely, use a string that doesn't correspond to a group name, such as '_no_group_'.",
"display_name": "Llama Proxy User Groups",
"name": "llama_proxy_user_groups_list",
"value": "*"
},
{
"desc": "Whether to suppress configuration warnings produced by the NodeManager Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: NodeManager Count Validator",
"name": "service_config_suppression_nodemanager_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the Gateway Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Gateway Count Validator",
"name": "service_config_suppression_gateway_count_validator",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>ssl-client.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-client.xml",
"name": "yarn_ssl_client_safety_valve",
"value": null
},
{
"desc": "Whether to suppress the results of the NodeManager Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: NodeManager Health",
"name": "service_health_suppression_yarn_node_managers_healthy",
"value": "false"
},
{
"desc": "Controls the aggregate metrics generated for YARN applications. The structure is a JSON list of the attributes to aggregate and the entities to aggregate to. For example, if the attributeName is 'maps_completed' and the aggregationTargets is ['USER'] then the Service Monitor will create the metric 'yarn_application_maps_completed_rate' and, every ten minutes, will record the total maps completed for each user across all their YARN applications. By default it will also record the number of applications submitted ('apps_submitted_rate') for both users and pool. For a full list of the supported attributes see the YARN search page. Note that the valid aggregation targets are USER, YARN_POOL, and YARN (the service), and that these aggregate metrics can be viewed on both the reports and charts search pages.",
"display_name": "YARN Application Aggregates",
"name": "yarn_application_aggregates",
"value": "[\n {\n \"attributeName\": \"maps_total\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"reduces_total\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"cpu_milliseconds\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"mb_millis_maps\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"mb_millis_reduces\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"vcores_millis_maps\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"vcores_millis_reduces\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"file_bytes_read\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"file_bytes_written\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"hdfs_bytes_read\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"hdfs_bytes_written\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"cm_cpu_milliseconds\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n },\n {\n \"attributeName\": \"application_duration\",\n \"aggregationTargets\": [\"USER\", \"YARN_POOL\", \"YARN\", \"CLUSTER\"]\n }\n]\n"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the TLS/SSL Client Truststore File Location parameter.",
"display_name": "Suppress Parameter Validation: TLS/SSL Client Truststore File Location",
"name": "service_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The home directory of the system user on the local filesystem. This setting must reflect the system's configured value - only changing it here will not change the actual home directory.",
"display_name": "System User's Home Directory",
"name": "hdfs_user_home_dir",
"value": "/var/lib/hadoop-yarn"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User's Home Directory parameter.",
"display_name": "Suppress Parameter Validation: System User's Home Directory",
"name": "service_config_suppression_hdfs_user_home_dir",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN (MR2 Included) Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: YARN (MR2 Included) Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_yarn_service_env_safety_valve",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>mapred-site.xml</strong>. Applies to configurations of all roles in this service except client configuration.",
"display_name": "YARN Service MapReduce Advanced Configuration Snippet (Safety Valve)",
"name": "yarn_service_mapred_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN Service Advanced Configuration Snippet (Safety Valve) for yarn-site.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN Service Advanced Configuration Snippet (Safety Valve) for yarn-site.xml",
"name": "service_config_suppression_yarn_service_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Whether to enable log aggregation",
"display_name": "Enable Log Aggregation",
"name": "yarn_log_aggregation_enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Llama Proxy User Groups parameter.",
"display_name": "Suppress Parameter Validation: Llama Proxy User Groups",
"name": "service_config_suppression_llama_proxy_user_groups_list",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-client.xml parameter.",
"display_name": "Suppress Parameter Validation: YARN (MR2 Included) Service Advanced Configuration Snippet (Safety Valve) for ssl-client.xml",
"name": "service_config_suppression_yarn_ssl_client_safety_valve",
"value": "false"
}
]

View File

@ -1,596 +0,0 @@
[
{
"desc": "The port to monitor for inter-server communication",
"display_name": "Quorum Port",
"name": "quorumPort",
"value": "3181"
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "zookeeper_server_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Log Directory parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Log Directory",
"name": "role_config_suppression_zk_server_log_dir",
"value": "false"
},
{
"desc": "Specifies the name of the user that has read-only privileges when using password file based authentication for JMX access. JMX authentication must be enabled for this setting to take effect.",
"display_name": "Name of User with Read-Only access to the JMX Agent",
"name": "jmx_passwd_file_readonly_user",
"value": "monitorRole"
},
{
"desc": "Enables the health test that the Server's process state is consistent with the role configuration",
"display_name": "Server Process Health Test",
"name": "zookeeper_server_scm_health_enabled",
"value": "true"
},
{
"desc": "The address (IPv4, IPv6, or hostname) to monitor for client connections. This is the address that clients attempt to connect to. This setting is optional, because by default, ZooKeeper binds in such a way that any connection to the client port for any address/interface/NIC on the server will be accepted.",
"display_name": "Client Port Address",
"name": "clientPortAddress",
"value": null
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "Specifies the password of the user that has read-only privileges when using password file based authentication for JMX access. JMX authentication must be enabled for this setting to take effect.",
"display_name": "Password of User with Read-Only Access to the JMX agent",
"name": "jmx_passwd_file_readonly_user_password",
"value": "MONITOR"
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Quorum Membership health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Quorum Membership",
"name": "role_health_suppression_zookeeper_server_quorum_membership",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Port Address parameter.",
"display_name": "Suppress Parameter Validation: Client Port Address",
"name": "role_config_suppression_clientportaddress",
"value": "false"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "The health check thresholds of the weighted average size of the ZooKeeper Server connection count over a recent period. See ZooKeeper Server Connection Count Monitoring Period.",
"display_name": "ZooKeeper Server Connection Count Thresholds",
"name": "zookeeper_server_connection_count_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>zoo.cfg</strong> for this role only.",
"display_name": "Server Advanced Configuration Snippet (Safety Valve) for zoo.cfg",
"name": "zookeeper_config_safety_valve",
"value": null
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Transaction Log Directory.",
"display_name": "Transaction Log Directory Free Space Monitoring Absolute Thresholds",
"name": "zookeeper_server_data_log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The port to monitor for client connections. This is the port that clients attempt to connect to.",
"display_name": "Client Port",
"name": "clientPort",
"value": "2181"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Server Advanced Configuration Snippet (Safety Valve) for zoo.cfg parameter.",
"display_name": "Suppress Parameter Validation: Server Advanced Configuration Snippet (Safety Valve) for zoo.cfg",
"name": "role_config_suppression_zookeeper_config_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Password of User with Read-Only Access to the JMX agent parameter.",
"display_name": "Suppress Parameter Validation: Password of User with Read-Only Access to the JMX agent",
"name": "role_config_suppression_jmx_passwd_file_readonly_user_password",
"value": "false"
},
{
"desc": "The disk location that ZooKeeper will use to store its database snapshots.",
"display_name": "Data Directory",
"name": "dataDir",
"value": "/var/lib/zookeeper"
},
{
"desc": "Whether to suppress the results of the GC Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: GC Duration",
"name": "role_health_suppression_zookeeper_server_gc_duration",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Heap Dump Directory Free Space",
"name": "role_health_suppression_zookeeper_server_heap_dump_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Unique identifier for each ZooKeeper server, typically starts at 1",
"display_name": "ZooKeeper Server ID",
"name": "serverId",
"value": null
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_zookeeper_server_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_zookeeper_server_file_descriptor",
"value": "false"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
"name": "log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "<p>This file contains the rules which govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message.</p><p>Each rule has some or all of the following fields:</p><ul><li><span class='code'>alert</span> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><span class='code'>rate</span> <strong>(mandatory)</strong> - the maximum number of log messages matching this rule that may be sent as events every minute. If more than <tt>rate</tt> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><span class='code'>periodminutes</span> - the number of minutes during which the publisher will only publish <tt>rate</tt> events or fewer. If not specified, the default is <strong>one minute</strong></li><li><span class='code'>threshold</span> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><span class='code'>content</span> - match only those messages whose contents match this regular expression.</li><li><span class='code'>exceptiontype</span> - match only those messages which are part of an exception message. The exception type must match this regular expression.</li></ul><br/><p>Example:<span class='code'>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</span></p><p>This rule will send events to Cloudera Manager for every <span class='code'>StringIndexOutOfBoundsException</span>, up to a maximum of 10 every minute.</p>",
"display_name": "Rules to Extract Events from Log Files",
"name": "log_event_whitelist",
"value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
},
{
"desc": "The disk location that ZooKeeper will use to store its transaction logs.",
"display_name": "Transaction Log Directory",
"name": "dataLogDir",
"value": "/var/lib/zookeeper"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Data Directory.",
"display_name": "Data Directory Free Space Monitoring Absolute Thresholds",
"name": "zookeeper_server_data_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
"display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
"name": "heap_dump_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The maximum size, in megabytes, per log file for Server logs. Typically used by log4j or logback.",
"display_name": "Server Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Password of user with read-write access to the JMX agent parameter.",
"display_name": "Suppress Parameter Validation: Password of user with read-write access to the JMX agent",
"name": "role_config_suppression_jmx_passwd_file_readwrite_user_password",
"value": "false"
},
{
"desc": "The minimum session timeout, in milliseconds, that the ZooKeeper Server will allow the client to negotiate",
"display_name": "Minimum Session Timeout",
"name": "minSessionTimeout",
"value": "4000"
},
{
"desc": "The percentage thresholds of the ratio of the maximum request latency to the maximum client-negotiable session timeout since the server was started.",
"display_name": "Maximum Latency Monitoring Thresholds",
"name": "zookeeper_server_max_latency_thresholds",
"value": "{\"critical\":\"100.0\",\"warning\":\"75.0\"}"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Enables authentication when interacting with the JMX agent on the ZooKeeper server.",
"display_name": "Enable Authenticated Communication with the JMX Agent",
"name": "enable_jmx_authentication",
"value": "false"
},
{
"desc": "Enables the quorum membership check for this ZooKeeper Server.",
"display_name": "Enable the Quorum Membership Check",
"name": "zookeeper_server_quorum_membership_enabled",
"value": "true"
},
{
"desc": "The port used by the ZooKeeper Server's RMI server to handle JMX RMI requests. This is added as \"-Dcom.sun.management.jmxremote.rmi.port=<port>\" to the ZooKeeper Server's JVM command line. This has an effect only in Oracle JDK 7u4 and higher. If the setting is left blank, the JMX Remote Port value is used. If set to 0 or -1, this setting is ignored. When this setting is not in effect, the JVM uses a random port for the RMI server.",
"display_name": "JMX RMI Server Port",
"name": "server_jmx_rmi_port",
"value": null
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_zookeeper_server_host_health",
"value": "false"
},
{
"desc": "Directory where ZooKeeper will place its log files.",
"display_name": "ZooKeeper Log Directory",
"name": "zk_server_log_dir",
"value": "/var/log/zookeeper"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
"name": "heap_dump_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Transaction Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Transaction Log Directory",
"name": "role_config_suppression_datalogdir",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "The minimum log level for Server logs",
"display_name": "Server Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "SERVER_role_env_safety_valve",
"value": null
},
{
"desc": "The period to review when computing the moving average of the outstanding requests queue size. Specified in minutes.",
"display_name": "ZooKeeper Server Outstanding Requests Monitoring Period",
"name": "zookeeper_server_outstanding_requests_window",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Name of User with Read-Write Access to the JMX Agent parameter.",
"display_name": "Suppress Parameter Validation: Name of User with Read-Write Access to the JMX Agent",
"name": "role_config_suppression_jmx_passwd_file_readwrite_user",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Zookeeper Server parameter.",
"display_name": "Suppress Parameter Validation: Java Configuration Options for Zookeeper Server",
"name": "role_config_suppression_zk_server_java_opts",
"value": "false"
},
{
"desc": "The port used by the ZooKeeper Server's RMI registry. This is required to enable JMX access through RMI which is required for Cloudera Manager ZooKeeper monitoring. This is added as \"-Dcom.sun.management.jmxremote.port\" to the ZooKeeper Server's JVM command line.",
"display_name": "JMX Remote Port",
"name": "server_jmx_agent_port",
"value": "9010"
},
{
"desc": "The tolerance window that will be used in the detection of a ZooKeeper server's membership in a quorum. Specified in minutes.",
"display_name": "Quorum Membership Detection Window",
"name": "zookeeper_server_quorum_membership_detection_window",
"value": "3"
},
{
"desc": "The health check thresholds of the weighted average size of the ZooKeeper Server outstanding requests queue over a recent period. See ZooKeeper Server Outstanding Requests Monitoring Period.",
"display_name": "ZooKeeper Server Outstanding Requests Thresholds",
"name": "zookeeper_server_outstanding_requests_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Server Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_server_role_env_safety_valve",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Specifies the name of the user that has read-write privileges when using password file based authentication for JMX access. JMX authentication must be enabled for this setting to take effect.",
"display_name": "Name of User with Read-Write Access to the JMX Agent",
"name": "jmx_passwd_file_readwrite_user",
"value": "controlRole"
},
{
"desc": "The port to monitor for leadership election",
"display_name": "Election Port",
"name": "electionPort",
"value": "4181"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
"display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
"name": "role_config_suppression_log_event_whitelist",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
"display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
"name": "log_directory_free_space_absolute_thresholds",
"value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
},
{
"desc": "The maximum number of concurrent connections (at the socket level) that a single client, identified by the IP address, may make to a single member of the ZooKeeper ensemble. This setting is used to prevent certain classes of DoS attacks, including file descriptor exhaustion. To remove the limit on concurrent connections, set this value to 0.",
"display_name": "Maximum Client Connections",
"name": "maxClientCnxns",
"value": "60"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here.",
"display_name": "Java Configuration Options for Zookeeper Server",
"name": "zk_server_java_opts",
"value": ""
},
{
"desc": "Whether to suppress the results of the Maximum Request Latency health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Maximum Request Latency",
"name": "role_health_suppression_zookeeper_server_max_latency",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_zookeeper_server_unexpected_exits",
"value": "false"
},
{
"desc": "The period to review when computing the moving average of garbage collection time.",
"display_name": "Garbage Collection Duration Monitoring Period",
"name": "zookeeper_server_gc_duration_window",
"value": "5"
},
{
"desc": "Specifies the password of the user that has read-write privileges when using password file based authentication for JMX access. JMX authentication must be enabled for this setting to take effect.",
"display_name": "Password of user with read-write access to the JMX agent",
"name": "jmx_passwd_file_readwrite_user_password",
"value": "CONTROL"
},
{
"desc": "Enables the JMX agent on the ZooKeeper server. Turning this off on any of the ZooKeeper servers that are part of a service will prevent Cloudera Manager from being able to monitor that server and may affect the monitoring provided on the entire service.",
"display_name": "Enable JMX Agent",
"name": "enable_jmx_agent",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_zookeeper_server_scm_health",
"value": "false"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Transaction Log Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Transaction Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Transaction Log Directory Free Space Monitoring Percentage Thresholds",
"name": "zookeeper_server_data_log_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Data Directory parameter.",
"display_name": "Suppress Parameter Validation: Data Directory",
"name": "role_config_suppression_datadir",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Outstanding Requests health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Outstanding Requests",
"name": "role_health_suppression_zookeeper_server_outstanding_requests",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Name of User with Read-Only access to the JMX Agent parameter.",
"display_name": "Suppress Parameter Validation: Name of User with Read-Only access to the JMX Agent",
"name": "role_config_suppression_jmx_passwd_file_readonly_user",
"value": "false"
},
{
"desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
"display_name": "Garbage Collection Duration Thresholds",
"name": "zookeeper_server_gc_duration_thresholds",
"value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Server Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
"display_name": "Java Heap Size of ZooKeeper Server in Bytes",
"name": "zookeeper_server_java_heapsize",
"value": "1073741824"
},
{
"desc": "The maximum number of rolled log files to keep for Server logs. Typically used by log4j or logback.",
"display_name": "Server Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "Whether to suppress the results of the Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Log Directory Free Space",
"name": "role_health_suppression_zookeeper_server_log_directory_free_space",
"value": "false"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
},
{
"desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's Data Directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Data Directory Free Space Monitoring Absolute Thresholds setting is configured.",
"display_name": "Data Directory Free Space Monitoring Percentage Thresholds",
"name": "zookeeper_server_data_directory_free_space_percentage_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"never\"}"
},
{
"desc": "The period to review when computing the moving average of the connection count. Specified in minutes.",
"display_name": "ZooKeeper Server Connection Count Monitoring Period",
"name": "zookeeper_server_connection_count_window",
"value": "3"
},
{
"desc": "When computing the overall Server health, consider the host's health.",
"display_name": "Server Host Health Test",
"name": "zookeeper_server_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "The maximum session timeout, in milliseconds, that the ZooKeeper Server will allow the client to negotiate",
"display_name": "Maximum Session Timeout",
"name": "maxSessionTimeout",
"value": "40000"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <span class='code'>stacks</span> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the Transaction Log Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Transaction Log Directory Free Space",
"name": "role_health_suppression_zookeeper_server_data_log_directory_free_space",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "Whether to suppress the results of the Data Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Data Directory Free Space",
"name": "role_health_suppression_zookeeper_server_data_directory_free_space",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Connection Count health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Connection Count",
"name": "role_health_suppression_zookeeper_server_connection_count",
"value": "false"
}
]

View File

@ -1,224 +0,0 @@
[
{
"desc": "When enabled, ZooKeeper auto purge feature retains this many most recent snapshots and the corresponding transaction logs in the dataDir and dataLogDir respectively and deletes the rest. Defaults to 5. Minimum value is 3.",
"display_name": "Auto Purge Snapshots Retain Count",
"name": "autopurgeSnapRetainCount",
"value": "5"
},
{
"desc": "Configures the path of the root znode under which all canary updates are performed",
"display_name": "ZooKeeper Canary Root Znode Path",
"name": "zookeeper_canary_root_path",
"value": "/cloudera_manager_zookeeper_canary"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "zookeeper"
},
{
"desc": "The frequency in which the log4j event publication appender will retry sending undelivered log events to the Event server, in seconds",
"display_name": "Log Event Retry Frequency",
"name": "log_event_retry_frequency",
"value": "30"
},
{
"desc": "Amount of time, in ticks, to allow followers to sync with ZooKeeper. If followers fall too far behind a leader, they are dropped.",
"display_name": "Synchronization Limit",
"name": "syncLimit",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Server Count Validator",
"name": "service_config_suppression_server_count_validator",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Server Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Server Health",
"name": "service_health_suppression_zookeeper_servers_healthy",
"value": "false"
},
{
"desc": "The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. Defaults to 24.",
"display_name": "Auto Purge Time Interval",
"name": "autopurgeInterval",
"value": "24"
},
{
"desc": "Enable Kerberos authentication for ZooKeeper.",
"display_name": "Enable Kerberos Authentication",
"name": "enableSecurity",
"value": "false"
},
{
"desc": "Whether to suppress the results of the ZooKeeper Canary health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: ZooKeeper Canary",
"name": "service_health_suppression_zookeeper_canary_health",
"value": "false"
},
{
"desc": "The number of snapshot files and corresponding transaction logs to keep when running the Cleanup command.",
"display_name": "Cleanup Retain Count",
"name": "cleanupRetainCount",
"value": "5"
},
{
"desc": "Enables the health check that a client can connect to ZooKeeper and perform basic operations",
"display_name": "ZooKeeper Canary Health Check",
"name": "zookeeper_canary_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Canary Root Znode Path parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Canary Root Znode Path",
"name": "service_config_suppression_zookeeper_canary_root_path",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_zookeeper_env_safety_valve",
"value": "false"
},
{
"desc": "Kerberos principal short name used by all roles of this service.",
"display_name": "Kerberos Principal",
"name": "kerberos_princ_name",
"value": "zookeeper"
},
{
"desc": "Configures the timeout used by the canary sessions with ZooKeeper servers",
"display_name": "ZooKeeper Canary Session Timeout",
"name": "zookeeper_canary_session_timeout",
"value": "30000"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "The maximum size of the data that can be stored in a znode in bytes.",
"display_name": "Jute Max Buffer",
"name": "jute_maxbuffer",
"value": "4194304"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "zookeeper"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Amount of time, in ticks, to allow followers to connect and sync to a leader. Increase this value as needed, if the amount of data managed by ZooKeeper is large.",
"display_name": "Initialization Limit",
"name": "initLimit",
"value": "10"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "ZooKeeper Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "zookeeper_env_safety_valve",
"value": null
},
{
"desc": "Configures the timeout used by the canary for connection establishment with ZooKeeper servers",
"display_name": "ZooKeeper Canary Connection Timeout",
"name": "zookeeper_canary_connection_timeout",
"value": "10000"
},
{
"desc": "Configures the timeout used by the canary for ZooKeeper operations",
"display_name": "ZooKeeper Canary Operation Timeout",
"name": "zookeeper_canary_operation_timeout",
"value": "30000"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "The length of time, in milliseconds, of a single tick, which is the basic time unit used by ZooKeeper. A tick is used to regulate heartbeats and timeouts.",
"display_name": "Tick Time",
"name": "tickTime",
"value": "2000"
},
{
"desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
"display_name": "Enable Log Event Capture",
"name": "catch_events",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
"display_name": "Suppress Parameter Validation: Kerberos Principal",
"name": "service_config_suppression_kerberos_princ_name",
"value": "false"
},
{
"desc": "The health test thresholds of the overall Server health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Servers falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Servers falls below the critical threshold.",
"display_name": "Healthy Server Monitoring Thresholds",
"name": "zookeeper_servers_healthy_thresholds",
"value": "{\"critical\":\"51.0\",\"warning\":\"99.0\"}"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the ZooKeeper Server Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: ZooKeeper Server Count Validator",
"name": "service_config_suppression_zookeeper_odd_number_of_servers_validator",
"value": "false"
},
{
"desc": "Whether the leader accepts client connections.",
"display_name": "Leader Serves",
"name": "leaderServes",
"value": "yes"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "Automatically create data directories at startup, if they do not exist. Enabling this configuration should be used with care as it will suppress any errors in setup of data directories.",
"display_name": "Enable auto-creation of data directories",
"name": "zookeeper_datadir_autocreate",
"value": "false"
}
]

View File

@ -1,21 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.plugins.cdh import validation
class ValidatorV550(validation.Validator):
    """Cluster-topology validator for the CDH 5.5.0 plugin version.

    All validation logic is inherited from the shared CDH ``Validator``;
    only the version-specific plugin-utils binding differs.
    """

    # Plugin utilities used by the inherited validation checks.
    PU = pu.PluginUtilsV550()

View File

@ -1,35 +0,0 @@
# Copyright (c) 2015 Intel Corporation
# Copyright (c) 2015 ISPRAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avm
from sahara.plugins.cdh.v5_5_0 import cloudera_utils
from sahara.plugins.cdh.v5_5_0 import config_helper
from sahara.plugins.cdh.v5_5_0 import deploy
from sahara.plugins.cdh.v5_5_0 import edp_engine
from sahara.plugins.cdh.v5_5_0 import plugin_utils
from sahara.plugins.cdh.v5_5_0 import validation
class VersionHandler(avm.BaseVersionHandler):
    """Version handler wiring together the CDH 5.5.0 plugin components."""

    def __init__(self):
        """Bind the 5.5.0-specific helpers onto the base handler."""
        super(VersionHandler, self).__init__()
        self.config_helper = config_helper.ConfigHelperV550()
        self.cloudera_utils = cloudera_utils.ClouderaUtilsV550()
        self.plugin_utils = plugin_utils.PluginUtilsV550()
        # deploy and edp_engine are bound as modules (not instances),
        # matching the convention used by the other CDH version handlers.
        self.deploy = deploy
        self.edp_engine = edp_engine
        self.validation = validation.ValidatorV550()

View File

@ -39,8 +39,6 @@ class MapRPlugin(p.ProvisioningPluginBase):
'plugin_labels': {'enabled': {'status': True}},
'version_labels': {
'5.2.0.mrv2': {'enabled': {'status': True}},
'5.1.0.mrv2': {'enabled': {'status': False},
'deprecated': {'status': True}},
}
}

View File

@ -166,24 +166,3 @@ class OozieV420(Oozie):
    def post_install(self, cluster_context, instances):
        """Run base post-install steps, then apply the maprfs-jar fix."""
        super(OozieV420, self).post_install(cluster_context, instances)
        self.fix_oozie_bug(cluster_context)
    def fix_oozie_bug(self, cluster_context):
        """Work around the wrong-maprfs-jar Oozie bug.

        On some environments the Oozie installation process picks up an
        incorrect maprfs jar, which causes job submission failures.
        This is a temporary Oozie bug; the workaround only applies to
        MapR 5.1.0.
        """
        # No-op for every MapR version except the affected 5.1.0.
        if cluster_context.mapr_version != '5.1.0':
            return
        oozie_inst = cluster_context.get_instance(OOZIE)
        # Replace the bundled jar with a symlink to the correct MapR
        # library, in place, on the Oozie instance.
        command = "sudo rm /opt/mapr/hadoop/hadoop-2.7.0/share/hadoop/kms/" \
                  "tomcat/webapps/kms/WEB-INF/lib/maprfs-5.1.0-mapr.jar" \
                  " && sudo ln -s /opt/mapr/lib/maprfs-5.1.0-mapr.jar" \
                  " /opt/mapr/hadoop/hadoop-2.7.0/share/hadoop/kms/" \
                  "tomcat/webapps/kms/WEB-INF/lib/maprfs-5.1.0-mapr.jar"
        with oozie_inst.remote() as r:
            r.execute_command(command, run_as_root=True)

View File

@ -1,60 +0,0 @@
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.base.base_cluster_context as bc
import sahara.plugins.mapr.services.yarn.yarn as yarn
class Context(bc.BaseClusterContext):
    """Cluster context for the MapR 5.1.0 (MRv2/YARN) plugin version.

    Holds version-specific constants (Hadoop version, ecosystem repo
    URLs) plus paths lazily derived from the Hadoop home directory.
    """

    def __init__(self, cluster, version_handler, added=None, removed=None):
        super(Context, self).__init__(cluster, version_handler, added, removed)
        self._hadoop_version = yarn.YARNv270().version
        # hadoop_lib / hadoop_conf are resolved lazily from hadoop_home
        # (see the properties below).
        self._hadoop_lib = None
        self._hadoop_conf = None
        self._cluster_mode = yarn.YARNv270.cluster_mode
        self._node_aware = True
        self._resource_manager_uri = "maprfs:///"
        self._mapr_version = "5.1.0"
        self._ubuntu_ecosystem_repo = (
            "http://package.mapr.com/releases/ecosystem-5.x/ubuntu binary/")
        self._centos_ecosystem_repo = (
            "http://package.mapr.com/releases/ecosystem-5.x/redhat")

    @property
    def hadoop_lib(self):
        # Lazily resolve (and cache) the Hadoop common-libs directory.
        if not self._hadoop_lib:
            self._hadoop_lib = "%s/share/hadoop/common" % self.hadoop_home
        return self._hadoop_lib

    @property
    def hadoop_conf(self):
        # Lazily resolve (and cache) the Hadoop configuration directory.
        if not self._hadoop_conf:
            self._hadoop_conf = "%s/etc/hadoop" % self.hadoop_home
        return self._hadoop_conf

    @property
    def resource_manager_uri(self):
        return self._resource_manager_uri

    @property
    def configure_sh(self):
        # Extend the base configure.sh invocation with the history-server
        # address (-HS flag); computed once and cached.
        if not self._configure_sh:
            configure_sh_template = "%(base)s -HS %(history_server)s"
            args = {
                "base": super(Context, self).configure_sh,
                "history_server": self.get_historyserver_ip(),
            }
            self._configure_sh = configure_sh_template % args
        return self._configure_sh

View File

@ -1,76 +0,0 @@
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.base.base_version_handler as bvh
from sahara.plugins.mapr.services.drill import drill
from sahara.plugins.mapr.services.flume import flume
from sahara.plugins.mapr.services.hbase import hbase
from sahara.plugins.mapr.services.hive import hive
from sahara.plugins.mapr.services.httpfs import httpfs
from sahara.plugins.mapr.services.hue import hue
from sahara.plugins.mapr.services.impala import impala
from sahara.plugins.mapr.services.mahout import mahout
from sahara.plugins.mapr.services.management import management as mng
from sahara.plugins.mapr.services.maprfs import maprfs
from sahara.plugins.mapr.services.oozie import oozie
from sahara.plugins.mapr.services.pig import pig
from sahara.plugins.mapr.services.spark import spark
from sahara.plugins.mapr.services.sqoop import sqoop2
from sahara.plugins.mapr.services.swift import swift
from sahara.plugins.mapr.services.yarn import yarn
import sahara.plugins.mapr.versions.v5_1_0_mrv2.context as c
# Plugin version string exposed by this handler.
version = "5.1.0.mrv2"


class VersionHandler(bvh.BaseVersionHandler):
    """Version handler for the MapR 5.1.0 (MRv2/YARN) distribution.

    Declares the services that every cluster of this version must
    contain, and the full catalogue of optional service versions the
    plugin can deploy.
    """

    def __init__(self):
        super(VersionHandler, self).__init__()
        self._version = version
        # Mandatory services for a valid cluster of this version.
        self._required_services = [
            yarn.YARNv270(),
            maprfs.MapRFS(),
            mng.Management(),
            oozie.Oozie(),
        ]
        # All services (and versions) selectable in node-group templates.
        self._services = [
            hive.HiveV013(),
            hive.HiveV12(),
            impala.ImpalaV220(),
            pig.PigV014(),
            pig.PigV015(),
            flume.FlumeV16(),
            flume.FlumeV15(),
            sqoop2.Sqoop2(),
            mahout.MahoutV010(),
            oozie.OozieV410(),
            oozie.OozieV420(),
            hue.HueV381(),
            hue.HueV390(),
            hbase.HBaseV09812(),
            drill.DrillV11(),
            drill.DrillV12(),
            drill.DrillV14(),
            yarn.YARNv270(),
            maprfs.MapRFS(),
            mng.Management(),
            httpfs.HttpFS(),
            swift.Swift(),
            spark.SparkOnYarn()
        ]

    def get_context(self, cluster, added=None, removed=None):
        """Build the 5.1.0-specific cluster context wrapper."""
        return c.Context(cluster, self, added, removed)

View File

@ -63,7 +63,7 @@ class SparkProvider(p.ProvisioningPluginBase):
deprecated = {'enabled': {'status': True},
'deprecated': {'status': True}}
result = {'plugin_labels': copy.deepcopy(default)}
stable_versions = ['2.2', '2.1.0', '1.6.0']
stable_versions = ['2.2']
result['version_labels'] = {
version: copy.deepcopy(
default if version in stable_versions else deprecated
@ -72,7 +72,7 @@ class SparkProvider(p.ProvisioningPluginBase):
return result
def get_versions(self):
return ['2.2', '2.1.0', '1.6.0', '1.3.1']
return ['2.2', '2.1.0', '1.6.0']
def get_configs(self, hadoop_version):
return c_helper.get_plugin_configs()

View File

@ -42,13 +42,8 @@ def get_plugin_configs():
def generate_storm_config(master_hostname, zk_hostnames, version):
if version in ['1.0.1', '1.1.0']:
host_cfg = 'nimbus.seeds'
master_value = [master_hostname.encode('ascii', 'ignore')]
else:
host_cfg = 'nimbus.host'
master_value = master_hostname.encode('ascii', 'ignore')
host_cfg = 'nimbus.seeds'
master_value = [master_hostname.encode('ascii', 'ignore')]
cfg = {
host_cfg: master_value,

View File

@ -58,13 +58,12 @@ class StormProvider(p.ProvisioningPluginBase):
result = {'plugin_labels': copy.deepcopy(default)}
result['version_labels'] = {
'1.1.0': copy.deepcopy(default),
'1.0.1': copy.deepcopy(default),
'0.9.2': copy.deepcopy(deprecated),
'1.0.1': copy.deepcopy(deprecated),
}
return result
def get_versions(self):
return ['0.9.2', '1.0.1', '1.1.0']
return ['1.0.1', '1.1.0']
def get_configs(self, storm_version):
return c_helper.get_plugin_configs()

View File

@ -24,11 +24,11 @@ from sahara.tests.unit import base as unit_base
class TestHealthCheck(unit_base.SaharaTestCase):
def test_check_health_availability(self):
cluster = mock.Mock(plugin_name='cdh', hadoop_version='5.5.0')
cluster = mock.Mock(plugin_name='cdh', hadoop_version='5.11.0')
self.assertTrue(health.ClouderaManagerHealthCheck(
cluster, mock.Mock()).is_available())
cluster = mock.Mock(plugin_name='cdh', hadoop_version='5.5.0')
cluster = mock.Mock(plugin_name='cdh', hadoop_version='5.11.0')
self.assertTrue(health.ServiceHealthCheck(
cluster, mock.Mock(), mock.Mock()).is_available())

View File

@ -40,4 +40,4 @@ class VersionFactoryTestCase(base.SaharaTestCase):
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5.5.0', '5.7.0', '5.9.0', '5.11.0', '5.13.0']
return ['5.7.0', '5.9.0', '5.11.0', '5.13.0']

View File

@ -1,25 +0,0 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import config_helper
from sahara.tests.unit.plugins.cdh import base_config_helper_test as bcht
class TestConfigHelperV550(bcht.TestConfigHelper):
    """Run the shared config-helper test suite against the 5.5.0 helper."""

    def setUp(self):
        super(TestConfigHelperV550, self).setUp()
        # Version-specific helper instance and resource path consumed by
        # the inherited test cases.
        self.c_h = config_helper.ConfigHelperV550()
        self.path_to_config = 'plugins/cdh/v5_5_0/resources/'

View File

@ -1,168 +0,0 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from sahara.plugins.cdh.v5_5_0 import config_helper
from sahara.tests.unit import base
from sahara.tests.unit.plugins.cdh import utils as ctu
from sahara.utils import files as f
# Shared helper instance and resource path used by all tests below.
c_h = config_helper.ConfigHelperV550()
path_to_config = 'plugins/cdh/v5_5_0/resources/'

# Every per-service / per-role JSON resource shipped with the 5.5.0
# plugin; the union of these files defines the expected set of
# node-group plugin configs.
json_files = [
    'hdfs-service.json',
    'hdfs-namenode.json',
    'hdfs-datanode.json',
    'hdfs-secondarynamenode.json',
    'hdfs-gateway.json',
    'hdfs-journalnode.json',
    'yarn-service.json',
    'yarn-resourcemanager.json',
    'yarn-nodemanager.json',
    'yarn-jobhistory.json',
    'yarn-gateway.json',
    'oozie-service.json',
    'oozie-oozie_server.json',
    'hive-service.json',
    'hive-hivemetastore.json',
    'hive-hiveserver2.json',
    'hive-webhcat.json',
    'hue-service.json',
    'hue-hue_server.json',
    'spark-service.json',
    'spark-spark_yarn_history_server.json',
    'zookeeper-service.json',
    'zookeeper-server.json',
    'hbase-service.json',
    'hbase-master.json',
    'hbase-regionserver.json',
    'flume-service.json',
    'flume-agent.json',
    'sentry-service.json',
    'sentry-sentry_server.json',
    'solr-service.json',
    'solr-solr_server.json',
    'sqoop-service.json',
    'sqoop-sqoop_server.json',
    'ks_indexer-service.json',
    'ks_indexer-hbase_indexer.json',
    'impala-service.json',
    'impala-catalogserver.json',
    'impala-impalad.json',
    'impala-statestore.json',
    'kms-service.json',
    'kms-kms.json',
    'kafka-kafka_broker.json',
    'kafka-kafka_mirror_maker.json',
    'kafka-service.json'
]
class ConfigHelperTestCase(base.SaharaTestCase):
    """Unit tests for the CDH 5.5.0 config-helper module-level API.

    Each URL/flag accessor is checked twice: once against a cluster with
    no overrides (default value expected) and once with an explicit
    'general' cluster-config override ('spam' sentinel expected).
    """

    def test_get_ng_plugin_configs(self):
        """Helper must expose every config named in the JSON resources."""
        actual_configs = c_h._get_ng_plugin_configs()
        expected_configs = []
        for json_file in json_files:
            expected_configs += json.loads(
                f.get_file_text(path_to_config + json_file))
        # compare names
        expected_names = set(i['name'] for i in expected_configs)
        actual_names = set(i.to_dict()['name'] for i in actual_configs)
        self.assertEqual(expected_names, actual_names)

    def test_get_cdh5_repo_url(self):
        """CDH5 repo URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.CDH5_REPO_URL.default_value,
                         c_h.get_cdh5_repo_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.CDH5_REPO_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_cdh5_repo_url(cluster))

    def test_get_cdh5_key_url(self):
        """CDH5 repo key URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.CDH5_REPO_KEY_URL.default_value,
                         c_h.get_cdh5_key_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.CDH5_REPO_KEY_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_cdh5_key_url(cluster))

    def test_get_cm5_repo_url(self):
        """CM5 repo URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.CM5_REPO_URL.default_value,
                         c_h.get_cm5_repo_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.CM5_REPO_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_cm5_repo_url(cluster))

    def test_get_cm5_key_url(self):
        """CM5 repo key URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.CM5_REPO_KEY_URL.default_value,
                         c_h.get_cm5_key_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.CM5_REPO_KEY_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_cm5_key_url(cluster))

    def test_is_swift_enabled(self):
        """Swift integration is on by default and can be disabled."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertTrue(c_h.is_swift_enabled(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.ENABLE_SWIFT.name: False}})
        self.assertFalse(c_h.is_swift_enabled(cluster))

    def test_get_swift_lib_url(self):
        """Swift lib URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.DEFAULT_SWIFT_LIB_URL,
                         c_h.get_swift_lib_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.SWIFT_LIB_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_swift_lib_url(cluster))

    def test_is_hbase_common_lib_enabled(self):
        """HBase common lib is on by default and can be disabled."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertTrue(c_h.is_hbase_common_lib_enabled(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general':
                             {c_h.ENABLE_HBASE_COMMON_LIB.name: False}})
        self.assertFalse(c_h.is_hbase_common_lib_enabled(cluster))

    def test_get_extjs_lib_url(self):
        """ExtJS lib URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.DEFAULT_EXTJS_LIB_URL,
                         c_h.get_extjs_lib_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.EXTJS_LIB_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_extjs_lib_url(cluster))

    def test_get_kms_key_url(self):
        """KMS repo key URL: default, then per-cluster override."""
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(c_h.KMS_REPO_KEY_URL.default_value,
                         c_h.get_kms_key_url(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {c_h.KMS_REPO_KEY_URL.name: 'spam'}})
        self.assertEqual('spam', c_h.get_kms_key_url(cluster))

View File

@ -1,222 +0,0 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins.cdh.v5_5_0 import deploy
from sahara.tests.unit import base
class DeployCDHV550(base.SaharaTestCase):
def setUp(self):
super(DeployCDHV550, self).setUp()
self.master = mock.MagicMock()
self.master.node_group.node_processes = [
"HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "CLOUDERA_MANAGER",
"SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER",
"OOZIE_SERVER", "YARN_JOBHISTORY", "HDFS_SECONDARYNAMENODE",
"HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER",
"HBASE_MASTER", "HBASE_REGIONSERVER", "HUE_SERVER", "KMS",
"FLUME_AGENT", "SOLR_SERVER", "SQOOP_SERVER", "IMPALA_STATESTORE",
"IMPALA_CATALOGSERVER", "IMPALAD", "KEY_VALUE_STORE_INDEXER",
]
self.worker = mock.MagicMock()
self.worker.node_group.node_processes = [
"HDFS_DATANODE", "HDFS_JOURNALNODE", "JOURNALNODE",
"YARN_NODEMANAGER", "YARN_STANDBYRM",
]
self.instances = [self.master, self.worker]
self.cluster = mock.MagicMock()
self.is_cdh_exists = mock.patch(
"sahara.plugins.cdh.commands.is_pre_installed_cdh",
return_value=False)
self.is_cdh_exists.start()
self._create_facade = mock.patch(
"sahara.db.sqlalchemy.api._create_facade_lazily")
self._create_facade.start()
def tearDown(self):
self.is_cdh_exists.stop()
self._create_facade.stop()
super(DeployCDHV550, self).tearDown()
@mock.patch("sahara.plugins.utils.get_instances")
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test_configure_cluster(self, mock_cu, mock_get_instances):
mock_get_instances.return_value = self.instances
deploy.configure_cluster(self.cluster)
mock_cu.pu.configure_os.assert_called_once_with(self.instances)
mock_cu.pu.install_packages.assert_called_once_with(self.instances,
deploy.PACKAGES)
mock_cu.pu.start_cloudera_agents.assert_called_once_with(
self.instances)
mock_cu.pu.start_cloudera_manager.assert_called_once_with(self.cluster)
mock_cu.update_cloudera_password.assert_called_once_with(self.cluster)
mock_cu.await_agents.assert_called_once_with(self.cluster,
self.instances)
mock_cu.create_mgmt_service.assert_called_once_with(self.cluster)
mock_cu.create_services.assert_called_once_with(self.cluster)
mock_cu.configure_services.assert_called_once_with(self.cluster)
mock_cu.configure_instances.assert_called_once_with(self.instances,
self.cluster)
mock_cu.deploy_configs.assert_called_once_with(self.cluster)
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test__start_roles(self, mock_cu):
deploy._start_roles(self.cluster, self.instances)
mock_cu.get_service_by_role.assert_any_call('DATANODE',
instance=self.worker)
mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
instance=self.master)
mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
instance=self.worker)
self.assertEqual(mock_cu.start_roles.call_count, 3)
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy._start_roles")
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test_scale_cluster(self, mock_cu, mock__start_roles):
deploy.scale_cluster(self.cluster, None)
self.assertEqual(mock_cu.call_count, 0)
self.assertEqual(mock__start_roles.call_count, 0)
deploy.scale_cluster(self.cluster, self.instances)
mock_cu.pu.configure_os.assert_called_once_with(self.instances)
mock_cu.pu.install_packages.assert_called_once_with(self.instances,
deploy.PACKAGES)
mock_cu.pu.start_cloudera_agents.assert_called_once_with(
self.instances)
mock_cu.await_agents.assert_called_once_with(self.cluster,
self.instances)
mock_cu.configure_instances.assert_called_once_with(self.instances,
self.cluster)
mock_cu.update_configs.assert_called_once_with(self.instances)
mock_cu.pu.configure_swift.assert_called_once_with(self.cluster,
self.instances)
mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
mock__start_roles.assert_called_once_with(self.cluster,
self.instances)
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test_decommission_cluster(self, mock_cu):
deploy.decommission_cluster(self.cluster, self.instances)
dns = []
dns_2 = []
nms = []
nms_2 = []
for i in self.instances:
if 'HDFS_DATANODE' in i.node_group.node_processes:
dns.append(mock_cu.pu.get_role_name(i, 'DATANODE'))
dns_2.append(mock_cu.pu.get_role_name(i, 'HDFS_GATEWAY'))
if 'YARN_NODEMANAGER' in i.node_group.node_processes:
nms.append(mock_cu.pu.get_role_name(i, 'NODEMANAGER'))
nms_2.append(mock_cu.pu.get_role_name(i, 'YARN_GATEWAY'))
mock_cu.decommission_nodes.assert_any_call(
self.cluster, 'DATANODE', dns, dns_2)
mock_cu.decommission_nodes.assert_any_call(
self.cluster, 'NODEMANAGER', nms, nms_2)
mock_cu.delete_instances.assert_called_once_with(self.cluster,
self.instances)
mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
mock_cu.refresh_yarn_nodes.assert_called_once_with(self.cluster)
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test__prepare_cluster(self, mock_cu):
deploy._prepare_cluster(self.cluster)
mock_cu.pu.install_extjs.assert_called_once_with(self.cluster)
mock_cu.pu.configure_hive.assert_called_once_with(self.cluster)
mock_cu.pu.configure_sentry.assert_called_once_with(self.cluster)
@mock.patch("sahara.service.edp.hdfs_helper.create_hbase_common_lib")
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test__finish_cluster_starting(self, mock_cu, mock_create_hbase):
deploy._finish_cluster_starting(self.cluster)
mock_cu.pu.put_hive_hdfs_xml.assert_called_once_with(self.cluster)
self.assertTrue(mock_create_hbase.called)
mock_cu.start_service.assert_called_once_with(
mock_cu.get_service_by_role('AGENT', self.cluster))
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy._finish_cluster_starting")
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy._prepare_cluster")
@mock.patch("sahara.plugins.cdh.v5_5_0.deploy.CU")
def test_start_cluster(self, mock_cu, mock_prepare, mock_finish):
jns_count = 0
for i in self.instances:
if "HDFS_JOURNALNODE" in i.node_group.node_processes:
jns_count += 1
mock_cu.pu.get_jns.return_value.__len__.return_value = jns_count
deploy.start_cluster(self.cluster)
mock_prepare.assert_called_once_with(self.cluster)
mock_cu.first_run.assert_called_once_with(self.cluster)
mock_cu.pu.configure_swift.assert_called_once_with(self.cluster)
if jns_count > 0:
mock_cu.enable_namenode_ha.assert_called_once_with(self.cluster)
mock_cu.update_role_config.assert_any_call(
mock_cu.pu.get_secondarynamenode(self.cluster),
'HDFS_NAMENODE'
)
mock_cu.enable_resourcemanager_ha.assert_called_once_with(self.cluster)
mock_cu.update_role_config.assert_any_call(
mock_cu.pu.get_stdb_rm(self.cluster), 'YARN_STANDBYRM')
mock_finish.assert_called_once_with(self.cluster)
def test_get_open_ports(self):
master_ports = [
9000,
7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994,
8020, 8022, 50070, 50470,
50090, 50495,
8030, 8031, 8032, 8033, 8088,
8040, 8041, 8042,
10020, 19888,
9083,
10000,
8888,
11000, 11001,
18088,
2181, 3181, 4181, 9010,
60000,
60020,
41414,
8038,
8983, 8984,
8005, 12000,
25020, 26000,
25010, 24000,
21050, 21000, 23000, 25000, 28000, 22000,
16000, 16001
]
deploy.get_open_ports(self.master.node_group)
self.assertItemsEqual(master_ports,
deploy.get_open_ports(self.master.node_group))
worker_ports = [
9000,
50010, 1004, 50075, 1006, 50020,
8480, 8481, 8485,
8040, 8041, 8042,
8030, 8031, 8032, 8033, 8088
]
self.assertItemsEqual(worker_ports,
deploy.get_open_ports(self.worker.node_group))

View File

@ -1,200 +0,0 @@
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara import exceptions as ex
from sahara.plugins import base as pb
from sahara.plugins.cdh.v5_5_0 import edp_engine
from sahara.plugins import exceptions as pl_ex
from sahara.tests.unit import base as sahara_base
from sahara.tests.unit.plugins.cdh import utils as c_u
from sahara.utils import edp
def get_cluster(version='5.5.0'):
cluster = c_u.get_fake_cluster(plugin_name='CDH', hadoop_version=version)
return cluster
class EdpEngineTestV550(sahara_base.SaharaTestCase):
def setUp(self):
super(EdpEngineTestV550, self).setUp()
pb.setup_plugins()
def test_get_hdfs_user(self):
eng = edp_engine.EdpOozieEngine(get_cluster())
self.assertEqual('hdfs', eng.get_hdfs_user())
@mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')
def test_create_hdfs_dir(self, create_dir_hadoop2):
eng = edp_engine.EdpOozieEngine(get_cluster())
remote = mock.Mock()
dir_name = mock.Mock()
eng.create_hdfs_dir(remote, dir_name)
create_dir_hadoop2.assert_called_once_with(remote,
dir_name,
eng.get_hdfs_user())
def test_get_oozie_server_uri(self):
cluster = get_cluster()
eng = edp_engine.EdpOozieEngine(cluster)
uri = eng.get_oozie_server_uri(cluster)
self.assertEqual("http://1.2.3.5:11000/oozie", uri)
def test_get_name_node_uri(self):
cluster = get_cluster()
eng = edp_engine.EdpOozieEngine(cluster)
uri = eng.get_name_node_uri(cluster)
self.assertEqual("hdfs://master_inst.novalocal:8020", uri)
# has HDFS_JOURNALNODE
cluster = get_cluster()
jns_node_group = mock.MagicMock()
jns_node_group.node_processes = ['HDFS_JOURNALNODE']
jns_node_group.instances = [mock.Mock()]
list.append(cluster.node_groups, jns_node_group)
uri = eng.get_name_node_uri(cluster)
self.assertEqual("hdfs://nameservice01", uri)
def test_get_resource_manager_uri(self):
cluster = get_cluster()
eng = edp_engine.EdpOozieEngine(cluster)
uri = eng.get_resource_manager_uri(cluster)
self.assertEqual("master_inst.novalocal:8032", uri)
def test_get_oozie_server(self):
cluster = get_cluster()
eng = edp_engine.EdpOozieEngine(cluster)
actual = eng.get_oozie_server(cluster)
expect = cluster.node_groups[1].instances[0]
self.assertEqual(expect, actual)
@mock.patch('sahara.service.edp.oozie.engine.'
'OozieJobEngine.validate_job_execution')
def test_validate_job_execution(self, c):
cluster = get_cluster()
eng = edp_engine.EdpOozieEngine(cluster)
eng.validate_job_execution(cluster, mock.Mock(), mock.Mock())
# more than one oozie server
dict.__setitem__(cluster.node_groups[1], 'count', 2)
self.assertRaises(pl_ex.InvalidComponentCountException,
eng.validate_job_execution, cluster,
mock.Mock(), mock.Mock())
@mock.patch(
'sahara.plugins.cdh.confighints_helper.get_possible_hive_config_from',
return_value={})
def test_get_possible_job_config_hive(self,
get_possible_hive_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_HIVE)
get_possible_hive_config_from.assert_called_once_with(
'plugins/cdh/v5_5_0/resources/hive-site.xml')
self.assertEqual(expected_config, actual_config)
@mock.patch('sahara.plugins.cdh.v5_5_0.edp_engine.EdpOozieEngine')
def test_get_possible_job_config_java(self, BaseCDHEdpOozieEngine):
expected_config = {'job_config': {}}
BaseCDHEdpOozieEngine.get_possible_job_config.return_value = (
expected_config)
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_JAVA)
BaseCDHEdpOozieEngine.get_possible_job_config.assert_called_once_with(
edp.JOB_TYPE_JAVA)
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.cdh.confighints_helper.'
'get_possible_mapreduce_config_from',
return_value={})
def test_get_possible_job_config_mapreduce(
self, get_possible_mapreduce_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_MAPREDUCE)
get_possible_mapreduce_config_from.assert_called_once_with(
'plugins/cdh/v5_5_0/resources/mapred-site.xml')
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.cdh.confighints_helper.'
'get_possible_mapreduce_config_from',
return_value={})
def test_get_possible_job_config_mapreduce_streaming(
self, get_possible_mapreduce_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_MAPREDUCE_STREAMING)
get_possible_mapreduce_config_from.assert_called_once_with(
'plugins/cdh/v5_5_0/resources/mapred-site.xml')
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.cdh.confighints_helper.get_possible_pig_config_from',
return_value={})
def test_get_possible_job_config_pig(self,
get_possible_pig_config_from):
expected_config = {'job_config': {}}
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_PIG)
get_possible_pig_config_from.assert_called_once_with(
'plugins/cdh/v5_5_0/resources/mapred-site.xml')
self.assertEqual(expected_config, actual_config)
@mock.patch('sahara.plugins.cdh.v5_5_0.edp_engine.EdpOozieEngine')
def test_get_possible_job_config_shell(self, BaseCDHEdpOozieEngine):
expected_config = {'job_config': {}}
BaseCDHEdpOozieEngine.get_possible_job_config.return_value = (
expected_config)
actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
edp.JOB_TYPE_SHELL)
BaseCDHEdpOozieEngine.get_possible_job_config.assert_called_once_with(
edp.JOB_TYPE_SHELL)
self.assertEqual(expected_config, actual_config)
@mock.patch('sahara.plugins.utils.get_config_value_or_default')
@mock.patch('sahara.plugins.utils.get_instance')
@mock.patch('sahara.service.edp.spark.engine.'
'SparkJobEngine.validate_job_execution')
def test_spark_engine_validate_job_execution(self,
validate_job_execution,
get_instance,
get_config_value_or_default):
# version unsupported
cluster = get_cluster(version='5.4.0')
eng = edp_engine.EdpSparkEngine(cluster)
self.assertRaises(ex.InvalidDataException,
eng.validate_job_execution, cluster,
mock.Mock(), mock.Mock())
# none yarn history server
cluster = get_cluster()
eng = edp_engine.EdpSparkEngine(cluster)
self.assertRaises(pl_ex.InvalidComponentCountException,
eng.validate_job_execution, cluster,
mock.Mock(), mock.Mock())
# valid
cluster = get_cluster()
yarn_history_node_group = mock.Mock()
yarn_history_node_group.node_processes = ['SPARK_YARN_HISTORY_SERVER']
yarn_history_node_group.count = 1
list.append(cluster.node_groups, yarn_history_node_group)
eng = edp_engine.EdpSparkEngine(cluster)
eng.validate_job_execution(cluster, mock.Mock(), mock.Mock())

View File

@ -1,25 +0,0 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.tests.unit.plugins.cdh import base_plugin_utils_test
class TestPluginUtilsV550(base_plugin_utils_test.TestPluginUtilsHigherThanV5):
def setUp(self):
super(TestPluginUtilsV550, self).setUp()
self.plug_utils = pu.PluginUtilsV550()
self.version = "v5_5_0"

View File

@ -1,25 +0,0 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.tests.unit.plugins.cdh import base_plugin_utils_test
class TestPluginUtilsV550(base_plugin_utils_test.TestPluginUtilsHigherThanV5):
def setUp(self):
super(TestPluginUtilsV550, self).setUp()
self.plug_utils = pu.PluginUtilsV550()
self.version = "v5_5_0"

View File

@ -1,69 +0,0 @@
# Copyright (c) 2015 ISPRAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from sahara import conductor as cond
from sahara import context
from sahara.plugins import base as pb
from sahara.plugins import exceptions as ex
from sahara.service.edp.spark import engine
from sahara.tests.unit import base
from sahara.utils import edp
conductor = cond.API
class SparkPluginTest(base.SaharaWithDbTestCase):
def setUp(self):
super(SparkPluginTest, self).setUp()
self.override_config("plugins", ["cdh"])
pb.setup_plugins()
def test_plugin_edp_engine_no_spark(self):
cluster_dict = {
'name': 'cluster',
'plugin_name': 'cdh',
'hadoop_version': '5.5.0',
'default_image_id': 'image'}
job = mock.Mock()
job.type = edp.JOB_TYPE_SPARK
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
edp_engine = plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK)
with testtools.ExpectedException(
ex.InvalidComponentCountException,
value_re="Hadoop cluster should contain 1 "
"SPARK_YARN_HISTORY_SERVER component\(s\). Actual "
"SPARK_YARN_HISTORY_SERVER count is 0\nError ID: .*"):
edp_engine.validate_job_execution(cluster, job, mock.Mock())
def test_plugin_edp_engine(self):
cluster_dict = {
'name': 'cluster',
'plugin_name': 'cdh',
'hadoop_version': '5.5.0',
'default_image_id': 'image'}
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
self.assertIsInstance(
plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK),
engine.SparkJobEngine)

View File

@ -1,24 +0,0 @@
# Copyright (c) 2015 Intel Corpration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import validation
from sahara.tests.unit.plugins.cdh import base_validation_tests as bvt
class ValidationTestCase(bvt.BaseValidationTestCase):
def setUp(self):
super(ValidationTestCase, self).setUp()
self.module = validation.ValidatorV550

View File

@ -1,140 +0,0 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from sahara.plugins.cdh.v5_5_0 import edp_engine
from sahara.plugins.cdh.v5_5_0 import versionhandler
from sahara.tests.unit import base
class VersionHandlerTestCase(base.SaharaTestCase):
plugin_path = "sahara.plugins.cdh.v5_5_0."
cloudera_utils_path = plugin_path + "cloudera_utils.ClouderaUtilsV550."
plugin_utils_path = plugin_path + "plugin_utils.PluginUtilsV550."
def setUp(self):
super(VersionHandlerTestCase, self).setUp()
self.vh = versionhandler.VersionHandler()
def test_get_node_processes(self):
processes = self.vh.get_node_processes()
for k, v in six.iteritems(processes):
for p in v:
self.assertIsInstance(p, str)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(plugin_path + "deploy.configure_cluster")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={"fake_cm_info": "fake"})
def test_config_cluster(self, get_cm_info, configure_cluster,
ctx, cluster_update):
cluster = mock.Mock()
self.vh.configure_cluster(cluster)
configure_cluster.assert_called_once_with(cluster)
cluster_update.assert_called_once_with(
ctx(), cluster,
{'info': {"fake_cm_info": "fake"}})
@mock.patch(plugin_path + "deploy.start_cluster")
def test_start_cluster(self, start_cluster):
cluster = mock.Mock()
self.vh._set_cluster_info = mock.Mock()
self.vh.start_cluster(cluster)
start_cluster.assert_called_once_with(cluster)
self.vh._set_cluster_info.assert_called_once_with(cluster)
@mock.patch(plugin_path + "deploy.decommission_cluster")
def test_decommission_nodes(self, decommission_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.decommission_nodes(cluster, instances)
decommission_cluster.assert_called_once_with(cluster,
instances)
@mock.patch(plugin_path + "deploy.scale_cluster")
def test_scale_cluster(self, scale_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.scale_cluster(cluster, instances)
scale_cluster.assert_called_once_with(cluster, instances)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={})
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
cluster_update.assert_called_once_with(ctx(), cluster, info)
@mock.patch("sahara.plugins.utils.get_instance")
@mock.patch("sahara.plugins.utils.get_config_value_or_default")
@mock.patch("sahara.service.edp.job_utils.get_plugin")
def test_get_edp_engine(self, get_plugin, get_config_value_or_default,
get_instance):
cluster = mock.Mock()
job_type = 'Java'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpOozieEngine)
job_type = 'Spark'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpSparkEngine)
job_type = 'unsupported'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsNone(ret)
def test_get_edp_job_types(self):
ret = self.vh.get_edp_job_types()
expect = edp_engine.EdpOozieEngine.get_supported_job_types() + \
edp_engine.EdpSparkEngine.get_supported_job_types()
self.assertEqual(expect, ret)
@mock.patch(plugin_path +
"edp_engine.EdpOozieEngine.get_possible_job_config",
return_value={'job_config': {}})
def test_edp_config_hints(self, get_possible_job_config):
job_type = mock.Mock()
ret = self.vh.get_edp_config_hints(job_type)
get_possible_job_config.assert_called_once_with(job_type)
self.assertEqual(ret, {'job_config': {}})
@mock.patch(plugin_path + "deploy.get_open_ports", return_value=[1234])
def test_get_open_ports(self, get_open_ports):
node_group = mock.Mock()
ret = self.vh.get_open_ports(node_group)
get_open_ports.assert_called_once_with(node_group)
self.assertEqual(ret, [1234])
@mock.patch(plugin_utils_path + "recommend_configs")
def test_recommend_configs(self, recommend_configs):
cluster = mock.Mock()
scaling = mock.Mock()
self.vh.get_plugin_configs = mock.Mock()
self.vh.recommend_configs(cluster, scaling)
recommend_configs.assert_called_once_with(cluster,
self.vh.get_plugin_configs(),
scaling)

View File

@ -18,7 +18,6 @@ import testtools
from sahara import conductor as cond
from sahara import context
from sahara import exceptions as ex
from sahara.plugins import base as pb
from sahara.plugins import exceptions as pe
from sahara.plugins.spark import plugin as pl
@ -45,29 +44,6 @@ class SparkPluginTest(base.SaharaWithDbTestCase):
'default_image_id': 'image'}
return cluster_dict
def test_plugin09_edp_engine_validation(self):
cluster_dict = self._init_cluster_dict('0.9.1')
job = mock.Mock()
job.type = edp.JOB_TYPE_SPARK
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
edp_engine = plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK)
with testtools.ExpectedException(
ex.InvalidDataException,
value_re="Spark 1.3.1 or higher required to run "
"Spark jobs\nError ID: .*"):
edp_engine.validate_job_execution(cluster, job, mock.Mock())
def test_plugin10_edp_engine(self):
self._test_engine('1.3.1', edp.JOB_TYPE_SPARK,
engine.SparkJobEngine)
def test_plugin10_shell_engine(self):
self._test_engine('1.3.1', edp.JOB_TYPE_SHELL,
engine.SparkShellJobEngine)
def test_plugin11_edp_engine(self):
self._test_engine('1.6.0', edp.JOB_TYPE_SPARK,
engine.SparkJobEngine)
@ -99,19 +75,6 @@ class SparkPluginTest(base.SaharaWithDbTestCase):
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
self.assertIsInstance(plugin.get_edp_engine(cluster, job_type), eng)
def test_plugin13_edp_engine(self):
cluster_dict = {
'name': 'cluster',
'plugin_name': 'spark',
'hadoop_version': '1.3.1',
'default_image_id': 'image'}
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
self.assertIsInstance(
plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK),
engine.SparkJobEngine)
def test_cleanup_configs(self):
remote = mock.Mock()
instance = mock.Mock()
@ -122,7 +85,7 @@ class SparkPluginTest(base.SaharaWithDbTestCase):
'cron': 'cron_text'}}
instance.node_group.node_processes = ["master"]
instance.node_group.id = id
cluster_dict = self._init_cluster_dict('1.3.1')
cluster_dict = self._init_cluster_dict('2.2')
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
@ -188,7 +151,7 @@ class SparkValidationTest(base.SaharaTestCase):
lst.append(self.ng[i])
return tu.create_cluster("cluster1", "tenant1", "spark",
"1.60", lst, **kwargs)
"2.2", lst, **kwargs)
def _validate_case(self, *args):
cl = self._create_cluster(*args)
@ -203,8 +166,6 @@ class SparkProviderTest(base.SaharaTestCase):
provider = pl.SparkProvider()
res = provider.get_edp_job_types()
self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],
res['1.3.1'])
self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],
res['1.6.0'])
self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],
@ -215,18 +176,10 @@ class SparkProviderTest(base.SaharaTestCase):
def test_edp_config_hints(self):
provider = pl.SparkProvider()
res = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, "1.3.1")
self.assertEqual({'configs': {}, 'args': [], 'params': {}},
res['job_config'])
res = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, "1.6.0")
self.assertEqual({'configs': {}, 'args': [], 'params': {}},
res['job_config'])
res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, "1.3.1")
self.assertEqual({'args': [], 'configs': []},
res['job_config'])
res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, "1.6.0")
self.assertEqual({'args': [], 'configs': []},
res['job_config'])

View File

@ -21,17 +21,11 @@ from sahara.plugins.storm import plugin as s_plugin
class TestStormConfigHelper(testcase.TestCase):
def test_generate_storm_config(self):
STORM_092 = '0.9.2'
STORM_101 = '1.0.1'
STORM_110 = '1.1.0'
tested_versions = []
master_hostname = "s-master"
zk_hostnames = ["s-zoo"]
configs_092 = s_config.generate_storm_config(
master_hostname, zk_hostnames, STORM_092)
self.assertIn('nimbus.host', configs_092.keys())
self.assertNotIn('nimbus.seeds', configs_092.keys())
tested_versions.append(STORM_092)
configs_101 = s_config.generate_storm_config(
master_hostname, zk_hostnames, STORM_101)
self.assertNotIn('nimbus.host', configs_101.keys())

View File

@ -69,14 +69,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):
'node_processes': ['zookeeper']}
]
cluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')
cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
cluster_data_092['node_groups'] = data
cluster_data_101['node_groups'] = data
cluster_data_110['node_groups'] = data
clusters = [cluster_data_092, cluster_data_101, cluster_data_110]
clusters = [cluster_data_101, cluster_data_110]
for cluster_data in clusters:
cluster = conductor.cluster_create(context.ctx(), cluster_data)
@ -90,7 +88,7 @@ class StormPluginTest(base.SaharaWithDbTestCase):
@mock.patch("sahara.plugins.storm.plugin.utils")
def test_validate(self, mock_utils):
cluster_data = self._get_cluster('cluster', '0.9.2')
cluster_data = self._get_cluster('cluster', '1.1.0')
cluster = conductor.cluster_create(context.ctx(), cluster_data)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
@ -162,14 +160,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):
'node_processes': ['supervisor']}
]
cluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')
cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
cluster_data_092['node_groups'] = data
cluster_data_101['node_groups'] = data
cluster_data_110['node_groups'] = data
clusters = [cluster_data_092, cluster_data_101, cluster_data_110]
clusters = [cluster_data_101, cluster_data_110]
for cluster_data in clusters:
cluster = conductor.cluster_create(context.ctx(), cluster_data)
@ -196,14 +192,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):
'node_processes': ['zookeeper']}
]
cluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')
cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
cluster_data_092['node_groups'] = data
cluster_data_101['node_groups'] = data
cluster_data_110['node_groups'] = data
clusters = [cluster_data_092, cluster_data_101, cluster_data_110]
clusters = [cluster_data_101, cluster_data_110]
for cluster_data in clusters:
cluster = conductor.cluster_create(context.ctx(), cluster_data)
@ -234,14 +228,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):
'node_processes': ['nimbus']}
]
cluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')
cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
cluster_data_092['node_groups'] = data
cluster_data_101['node_groups'] = data
cluster_data_110['node_groups'] = data
clusters = [cluster_data_092, cluster_data_101, cluster_data_110]
clusters = [cluster_data_101, cluster_data_110]
for cluster_data in clusters:
cluster = conductor.cluster_create(context.ctx(), cluster_data)
@ -269,14 +261,6 @@ class StormPluginTest(base.SaharaWithDbTestCase):
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
self.assertIsInstance(plugin.get_edp_engine(cluster, job_type), eng)
def test_plugin092_edp_storm_engine(self):
self._test_engine('0.9.2', edp.JOB_TYPE_STORM,
engine.StormJobEngine)
def test_plugin092_edp_storm_pyleus_engine(self):
self._test_engine('0.9.2', edp.JOB_TYPE_PYLEUS,
engine.StormJobEngine)
def test_plugin101_edp_storm_engine(self):
self._test_engine('1.0.1', edp.JOB_TYPE_STORM,
engine.StormJobEngine)

View File

@ -188,7 +188,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
ng = tu.make_ng_dict('master', 42, [], 1,
instances=[tu.make_inst_dict('id', 'name')])
get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
"spark", "1.3.1", [ng])
"spark", "2.2", [ng])
# Everything is okay, spark cluster supports EDP by default
# because cluster requires a master and slaves >= 1
@ -252,7 +252,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
instances=[tu.make_inst_dict('id', 'name')])
cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
"spark", "1.3.1", [ng])
"spark", "2.2", [ng])
self._assert_create_object_validation(
data={

Some files were not shown because too many files have changed in this diff Show More