author    Telles Nobrega <tenobreg@redhat.com>    2018-06-07 16:41:03 -0300
committer Telles Nobrega <tenobreg@redhat.com>    2018-06-13 11:06:14 -0300
commit    28eb4ce1b725092e99a08cea9a2e7c9e755e5172 (patch)
tree      4d75314c3bd73a3213fea298b405acf3fbb38cb4
parent    4f074856b6edb59d0c6d6988e0eaf683b6a68b16 (diff)
Adding CDH 5.13
We are adding a new version of CDH to the list of supported plugins.

Change-Id: Ia55c6729dc6c4640b83e1d2d4dba88d8bba29e36
Story: #2002183
Task: #20056
Notes (review):
    Code-Review+2: Luigi Toscano <ltoscano@redhat.com>
    Code-Review+2: Jeremy Freudberg <jeremyfreudberg@gmail.com>
    Workflow+1: Jeremy Freudberg <jeremyfreudberg@gmail.com>
    Verified+2: Zuul
    Submitted-by: Zuul
    Submitted-at: Wed, 13 Jun 2018 17:35:54 +0000
    Reviewed-on: https://review.openstack.org/573802
    Project: openstack/sahara
    Branch: refs/heads/master
-rw-r--r--  releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml | 3
-rw-r--r--  sahara/plugins/cdh/config_helper.py | 3
-rw-r--r--  sahara/plugins/cdh/plugin.py | 1
-rw-r--r--  sahara/plugins/cdh/plugin_utils.py | 18
-rw-r--r--  sahara/plugins/cdh/v5_13_0/__init__.py | 0
-rw-r--r--  sahara/plugins/cdh/v5_13_0/cloudera_utils.py | 28
-rw-r--r--  sahara/plugins/cdh/v5_13_0/config_helper.py | 89
-rw-r--r--  sahara/plugins/cdh/v5_13_0/deploy.py | 168
-rw-r--r--  sahara/plugins/cdh/v5_13_0/edp_engine.py | 47
-rw-r--r--  sahara/plugins/cdh/v5_13_0/images.py | 43
-rw-r--r--  sahara/plugins/cdh/v5_13_0/plugin_utils.py | 23
-rwxr-xr-x  sahara/plugins/cdh/v5_13_0/resources/cdh_config.py | 68
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/flume-agent.json | 440
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/flume-service.json | 164
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-gateway.json | 122
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-hbaserestserver.json | 452
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-hbasethriftserver.json | 458
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-master.json | 530
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-regionserver.json | 884
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hbase-service.json | 692
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-balancer.json | 128
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-datanode.json | 656
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-failovercontroller.json | 344
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-gateway.json | 116
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-httpfs.json | 464
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-journalnode.json | 458
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-namenode.json | 884
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-nfsgateway.json | 410
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-secondarynamenode.json | 458
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hdfs-service.json | 1352
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-gateway.json | 98
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-hivemetastore.json | 434
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-hiveserver2.json | 866
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-metastore-sentry-safety.xml | 15
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-server2-sentry-safety.xml | 12
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-service.json | 656
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-site.xml | 61
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hive-webhcat.json | 356
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hue-hue_load_balancer.json | 260
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hue-hue_server.json | 404
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hue-kt_renewer.json | 212
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/hue-service.json | 692
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/centos/disable_firewall | 20
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/centos/selinux_permissive | 13
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/centos/turn_off_services | 33
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/centos/wget_repo | 27
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/common/add_jar | 22
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/common/install_cloudera | 17
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/common/install_extjs | 22
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/common/unlimited_security_artifacts | 11
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/image.yaml | 128
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/ubuntu/turn_off_services | 33
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/images/ubuntu/wget_repo | 31
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/impala-catalogserver.json | 542
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/impala-impalad.json | 746
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/impala-llama.json | 518
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/impala-service.json | 572
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/impala-statestore.json | 404
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kafka-gateway.json | 62
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kafka-kafka_broker.json | 584
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kafka-kafka_mirror_maker.json | 644
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kafka-service.json | 404
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kms-kms.json | 656
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/kms-service.json | 86
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/ks_indexer-hbase_indexer.json | 374
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/ks_indexer-service.json | 188
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/mapred-site.xml | 157
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/oozie-oozie_server.json | 674
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/oozie-service.json | 260
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/priority-one-confs.json | 45
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sentry-gateway.json | 62
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sentry-impala-client-safety.xml | 16
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sentry-sentry_server.json | 320
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sentry-service.json | 230
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/solr-gateway.json | 50
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/solr-service.json | 350
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/solr-solr_server.json | 470
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/spark-gateway.json | 242
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/spark-service.json | 170
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/spark-spark_yarn_history_server.json | 422
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sqoop-service.json | 116
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sqoop-sqoop_server.json | 410
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sqoop_client-gateway.json | 92
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/sqoop_client-service.json | 80
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/yarn-gateway.json | 530
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/yarn-jobhistory.json | 500
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/yarn-nodemanager.json | 794
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/yarn-resourcemanager.json | 686
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/yarn-service.json | 554
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/zookeeper-server.json | 608
-rw-r--r--  sahara/plugins/cdh/v5_13_0/resources/zookeeper-service.json | 236
-rw-r--r--  sahara/plugins/cdh/v5_13_0/validation.py | 21
-rw-r--r--  sahara/plugins/cdh/v5_13_0/versionhandler.py | 37
-rw-r--r--  sahara/tests/unit/plugins/cdh/test_versionfactory.py | 2
-rwxr-xr-x  tools/gate/build-images | 1
95 files changed, 27832 insertions, 9 deletions
diff --git a/releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml b/releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml
new file mode 100644
index 0000000..112248e
--- /dev/null
+++ b/releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml
@@ -0,0 +1,3 @@
---
features:
  - Adding support to CDH 5.13.0 in CDH plugin.
diff --git a/sahara/plugins/cdh/config_helper.py b/sahara/plugins/cdh/config_helper.py
index 3d19ed7..6cc7a6d 100644
--- a/sahara/plugins/cdh/config_helper.py
+++ b/sahara/plugins/cdh/config_helper.py
@@ -291,6 +291,9 @@ class ConfigHelper(object):
         return self._get_config_value(cluster,
                                       self.ENABLE_HBASE_COMMON_LIB)
 
+    def is_keytrustee_available(self):
+        return True
+
     def get_swift_lib_url(self, cluster):
         return self._get_config_value(cluster, self.SWIFT_LIB_URL)
 
diff --git a/sahara/plugins/cdh/plugin.py b/sahara/plugins/cdh/plugin.py
index e9d14a4..4f753c4 100644
--- a/sahara/plugins/cdh/plugin.py
+++ b/sahara/plugins/cdh/plugin.py
@@ -38,6 +38,7 @@ class CDHPluginProvider(p.ProvisioningPluginBase):
         deprecated = {'enabled': {'status': True},
                       'deprecated': {'status': True}}
         result['version_labels'] = {
+            '5.13.0': copy.deepcopy(default),
             '5.11.0': copy.deepcopy(default),
             '5.9.0': copy.deepcopy(default),
             '5.7.0': copy.deepcopy(default),
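Aside from the new resource files, the registration itself is one line: each supported version maps to its own deep copy of the label defaults. A minimal standalone sketch (hypothetical values, not sahara code) of why a deepcopy per entry matters:

    import copy

    default = {'enabled': {'status': True}}
    version_labels = {
        '5.13.0': copy.deepcopy(default),
        '5.11.0': copy.deepcopy(default),
    }
    # Labels can now be toggled per version without leaking into siblings:
    version_labels['5.11.0']['enabled']['status'] = False
    assert version_labels['5.13.0']['enabled']['status'] is True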
diff --git a/sahara/plugins/cdh/plugin_utils.py b/sahara/plugins/cdh/plugin_utils.py
index 5b72b7d..662b769 100644
--- a/sahara/plugins/cdh/plugin_utils.py
+++ b/sahara/plugins/cdh/plugin_utils.py
@@ -413,30 +413,32 @@ class AbstractPluginUtils(object):
             cm5_key = (
                 self.c_helper.get_cm5_key_url(cluster) or
                 self.c_helper.DEFAULT_CM5_UBUNTU_REPO_KEY_URL)
-            kms_key = (
-                self.c_helper.get_kms_key_url(cluster) or
-                self.c_helper.DEFAULT_KEY_TRUSTEE_UBUNTU_REPO_KEY_URL)
+            if self.c_helper.is_keytrustee_available():
+                kms_key = (
+                    self.c_helper.get_kms_key_url(cluster) or
+                    self.c_helper.DEFAULT_KEY_TRUSTEE_UBUNTU_REPO_KEY_URL)
+                kms_repo_url = self.c_helper.KEY_TRUSTEE_UBUNTU_REPO_URL
+                cmd.add_ubuntu_repository(r, kms_repo_url, 'kms')
+                cmd.add_apt_key(r, kms_key)
 
             cdh5_repo_content = self.c_helper.CDH5_UBUNTU_REPO
             cm5_repo_content = self.c_helper.CM5_UBUNTU_REPO
-            kms_repo_url = self.c_helper.KEY_TRUSTEE_UBUNTU_REPO_URL
 
             cmd.write_ubuntu_repository(r, cdh5_repo_content, 'cdh')
             cmd.add_apt_key(r, cdh5_key)
             cmd.write_ubuntu_repository(r, cm5_repo_content, 'cm')
             cmd.add_apt_key(r, cm5_key)
-            cmd.add_ubuntu_repository(r, kms_repo_url, 'kms')
-            cmd.add_apt_key(r, kms_key)
             cmd.update_repository(r)
 
         if cmd.is_centos_os(r):
             cdh5_repo_content = self.c_helper.CDH5_CENTOS_REPO
             cm5_repo_content = self.c_helper.CM5_CENTOS_REPO
-            kms_repo_url = self.c_helper.KEY_TRUSTEE_CENTOS_REPO_URL
+            if self.c_helper.is_keytrustee_available():
+                kms_repo_url = self.c_helper.KEY_TRUSTEE_CENTOS_REPO_URL
+                cmd.add_centos_repository(r, kms_repo_url, 'kms')
 
             cmd.write_centos_repository(r, cdh5_repo_content, 'cdh')
             cmd.write_centos_repository(r, cm5_repo_content, 'cm')
-            cmd.add_centos_repository(r, kms_repo_url, 'kms')
             cmd.update_repository(r)
 
     def _get_config_value(self, service, name, configs, cluster=None):
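The Key Trustee changes above hinge on the new is_keytrustee_available() hook: the base ConfigHelper keeps returning True, while the 5.13.0 helper added later in this commit returns False, so this shared repo-setup code simply skips the KMS repository for 5.13.0. A minimal standalone sketch of the pattern, with the repo commands reduced to a list:

    class ConfigHelper(object):
        def is_keytrustee_available(self):
            return True  # base default: older versions still configure KMS

    class ConfigHelperV5130(ConfigHelper):
        def is_keytrustee_available(self):
            return False  # the keytrustee repo is not used for CDH 5.13.0

    def configure_repos(helper):
        repos = ['cdh', 'cm']
        if helper.is_keytrustee_available():  # same gate as plugin_utils.py
            repos.append('kms')
        return repos

    assert configure_repos(ConfigHelperV5130()) == ['cdh', 'cm']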
diff --git a/sahara/plugins/cdh/v5_13_0/__init__.py b/sahara/plugins/cdh/v5_13_0/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/__init__.py
diff --git a/sahara/plugins/cdh/v5_13_0/cloudera_utils.py b/sahara/plugins/cdh/v5_13_0/cloudera_utils.py
new file mode 100644
index 0000000..d9ca851
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/cloudera_utils.py
@@ -0,0 +1,28 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins.cdh import cloudera_utils as cu
from sahara.plugins.cdh.v5_13_0 import config_helper
from sahara.plugins.cdh.v5_13_0 import plugin_utils as pu
from sahara.plugins.cdh.v5_13_0 import validation


class ClouderaUtilsV5130(cu.ClouderaUtils):

    def __init__(self):
        cu.ClouderaUtils.__init__(self)
        self.pu = pu.PluginUtilsV5130()
        self.validator = validation.ValidatorV5130
        self.c_helper = config_helper.ConfigHelperV5130()
diff --git a/sahara/plugins/cdh/v5_13_0/config_helper.py b/sahara/plugins/cdh/v5_13_0/config_helper.py
new file mode 100644
index 0000000..bb17857
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/config_helper.py
@@ -0,0 +1,89 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins.cdh import config_helper as c_h
from sahara.plugins import provisioning as p
from sahara.utils import files as f


class ConfigHelperV5130(c_h.ConfigHelper):
    path_to_config = 'plugins/cdh/v5_13_0/resources/'

    CDH5_UBUNTU_REPO = (
        'deb [arch=amd64] http://archive.cloudera.com/cdh5'
        '/ubuntu/xenial/amd64/cdh trusty-cdh5.13.0 contrib'
        '\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
        '/xenial/amd64/cdh trusty-cdh5.13.0 contrib')

    DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = (
        'http://archive.cloudera.com/cdh5/ubuntu'
        '/xenial/amd64/cdh/archive.key')

    CM5_UBUNTU_REPO = (
        'deb [arch=amd64] http://archive.cloudera.com/cm5'
        '/ubuntu/xenial/amd64/cm trusty-cm5.13.0 contrib'
        '\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
        '/xenial/amd64/cm trusty-cm5.13.0 contrib')

    DEFAULT_CM5_UBUNTU_REPO_KEY_URL = (
        'http://archive.cloudera.com/cm5/ubuntu'
        '/xenial/amd64/cm/archive.key')

    CDH5_CENTOS_REPO = (
        '[cloudera-cdh5]'
        '\nname=Cloudera\'s Distribution for Hadoop, Version 5'
        '\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
        '/x86_64/cdh/5.13.0/'
        '\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
        '/x86_64/cdh/RPM-GPG-KEY-cloudera'
        '\ngpgcheck = 1')

    CM5_CENTOS_REPO = (
        '[cloudera-manager]'
        '\nname=Cloudera Manager'
        '\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
        '/x86_64/cm/5.13.0/'
        '\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
        '/x86_64/cm/RPM-GPG-KEY-cloudera'
        '\ngpgcheck = 1')

    DEFAULT_SWIFT_LIB_URL = (
        'https://repository.cloudera.com/artifactory/repo/org'
        '/apache/hadoop/hadoop-openstack/2.6.0-cdh5.13.0'
        '/hadoop-openstack-2.6.0-cdh5.13.0.jar')

    SWIFT_LIB_URL = p.Config(
        'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
        default_value=DEFAULT_SWIFT_LIB_URL,
        description=("Library that adds Swift support to CDH. The file"
                     " will be downloaded by VMs."))

    HIVE_SERVER2_SENTRY_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'hive-server2-sentry-safety.xml')

    HIVE_METASTORE_SENTRY_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'hive-metastore-sentry-safety.xml')

    SENTRY_IMPALA_CLIENT_SAFETY_VALVE = f.get_file_text(
        path_to_config + 'sentry-impala-client-safety.xml')

    def is_keytrustee_available(self):
        return False

    def __init__(self):
        super(ConfigHelperV5130, self).__init__()
        self.priority_one_confs = self._load_json(
            self.path_to_config + 'priority-one-confs.json')
        self._init_all_ng_plugin_configs()
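For reference, expanding the embedded newlines in the CM5_CENTOS_REPO constant above yields the yum repo file that ends up on CentOS nodes; this is just the constant rendered, nothing beyond what the string contains:

    [cloudera-manager]
    name=Cloudera Manager
    baseurl=http://archive.cloudera.com/cm5/redhat/6/x86_64/cm/5.13.0/
    gpgkey = http://archive.cloudera.com/cm5/redhat/6/x86_64/cm/RPM-GPG-KEY-cloudera
    gpgcheck = 1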
diff --git a/sahara/plugins/cdh/v5_13_0/deploy.py b/sahara/plugins/cdh/v5_13_0/deploy.py
new file mode 100644
index 0000000..07453d2
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/deploy.py
@@ -0,0 +1,168 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.i18n import _
from sahara.plugins.cdh import commands as cmd
from sahara.plugins.cdh import deploy as common_deploy
from sahara.plugins.cdh.v5_13_0 import cloudera_utils as cu
from sahara.plugins import utils as gu
from sahara.service.edp import hdfs_helper as h
from sahara.utils import cluster_progress_ops as cpo

CU = cu.ClouderaUtilsV5130()

PACKAGES = common_deploy.PACKAGES


def configure_cluster(cluster):
    instances = gu.get_instances(cluster)

    if not cmd.is_pre_installed_cdh(CU.pu.get_manager(cluster).remote()):
        CU.pu.configure_os(instances)
        CU.pu.install_packages(instances, PACKAGES)

    CU.pu.start_cloudera_agents(instances)
    CU.pu.start_cloudera_manager(cluster)
    CU.update_cloudera_password(cluster)
    CU.configure_rack_awareness(cluster)
    CU.await_agents(cluster, instances)
    CU.create_mgmt_service(cluster)
    CU.create_services(cluster)
    CU.configure_services(cluster)
    CU.configure_instances(instances, cluster)
    CU.deploy_configs(cluster)


@cpo.event_wrapper(
    True, step=_("Start roles: NODEMANAGER, DATANODE"), param=('cluster', 0))
def _start_roles(cluster, instances):
    for instance in instances:
        if 'HDFS_DATANODE' in instance.node_group.node_processes:
            hdfs = CU.get_service_by_role('DATANODE', instance=instance)
            CU.start_roles(hdfs, CU.pu.get_role_name(instance, 'DATANODE'))

        if 'YARN_NODEMANAGER' in instance.node_group.node_processes:
            yarn = CU.get_service_by_role('NODEMANAGER', instance=instance)
            CU.start_roles(yarn, CU.pu.get_role_name(instance, 'NODEMANAGER'))


def scale_cluster(cluster, instances):
    if not instances:
        return

    if not cmd.is_pre_installed_cdh(instances[0].remote()):
        CU.pu.configure_os(instances)
        CU.pu.install_packages(instances, PACKAGES)

    CU.pu.start_cloudera_agents(instances)
    CU.await_agents(cluster, instances)
    CU.configure_rack_awareness(cluster)
    CU.configure_instances(instances, cluster)
    CU.update_configs(instances)
    common_deploy.prepare_scaling_kerberized_cluster(
        cluster, CU, instances)

    CU.pu.configure_swift(cluster, instances)
    _start_roles(cluster, instances)
    CU.refresh_datanodes(cluster)
    CU.refresh_yarn_nodes(cluster)
    CU.restart_stale_services(cluster)


def decommission_cluster(cluster, instances):
    dns = []
    dns_to_delete = []
    nms = []
    nms_to_delete = []
    for i in instances:
        if 'HDFS_DATANODE' in i.node_group.node_processes:
            dns.append(CU.pu.get_role_name(i, 'DATANODE'))
            dns_to_delete.append(
                CU.pu.get_role_name(i, 'HDFS_GATEWAY'))

        if 'YARN_NODEMANAGER' in i.node_group.node_processes:
            nms.append(CU.pu.get_role_name(i, 'NODEMANAGER'))
            nms_to_delete.append(
                CU.pu.get_role_name(i, 'YARN_GATEWAY'))

    if dns:
        CU.decommission_nodes(
            cluster, 'DATANODE', dns, dns_to_delete)

    if nms:
        CU.decommission_nodes(
            cluster, 'NODEMANAGER', nms, nms_to_delete)

    CU.delete_instances(cluster, instances)

    CU.refresh_datanodes(cluster)
    CU.refresh_yarn_nodes(cluster)
    CU.restart_stale_services(cluster)


@cpo.event_wrapper(True, step=_("Prepare cluster"), param=('cluster', 0))
def _prepare_cluster(cluster):
    if CU.pu.get_oozie(cluster):
        CU.pu.install_extjs(cluster)

    if CU.pu.get_hive_metastore(cluster):
        CU.pu.configure_hive(cluster)

    if CU.pu.get_sentry(cluster):
        CU.pu.configure_sentry(cluster)


@cpo.event_wrapper(
    True, step=_("Finish cluster starting"), param=('cluster', 0))
def _finish_cluster_starting(cluster):
    if CU.pu.get_hive_metastore(cluster):
        CU.pu.put_hive_hdfs_xml(cluster)

    server = CU.pu.get_hbase_master(cluster)
    if CU.pu.c_helper.is_hbase_common_lib_enabled(cluster) and server:
        with server.remote() as r:
            h.create_hbase_common_lib(r)

    if CU.pu.get_flumes(cluster):
        flume = CU.get_service_by_role('AGENT', cluster)
        CU.start_service(flume)


def start_cluster(cluster):
    _prepare_cluster(cluster)

    CU.first_run(cluster)

    CU.pu.configure_swift(cluster)

    if len(CU.pu.get_jns(cluster)) > 0:
        CU.enable_namenode_ha(cluster)
        # updating configs for NameNode role on needed nodes
        CU.update_role_config(CU.pu.get_secondarynamenode(cluster),
                              'HDFS_NAMENODE')

    if CU.pu.get_stdb_rm(cluster):
        CU.enable_resourcemanager_ha(cluster)
        # updating configs for ResourceManager on needed nodes
        CU.update_role_config(CU.pu.get_stdb_rm(cluster), 'YARN_STANDBYRM')

    _finish_cluster_starting(cluster)

    common_deploy.setup_kerberos_for_cluster(cluster, CU)


def get_open_ports(node_group):
    ports = common_deploy.get_open_ports(node_group)
    return ports
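deploy.py leans on cpo.event_wrapper to report each provisioning phase ("Start roles", "Prepare cluster", "Finish cluster starting") as a named step. A minimal stand-in for the idea, not sahara's actual implementation (which records events against the cluster object rather than printing):

    import functools

    def event_wrapper(enabled, step=None, param=None):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapped(*args, **kwargs):
                if enabled:
                    print('step started: %s' % step)
                try:
                    return fn(*args, **kwargs)
                finally:
                    if enabled:
                        print('step finished: %s' % step)
            return wrapped
        return decorator

    @event_wrapper(True, step='Prepare cluster', param=('cluster', 0))
    def prepare_cluster(cluster):
        pass

    prepare_cluster(None)  # prints the start/finish markers around the call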
diff --git a/sahara/plugins/cdh/v5_13_0/edp_engine.py b/sahara/plugins/cdh/v5_13_0/edp_engine.py
new file mode 100644
index 0000000..9d21013
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/edp_engine.py
@@ -0,0 +1,47 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins.cdh import confighints_helper as ch_helper
from sahara.plugins.cdh import edp_engine
from sahara.plugins.cdh.v5_13_0 import cloudera_utils as cu
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.utils import edp


class EdpOozieEngine(edp_engine.EdpOozieEngine):

    def __init__(self, cluster):
        super(EdpOozieEngine, self).__init__(cluster)
        self.cloudera_utils = cu.ClouderaUtilsV5130()

    @staticmethod
    def get_possible_job_config(job_type):
        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
            return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/cdh/v5_13_0/resources/hive-site.xml')}
        if edp.compare_job_type(job_type,
                                edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/cdh/v5_13_0/resources/mapred-site.xml')}
        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
            return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/cdh/v5_13_0/resources/mapred-site.xml')}
        return oozie_engine.OozieJobEngine.get_possible_job_config(job_type)


class EdpSparkEngine(edp_engine.EdpSparkEngine):

    edp_base_version = "5.13.0"
diff --git a/sahara/plugins/cdh/v5_13_0/images.py b/sahara/plugins/cdh/v5_13_0/images.py
new file mode 100644
index 0000000..933af80
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/images.py
@@ -0,0 +1,43 @@
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins import images
from sahara.plugins import utils as plugin_utils


_validator = images.SaharaImageValidator.from_yaml(
    'plugins/cdh/v5_13_0/resources/images/image.yaml',
    resource_roots=['plugins/cdh/v5_13_0/resources/images'])


def get_image_arguments():
    return _validator.get_argument_list()


def pack_image(remote, test_only=False, image_arguments=None):
    _validator.validate(remote, test_only=test_only,
                        image_arguments=image_arguments)


def validate_images(cluster, test_only=False, image_arguments=None):
    image_arguments = get_image_arguments()
    if not test_only:
        instances = plugin_utils.get_instances(cluster)
    else:
        instances = plugin_utils.get_instances(cluster)[0]
    for instance in instances:
        with instance.remote() as r:
            _validator.validate(r, test_only=test_only,
                                image_arguments=image_arguments)
diff --git a/sahara/plugins/cdh/v5_13_0/plugin_utils.py b/sahara/plugins/cdh/v5_13_0/plugin_utils.py
new file mode 100644
index 0000000..64ec8b9
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/plugin_utils.py
@@ -0,0 +1,23 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sahara.plugins.cdh import plugin_utils as pu
from sahara.plugins.cdh.v5_13_0 import config_helper


class PluginUtilsV5130(pu.AbstractPluginUtils):

    def __init__(self):
        self.c_helper = config_helper.ConfigHelperV5130()
diff --git a/sahara/plugins/cdh/v5_13_0/resources/cdh_config.py b/sahara/plugins/cdh/v5_13_0/resources/cdh_config.py
new file mode 100755
index 0000000..3c72144
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/cdh_config.py
@@ -0,0 +1,68 @@
#!/usr/bin/env python
# Copyright (c) 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cm_api.api_client import ApiResource

cloudera_user = 'cloudera'
cloudera_pass = 'cloudera'
cm_host = "localhost"
api = ApiResource(cm_host,
                  username=cloudera_user, password=cloudera_pass,  # nosec
                  version=17)

c = api.get_all_clusters()[0]
services = c.get_all_services()


def process_service(service):
    service_name = service.name
    if service_name == "spark_on_yarn":
        service_name = "spark"
    for role_cfgs in service.get_all_role_config_groups():
        role_cm_cfg = role_cfgs.get_config(view='full')
        role_cfg = parse_config(role_cm_cfg)
        role_name = role_cfgs.roleType.lower()
        write_cfg(role_cfg, '%s-%s.json' % (service_name, role_name))

    service_cm_cfg = service.get_config(view='full')[0]
    service_cfg = parse_config(service_cm_cfg)
    write_cfg(service_cfg, '%s-service.json' % service_name)


def parse_config(config):
    cfg = []
    for name, value in config.items():
        p = {
            'name': value.name,
            'value': value.default,
            'display_name': value.displayName,
            'desc': value.description
        }
        cfg.append(p)

    return cfg


def write_cfg(cfg, file_name):
    to_write = __import__('json').dumps(cfg, sort_keys=True, indent=4,
                                        separators=(',', ': '))

    with open(file_name, 'w') as f:
        f.write(to_write)

if __name__ == '__main__':
    for service in services:
        process_service(service)
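The resource JSON files that follow are generated by this script from a live Cloudera Manager via cm_api. A cm_api-free sketch of the transformation it performs (FakeParam is a hypothetical stand-in for the config entries the API returns; the output shape matches the files below):

    import json

    class FakeParam(object):
        def __init__(self, name, default, display_name, description):
            self.name = name
            self.default = default
            self.displayName = display_name
            self.description = description

    config = {'agent_name': FakeParam('agent_name', 'tier1', 'Agent Name',
                                      'Used to select an agent configuration.')}
    cfg = [{'name': v.name, 'value': v.default, 'display_name': v.displayName,
            'desc': v.description} for v in config.values()]
    print(json.dumps(cfg, sort_keys=True, indent=4, separators=(',', ': ')))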
diff --git a/sahara/plugins/cdh/v5_13_0/resources/flume-agent.json b/sahara/plugins/cdh/v5_13_0/resources/flume-agent.json
new file mode 100644
index 0000000..2b61941
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/flume-agent.json
@@ -0,0 +1,440 @@
1[
2 {
3 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
4 "display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
5 "name": "log_directory_free_space_percentage_thresholds",
6 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
7 },
8 {
9 "desc": "Used to select an agent configuration to use from flume.conf. Multiple agents can share the same agent name, in which case they are assigned the same agent configuration.",
10 "display_name": "Agent Name",
11 "name": "agent_name",
12 "value": "tier1"
13 },
14 {
15 "desc": "<p>This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message. If a log message matches multiple rules, the first matching rule is used. </p><p>Each rule has some or all of the following fields:</p><ul><li><code>alert</code> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><code>rate</code> <b>(mandatory)</b> - the maximum number of log messages matching this rule that can be sent as events every minute. If more than <code>rate</code> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><code>periodminutes</code> - the number of minutes during which the publisher will only publish <code>rate</code> events or fewer. If not specified, the default is <b>one minute</b></li><li><code>threshold</code> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><code>content</code> - match only those messages for which contents match this regular expression.</li><li><code>exceptiontype</code> - match only those messages that are part of an exception message. The exception type must match this regular expression.</li></ul><p>Example:</p><ul><li><pre>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</pre>This rule sends events to Cloudera Manager for every <code>StringIndexOutOfBoundsException</code>, up to a maximum of 10 every minute.</li><li><pre>{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"exceptiontype\": \".*\"}, {\"alert\": true, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"ERROR\"}</pre>In this example, an event generated may not be promoted to alert if an exception is in the ERROR log message, because the first rule with alert = false will match.</li></ul>",
16 "display_name": "Rules to Extract Events from Log Files",
17 "name": "log_event_whitelist",
18 "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
19 },
20 {
21 "desc": "Text that goes into morphlines.conf file used by the Flume-NG Solr sink. The text goes verbatim into the config file except that $ZK_HOST is replaced by the ZooKeeper quorum of the Solr service.",
22 "display_name": "Morphlines File",
23 "name": "agent_morphlines_conf_file",
24 "value": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Application configuration file in HOCON format (Human-Optimized Config Object Notation). \n# HOCON syntax is defined at http://github.com/typesafehub/config/blob/master/HOCON.md\n# and also used by Akka (http://www.akka.io) and Play (http://www.playframework.org/).\n# For more examples see http://doc.akka.io/docs/akka/2.1.2/general/configuration.html\n\n# morphline.conf example file\n# this is a comment\n\n# Specify server locations in a SOLR_LOCATOR variable; used later in variable substitutions:\nSOLR_LOCATOR : {\n # Name of solr collection\n collection : collection1\n \n # ZooKeeper ensemble\n zkHost : \"$ZK_HOST\"\n \n # Relative or absolute path to a directory containing conf/solrconfig.xml and conf/schema.xml\n # If this path is uncommented it takes precedence over the configuration stored in ZooKeeper. \n # solrHomeDir : \"example/solr/collection1\"\n \n # The maximum number of documents to send to Solr per network batch (throughput knob)\n # batchSize : 100\n}\n\n# Specify an array of one or more morphlines, each of which defines an ETL \n# transformation chain. A morphline consists of one or more (potentially \n# nested) commands. A morphline is a way to consume records (e.g. Flume events, \n# HDFS files or blocks), turn them into a stream of records, and pipe the stream \n# of records through a set of easily configurable transformations on it's way to \n# Solr (or a MapReduceIndexerTool RecordWriter that feeds via a Reducer into Solr).\nmorphlines : [\n {\n # Name used to identify a morphline. E.g. used if there are multiple morphlines in a \n # morphline config file\n id : morphline1 \n \n # Import all morphline commands in these java packages and their subpackages.\n # Other commands that may be present on the classpath are not visible to this morphline.\n importCommands : [\"org.kitesdk.**\", \"org.apache.solr.**\"]\n \n commands : [ \n { \n # Parse Avro container file and emit a record for each avro object\n readAvroContainer {\n # Optionally, require the input record to match one of these MIME types:\n # supportedMimeTypes : [avro/binary]\n \n # Optionally, use a custom Avro schema in JSON format inline:\n # schemaString : \"\"\"<json can go here>\"\"\"\n \n # Optionally, use a custom Avro schema file in JSON format:\n # schemaFile : /path/to/syslog.avsc\n }\n } \n \n { \n # Consume the output record of the previous command and pipe another record downstream.\n #\n # extractAvroPaths is a command that uses zero or more avro path expressions to extract \n # values from an Avro object. 
Each expression consists of a record output field name (on \n # the left side of the colon ':') as well as zero or more path steps (on the right hand \n # side), each path step separated by a '/' slash. Avro arrays are traversed with the '[]'\n # notation.\n #\n # The result of a path expression is a list of objects, each of which is added to the \n # given record output field.\n # \n # The path language supports all Avro concepts, including nested structures, records, \n # arrays, maps, unions, etc, as well as a flatten option that collects the primitives in \n # a subtree into a flat list.\n extractAvroPaths {\n flatten : false\n paths : { \n id : /id \n text : /text \n user_friends_count : /user_friends_count\n user_location : /user_location\n user_description : /user_description\n user_statuses_count : /user_statuses_count\n user_followers_count : /user_followers_count\n user_name : /user_name\n user_screen_name : /user_screen_name\n created_at : /created_at\n retweet_count : /retweet_count\n retweeted : /retweeted\n in_reply_to_user_id : /in_reply_to_user_id\n source : /source\n in_reply_to_status_id : /in_reply_to_status_id\n media_url_https : /media_url_https\n expanded_url : /expanded_url\n }\n }\n }\n \n # Consume the output record of the previous command and pipe another record downstream.\n #\n # convert timestamp field to native Solr timestamp format\n # e.g. 2012-09-06T07:14:34Z to 2012-09-06T07:14:34.000Z\n {\n convertTimestamp {\n field : created_at\n inputFormats : [\"yyyy-MM-dd'T'HH:mm:ss'Z'\", \"yyyy-MM-dd\"]\n inputTimezone : America/Los_Angeles\n# outputFormat : \"yyyy-MM-dd'T'HH:mm:ss.SSSZ\" \n outputTimezone : UTC\n }\n }\n \n # Consume the output record of the previous command and pipe another record downstream.\n #\n # Command that sanitizes record fields that are unknown to Solr schema.xml by either \n # deleting them (renameToPrefix is absent or a zero length string), or by moving them to a\n # field prefixed with the given renameToPrefix (e.g. renameToPrefix = \"ignored_\" to use \n # typical dynamic Solr fields).\n #\n # Recall that Solr throws an exception on any attempt to load a document that contains a \n # field that isn't specified in schema.xml.\n {\n sanitizeUnknownSolrFields {\n # Location from which to fetch Solr schema\n solrLocator : ${SOLR_LOCATOR}\n \n # renameToPrefix : \"ignored_\"\n }\n } \n \n # log the record at DEBUG level to SLF4J\n { logDebug { format : \"output record: {}\", args : [\"@{}\"] } } \n \n # load the record into a SolrServer or MapReduce SolrOutputFormat.\n { \n loadSolr {\n solrLocator : ${SOLR_LOCATOR}\n }\n }\n ]\n }\n]\n"
25 },
26 {
27 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Plugin directories parameter.",
28 "display_name": "Suppress Parameter Validation: Plugin directories",
29 "name": "role_config_suppression_agent_plugin_dirs",
30 "value": "false"
31 },
32 {
33 "desc": "When computing the overall Agent health, consider the host's health.",
34 "display_name": "Agent Host Health Test",
35 "name": "flume_agent_host_health_enabled",
36 "value": "true"
37 },
38 {
39 "desc": "The health test thresholds on the duration of the metrics request to the web server.",
40 "display_name": "Web Metric Collection Duration",
41 "name": "agent_web_metric_collection_thresholds",
42 "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
43 },
44 {
45 "desc": "The maximum size, in megabytes, per log file for Agent logs. Typically used by log4j or logback.",
46 "display_name": "Agent Max Log Size",
47 "name": "max_log_size",
48 "value": "200"
49 },
50 {
51 "desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
52 "display_name": "Suppress Health Test: Swap Memory Usage",
53 "name": "role_health_suppression_flume_agent_swap_memory_usage",
54 "value": "false"
55 },
56 {
57 "desc": "Text that goes verbatim into grok-dictionary.conf file used by the Flume-NG Solr sink.",
58 "display_name": "Grok Dictionary File",
59 "name": "agent_grok_dictionary_conf_file",
60 "value": "USERNAME [a-zA-Z0-9._-]+\nUSER %{USERNAME}\nINT (?:[+-]?(?:[0-9]+))\nBASE10NUM (?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\\.[0-9]+)?)|(?:\\.[0-9]+)))\nNUMBER (?:%{BASE10NUM})\nBASE16NUM (?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))\nBASE16FLOAT \\b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\\.[0-9A-Fa-f]*)?)|(?:\\.[0-9A-Fa-f]+)))\\b\n\nPOSINT \\b(?:[1-9][0-9]*)\\b\nNONNEGINT \\b(?:[0-9]+)\\b\nWORD \\b\\w+\\b\nNOTSPACE \\S+\nSPACE \\s*\nDATA .*?\nGREEDYDATA .*\n#QUOTEDSTRING (?:(?<!\\\\)(?:\"(?:\\\\.|[^\\\\\"])*\"|(?:'(?:\\\\.|[^\\\\'])*')|(?:`(?:\\\\.|[^\\\\`])*`)))\nQUOTEDSTRING (?>(?<!\\\\)(?>\"(?>\\\\.|[^\\\\\"]+)+\"|\"\"|(?>'(?>\\\\.|[^\\\\']+)+')|''|(?>`(?>\\\\.|[^\\\\`]+)+`)|``))\nUUID [A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}\n\n# Networking\nMAC (?:%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC})\nCISCOMAC (?:(?:[A-Fa-f0-9]{4}\\.){2}[A-Fa-f0-9]{4})\nWINDOWSMAC (?:(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2})\nCOMMONMAC (?:(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2})\nIP (?<![0-9])(?:(?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2})[.](?:25[0-5]|2[0-4][0-9]|[0-1]?[0-9]{1,2}))(?![0-9])\nHOSTNAME \\b(?:[0-9A-Za-z][0-9A-Za-z-]{0,62})(?:\\.(?:[0-9A-Za-z][0-9A-Za-z-]{0,62}))*(\\.?|\\b)\nHOST %{HOSTNAME}\nIPORHOST (?:%{HOSTNAME}|%{IP})\n#HOSTPORT (?:%{IPORHOST=~/\\./}:%{POSINT}) # WH\n\n# paths\nPATH (?:%{UNIXPATH}|%{WINPATH})\nUNIXPATH (?>/(?>[\\w_%!$@:.,-]+|\\\\.)*)+\n#UNIXPATH (?<![\\w\\/])(?:/[^\\/\\s?*]*)+\nLINUXTTY (?>/dev/pts/%{NONNEGINT})\nBSDTTY (?>/dev/tty[pq][a-z0-9])\nTTY (?:%{BSDTTY}|%{LINUXTTY})\nWINPATH (?>[A-Za-z]+:|\\\\)(?:\\\\[^\\\\?*]*)+\nURIPROTO [A-Za-z]+(\\+[A-Za-z+]+)?\nURIHOST %{IPORHOST}(?::%{POSINT:port})?\n# uripath comes loosely from RFC1738, but mostly from what Firefox\n# doesn't turn into %XX\nURIPATH (?:/[A-Za-z0-9$.+!*'(){},~:;=#%_\\-]*)+\n#URIPARAM \\?(?:[A-Za-z0-9]+(?:=(?:[^&]*))?(?:&(?:[A-Za-z0-9]+(?:=(?:[^&]*))?)?)*)?\nURIPARAM \\?[A-Za-z0-9$.+!*'|(){},~#%&/=:;_?\\-\\[\\]]*\nURIPATHPARAM %{URIPATH}(?:%{URIPARAM})?\nURI %{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST})?(?:%{URIPATHPARAM})?\n\n# Months: January, Feb, 3, 03, 12, December\nMONTH \\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\\b\nMONTHNUM (?:0?[1-9]|1[0-2])\nMONTHDAY (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])\n\n# Days: Monday, Tue, Thu, etc...\nDAY (?:Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)?)\n\n# Years?\nYEAR (?>\\d\\d){1,2}\n# Time: HH:MM:SS\n#TIME \\d{2}:\\d{2}(?::\\d{2}(?:\\.\\d+)?)?\n# I'm still on the fence about using grok to perform the time match,\n# since it's probably slower.\n# TIME %{POSINT<24}:%{POSINT<60}(?::%{POSINT<60}(?:\\.%{POSINT})?)?\nHOUR (?:2[0123]|[01]?[0-9])\nMINUTE (?:[0-5][0-9])\n# '60' is a leap second in most time standards and thus is valid.\nSECOND (?:(?:[0-5][0-9]|60)(?:[:.,][0-9]+)?)\nTIME (?!<[0-9])%{HOUR}:%{MINUTE}(?::%{SECOND})(?![0-9])\n# datestamp is YYYY/MM/DD-HH:MM:SS.UUUU (or something like it)\nDATE_US %{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR}\nDATE_EU %{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR}\nISO8601_TIMEZONE (?:Z|[+-]%{HOUR}(?::?%{MINUTE}))\nISO8601_SECOND (?:%{SECOND}|60)\nTIMESTAMP_ISO8601 %{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}?\nDATE %{DATE_US}|%{DATE_EU}\nDATESTAMP %{DATE}[- ]%{TIME}\nTZ (?:[PMCE][SD]T)\nDATESTAMP_RFC822 %{DAY} %{MONTH} %{MONTHDAY} %{YEAR} 
%{TIME} %{TZ}\nDATESTAMP_OTHER %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR}\n\n# Syslog Dates: Month Day HH:MM:SS\nSYSLOGTIMESTAMP %{MONTH} +%{MONTHDAY} %{TIME}\nPROG (?:[\\w._/%-]+)\nSYSLOGPROG %{PROG:program}(?:\\[%{POSINT:pid}\\])?\nSYSLOGHOST %{IPORHOST}\nSYSLOGFACILITY <%{NONNEGINT:facility}.%{NONNEGINT:priority}>\nHTTPDATE %{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT}\n\n# Shortcuts\nQS %{QUOTEDSTRING}\n\n# Log formats\nSYSLOGBASE %{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource} %{SYSLOGPROG}:\nCOMBINEDAPACHELOG %{IPORHOST:clientip} %{USER:ident} %{USER:auth} \\[%{HTTPDATE:timestamp}\\] \"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:response} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent}\n\n# Log Levels\nLOGLEVEL ([T|t]race|TRACE|[D|d]ebug|DEBUG|[N|n]otice|NOTICE|[I|i]nfo|INFO|[W|w]arn?(?:ing)?|WARN?(?:ING)?|[E|e]rr?(?:or)?|ERR?(?:OR)?|[C|c]rit?(?:ical)?|CRIT?(?:ICAL)?|[F|f]atal|FATAL|[S|s]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)?)"
61 },
62 {
63 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Agent Name parameter.",
64 "display_name": "Suppress Parameter Validation: Agent Name",
65 "name": "role_config_suppression_agent_name",
66 "value": "false"
67 },
68 {
69 "desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
70 "display_name": "Suppress Health Test: Web Server Status",
71 "name": "role_health_suppression_flume_agent_web_metric_collection",
72 "value": "false"
73 },
74 {
75 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Morphlines File parameter.",
76 "display_name": "Suppress Parameter Validation: Morphlines File",
77 "name": "role_config_suppression_agent_morphlines_conf_file",
78 "value": "false"
79 },
80 {
81 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
82 "display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
83 "name": "heap_dump_directory_free_space_absolute_thresholds",
84 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
85 },
86 {
87 "desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <code>stacks</code> subdirectory of the role's log directory.",
88 "display_name": "Stacks Collection Directory",
89 "name": "stacks_collection_directory",
90 "value": null
91 },
92 {
93 "desc": "The health test thresholds on the swap memory usage of the process.",
94 "display_name": "Process Swap Memory Thresholds",
95 "name": "process_swap_memory_thresholds",
96 "value": "{\"critical\":\"never\",\"warning\":\"any\"}"
97 },
98 {
99 "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
100 "display_name": "Stacks Collection Method",
101 "name": "stacks_collection_method",
102 "value": "jstack"
103 },
104 {
105 "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
106 "display_name": "Web Metric Collection",
107 "name": "agent_web_metric_collection_enabled",
108 "value": "true"
109 },
110 {
111 "desc": "<p>Verbatim contents of flume.conf. Multiple agents may be configured from the same configuration file; the Agent Name setting can be overridden to select which agent configuration to use for each agent.</p><p>To integrate with a secured cluster, you can use the substitution strings \"<code>$KERBEROS_PRINCIPAL</code>\" and \"<code>$KERBEROS_KEYTAB</code>\", which will be replaced by the principal name and the keytab path respectively.</p>",
112 "display_name": "Configuration File",
113 "name": "agent_config_file",
114 "value": "# Please paste flume.conf here. Example:\n\n# Sources, channels, and sinks are defined per\n# agent name, in this case 'tier1'.\ntier1.sources = source1\ntier1.channels = channel1\ntier1.sinks = sink1\n\n# For each source, channel, and sink, set\n# standard properties.\ntier1.sources.source1.type = netcat\ntier1.sources.source1.bind = 127.0.0.1\ntier1.sources.source1.port = 9999\ntier1.sources.source1.channels = channel1\ntier1.channels.channel1.type = memory\ntier1.sinks.sink1.type = logger\ntier1.sinks.sink1.channel = channel1\n\n# Other properties are specific to each type of\n# source, channel, or sink. In this case, we\n# specify the capacity of the memory channel.\ntier1.channels.channel1.capacity = 100\n"
115 },
116 {
117 "desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
118 "display_name": "Suppress Health Test: File Descriptors",
119 "name": "role_health_suppression_flume_agent_file_descriptor",
120 "value": "false"
121 },
122 {
123 "desc": "The maximum number of rolled log files to keep for Agent logs. Typically used by log4j or logback.",
124 "display_name": "Agent Maximum Log File Backups",
125 "name": "max_log_backup_index",
126 "value": "10"
127 },
128 {
129 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Agent Logging Advanced Configuration Snippet (Safety Valve) parameter.",
130 "display_name": "Suppress Parameter Validation: Agent Logging Advanced Configuration Snippet (Safety Valve)",
131 "name": "role_config_suppression_log4j_safety_valve",
132 "value": "false"
133 },
134 {
135 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Custom Mime-types File parameter.",
136 "display_name": "Suppress Parameter Validation: Custom Mime-types File",
137 "name": "role_config_suppression_agent_custom_mimetypes_file",
138 "value": "false"
139 },
140 {
141 "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
142 "display_name": "Cgroup Memory Soft Limit",
143 "name": "rm_memory_soft_limit",
144 "value": "-1"
145 },
146 {
147 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume Agent Log Directory parameter.",
148 "display_name": "Suppress Parameter Validation: Flume Agent Log Directory",
149 "name": "role_config_suppression_flume_agent_log_dir",
150 "value": "false"
151 },
152 {
153 "desc": "Enables the health test that the Agent's process state is consistent with the role configuration",
154 "display_name": "Agent Process Health Test",
155 "name": "flume_agent_scm_health_enabled",
156 "value": "true"
157 },
158 {
159 "desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
160 "display_name": "Suppress Health Test: Unexpected Exits",
161 "name": "role_health_suppression_flume_agent_unexpected_exits",
162 "value": "false"
163 },
164 {
165 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
166 "display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
167 "name": "role_config_suppression_log_event_whitelist",
168 "value": "false"
169 },
170 {
171 "desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
172 "display_name": "Suppress Configuration Validator: CDH Version Validator",
173 "name": "role_config_suppression_cdh_version_validator",
174 "value": "false"
175 },
176 {
177 "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
178 "display_name": "Cgroup Memory Hard Limit",
179 "name": "rm_memory_hard_limit",
180 "value": "-1"
181 },
182 {
183 "desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
184 "display_name": "Suppress Health Test: Heap Dump Directory Free Space",
185 "name": "role_health_suppression_flume_agent_heap_dump_directory_free_space",
186 "value": "false"
187 },
188 {
189 "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
190 "display_name": "Automatically Restart Process",
191 "name": "process_auto_restart",
192 "value": "true"
193 },
194 {
195 "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
196 "display_name": "File Descriptor Monitoring Thresholds",
197 "name": "flume_agent_fd_thresholds",
198 "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
199 },
200 {
201 "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
202 "display_name": "Cgroup I/O Weight",
203 "name": "rm_io_weight",
204 "value": "500"
205 },
206 {
207 "desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
208 "display_name": "Stacks Collection Data Retention",
209 "name": "stacks_collection_data_retention",
210 "value": "104857600"
211 },
212 {
213 "desc": "The port on which the Flume web server listens for requests.",
214 "display_name": "HTTP Port",
215 "name": "agent_http_port",
216 "value": "41414"
217 },
218 {
219 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
220 "display_name": "Suppress Parameter Validation: Heap Dump Directory",
221 "name": "role_config_suppression_oom_heap_dump_dir",
222 "value": "false"
223 },
224 {
225 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
226 "display_name": "Agent Logging Advanced Configuration Snippet (Safety Valve)",
227 "name": "log4j_safety_valve",
228 "value": null
229 },
230 {
231 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
232 "display_name": "Suppress Parameter Validation: Role Triggers",
233 "name": "role_config_suppression_role_triggers",
234 "value": "false"
235 },
236 {
237 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
238 "display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
239 "name": "log_directory_free_space_absolute_thresholds",
240 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
241 },
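The absolute free-space thresholds are byte counts serialized in scientific notation; 5.36870912E9 and 1.073741824E10 are exactly 5 GiB and 10 GiB. A quick check in Python:

    # The threshold strings decode to exact binary sizes.
    GIB = 1024 ** 3
    critical = float("5.36870912E9")   # bytes
    warning = float("1.073741824E10")  # bytes
    print(critical / GIB, warning / GIB)  # 5.0 10.0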
242 {
243 "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
244 "display_name": "Java Heap Size of Agent in Bytes",
245 "name": "agent_java_heapsize",
246 "value": "1073741824"
247 },
248 {
249 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Configuration File parameter.",
250 "display_name": "Suppress Parameter Validation: Configuration File",
251 "name": "role_config_suppression_agent_config_file",
252 "value": "false"
253 },
254 {
255 "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
256 "display_name": "Maximum Process File Descriptors",
257 "name": "rlimit_fds",
258 "value": null
259 },
260 {
261 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
262 "display_name": "Enable Configuration Change Alerts",
263 "name": "enable_config_alerts",
264 "value": "false"
265 },
266 {
267 "desc": "Disables import of ZooKeeper configuration from the HBase classpath. This prevents zoo.cfg from overriding hbase-site.xml for Zookeeper quorum information. This option is only supported on CDH 4.4 or later deployments.",
268 "display_name": "HBase sink prefer hbase-site.xml over Zookeeper config",
269 "name": "agent_disable_zoo_cfg",
270 "value": "true"
271 },
272 {
273 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
274 "display_name": "Agent Environment Advanced Configuration Snippet (Safety Valve)",
275 "name": "AGENT_role_env_safety_valve",
276 "value": null
277 },
278 {
279 "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
280 "display_name": "Kill When Out of Memory",
281 "name": "oom_sigkill_enabled",
282 "value": "true"
283 },
284 {
285 "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
286 "display_name": "Dump Heap When Out of Memory",
287 "name": "oom_heap_dump_enabled",
288 "value": "true"
289 },
290 {
291 "desc": "Directory where Flume Agent will place its log files.",
292 "display_name": "Flume Agent Log Directory",
293 "name": "flume_agent_log_dir",
294 "value": "/var/log/flume-ng"
295 },
296 {
297 "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
298 "display_name": "Unexpected Exits Thresholds",
299 "name": "unexpected_exits_thresholds",
300 "value": "{\"critical\":\"any\",\"warning\":\"never\"}"
301 },
302 {
303 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for Flume Agent parameter.",
304 "display_name": "Suppress Parameter Validation: Java Configuration Options for Flume Agent",
305 "name": "role_config_suppression_flume_agent_java_opts",
306 "value": "false"
307 },
308 {
309 "desc": "Cloudera Manager agent monitors each service and each of its role by publishing metrics to the Cloudera Manager Service Monitor. Setting it to false will stop Cloudera Manager agent from publishing any metric for corresponding service/roles. This is usually helpful for services that generate large amount of metrics which Service Monitor is not able to process.",
310 "display_name": "Enable Metric Collection",
311 "name": "process_should_monitor",
312 "value": "true"
313 },
314 {
315 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Grok Dictionary File parameter.",
316 "display_name": "Suppress Parameter Validation: Grok Dictionary File",
317 "name": "role_config_suppression_agent_grok_dictionary_conf_file",
318 "value": "false"
319 },
320 {
321 "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
322 "display_name": "Enable Health Alerts for this Role",
323 "name": "enable_alerts",
324 "value": "true"
325 },
326 {
327 "desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
328 "display_name": "Suppress Health Test: Log Directory Free Space",
329 "name": "role_health_suppression_flume_agent_log_directory_free_space",
330 "value": "false"
331 },
332 {
333 "desc": "<p>The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
334 "display_name": "Role Triggers",
335 "name": "role_triggers",
336 "value": "[]"
337 },
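The trigger list documented above is itself JSON-encoded into the value field. A sketch that rebuilds the sample trigger from the description; the tsquery text is copied verbatim from the desc, while the assembly code is illustrative:

    import json

    # Fires when a DataNode has more than 1500 open file descriptors,
    # per the example embedded in the role_triggers description.
    trigger = {
        "triggerName": "sample-trigger",
        "triggerExpression": (
            "IF (SELECT fd_open WHERE roleName=$ROLENAME "
            "and last(fd_open) > 1500) DO health:bad"
        ),
        "streamThreshold": 0,
        "enabled": "true",
    }
    # The configuration value is the JSON-encoded list itself.
    role_triggers_value = json.dumps([trigger])
    print(role_triggers_value)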
338 {
339 "desc": "Text that goes verbatim into custom-mimetypes.xml file used by the Flume-NG Solr sink.",
340 "display_name": "Custom Mime-types File",
341 "name": "agent_custom_mimetypes_file",
342 "value": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor\n license agreements. See the NOTICE file distributed with this work for additional\n information regarding copyright ownership. The ASF licenses this file to\n You under the Apache License, Version 2.0 (the \"License\"); you may not use\n this file except in compliance with the License. You may obtain a copy of\n the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required\n by applicable law or agreed to in writing, software distributed under the\n License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS\n OF ANY KIND, either express or implied. See the License for the specific\n language governing permissions and limitations under the License. -->\n\n<mime-info>\n\n <mime-type type=\"text/space-separated-values\">\n <glob pattern=\"*.ssv\"/>\n </mime-type>\n\n <mime-type type=\"avro/binary\">\n <magic priority=\"50\">\n <match value=\"0x4f626a01\" type=\"string\" offset=\"0\"/> \n </magic>\n <glob pattern=\"*.avro\"/>\n </mime-type>\n\n <mime-type type=\"mytwittertest/json+delimited+length\">\n <magic priority=\"50\">\n <match value=\"[0-9]+(\\r)?\\n\\\\{&quot;\" type=\"regex\" offset=\"0:16\"/> \n </magic>\n </mime-type>\n \n <mime-type type=\"application/hadoop-sequence-file\">\n <magic priority=\"50\">\n <match value=\"SEQ[\\0-\\6]\" type=\"regex\" offset=\"0\"/>\n </magic>\n </mime-type>\n \n</mime-info>"
343 },
344 {
345 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
346 "display_name": "Suppress Parameter Validation: Stacks Collection Directory",
347 "name": "role_config_suppression_stacks_collection_directory",
348 "value": "false"
349 },
350 {
351 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume Home Directory parameter.",
352 "display_name": "Suppress Parameter Validation: Flume Home Directory",
353 "name": "role_config_suppression_agent_home_dir",
354 "value": "false"
355 },
356 {
357 "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags would be passed here. Note that Flume agent only uses options that start with -D and -X (including -XX).",
358 "display_name": "Java Configuration Options for Flume Agent",
359 "name": "flume_agent_java_opts",
360 "value": ""
361 },
362 {
363 "desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
364 "display_name": "Suppress Health Test: Process Status",
365 "name": "role_health_suppression_flume_agent_scm_health",
366 "value": "false"
367 },
368 {
369 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
370 "display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
371 "name": "heap_dump_directory_free_space_percentage_thresholds",
372 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
373 },
374 {
375 "desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
376 "display_name": "Suppress Health Test: Host Health",
377 "name": "role_health_suppression_flume_agent_host_health",
378 "value": "false"
379 },
380 {
381 "desc": "Whether to suppress the results of the Audit Pipeline Test heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
382 "display_name": "Suppress Health Test: Audit Pipeline Test",
383 "name": "role_health_suppression_flume_agent_audit_health",
384 "value": "false"
385 },
386 {
387 "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
388 "display_name": "Cgroup CPU Shares",
389 "name": "rm_cpu_shares",
390 "value": "1024"
391 },
392 {
393 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Agent Environment Advanced Configuration Snippet (Safety Valve) parameter.",
394 "display_name": "Suppress Parameter Validation: Agent Environment Advanced Configuration Snippet (Safety Valve)",
395 "name": "role_config_suppression_agent_role_env_safety_valve",
396 "value": "false"
397 },
398 {
399 "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
400 "display_name": "Heap Dump Directory",
401 "name": "oom_heap_dump_dir",
402 "value": "/tmp"
403 },
404 {
405 "desc": "The period to review when computing unexpected exits.",
406 "display_name": "Unexpected Exits Monitoring Period",
407 "name": "unexpected_exits_window",
408 "value": "5"
409 },
410 {
411 "desc": "The frequency with which stacks are collected.",
412 "display_name": "Stacks Collection Frequency",
413 "name": "stacks_collection_frequency",
414 "value": "5.0"
415 },
416 {
417 "desc": "List of Flume plugin directories. This overrides the default Flume plugin directory.",
418 "display_name": "Plugin directories",
419 "name": "agent_plugin_dirs",
420 "value": "/usr/lib/flume-ng/plugins.d:/var/lib/flume-ng/plugins.d"
421 },
422 {
423 "desc": "Home directory for Flume user. The File Channel uses paths for checkpoint and data directories that are within the user home.",
424 "display_name": "Flume Home Directory",
425 "name": "agent_home_dir",
426 "value": "/var/lib/flume-ng"
427 },
428 {
429 "desc": "The minimum log level for Agent logs",
430 "display_name": "Agent Logging Threshold",
431 "name": "log_threshold",
432 "value": "INFO"
433 },
434 {
435 "desc": "Whether or not periodic stacks collection is enabled.",
436 "display_name": "Stacks Collection Enabled",
437 "name": "stacks_collection_enabled",
438 "value": "false"
439 }
440] \ No newline at end of file
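Each resource file in this change is a flat JSON list of {desc, display_name, name, value} entries. A minimal sketch, assuming only the standard library and a local copy of the file, of collapsing one descriptor into a name-to-default mapping (not the plugin's actual loader):

    import json

    def load_defaults(path):
        """Map each config name to its default value (None when unset)."""
        with open(path) as f:
            entries = json.load(f)
        return {e["name"]: e["value"] for e in entries}

    defaults = load_defaults("flume-agent.json")
    print(defaults.get("agent_http_port"))  # "41414"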
diff --git a/sahara/plugins/cdh/v5_13_0/resources/flume-service.json b/sahara/plugins/cdh/v5_13_0/resources/flume-service.json
new file mode 100644
index 0000000..4c73fe8
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/flume-service.json
@@ -0,0 +1,164 @@
1[
2 {
3 "desc": "Name of the HBase service that this Flume service instance depends on",
4 "display_name": "HBase Service",
5 "name": "hbase_service",
6 "value": null
7 },
8 {
9 "desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Flume might connect to. This is used when Flume is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
10 "display_name": "Flume TLS/SSL Certificate Trust Store File",
11 "name": "flume_truststore_file",
12 "value": null
13 },
14 {
15 "desc": "Whether to suppress the results of the Agent Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
16 "display_name": "Suppress Health Test: Agent Health",
17 "name": "service_health_suppression_flume_agents_healthy",
18 "value": "false"
19 },
20 {
21 "desc": "The user that this service's processes should run as.",
22 "display_name": "System User",
23 "name": "process_username",
24 "value": "flume"
25 },
26 {
27 "desc": "Name of the HDFS service that this Flume service instance depends on",
28 "display_name": "HDFS Service",
29 "name": "hdfs_service",
30 "value": null
31 },
32 {
33 "desc": "Whether to suppress configuration warnings produced by the Agent Count Validator configuration validator.",
34 "display_name": "Suppress Configuration Validator: Agent Count Validator",
35 "name": "service_config_suppression_agent_count_validator",
36 "value": "false"
37 },
38 {
39 "desc": "Name of the Solr service that this Flume service instance depends on",
40 "display_name": "Solr Service",
41 "name": "solr_service",
42 "value": null
43 },
44 {
45 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
46 "display_name": "Flume Service Environment Advanced Configuration Snippet (Safety Valve)",
47 "name": "flume_env_safety_valve",
48 "value": null
49 },
50 {
51 "desc": "Sets the maximum number of Flume components that will be returned under Flume Metric Details. Increasing this value will negatively impact the interactive performance of the Flume Metrics Details page.",
52 "display_name": "Maximum displayed Flume metrics components",
53 "name": "flume_context_groups_request_limit",
54 "value": "1000"
55 },
56 {
57 "desc": "Kerberos principal short name used by all roles of this service.",
58 "display_name": "Kerberos Principal",
59 "name": "kerberos_princ_name",
60 "value": "flume"
61 },
62 {
63 "desc": "The health test thresholds of the overall Agent health. The check returns \"Concerning\" health if the percentage of \"Healthy\" Agents falls below the warning threshold. The check is unhealthy if the total percentage of \"Healthy\" and \"Concerning\" Agents falls below the critical threshold.",
64 "display_name": "Healthy Agent Monitoring Thresholds",
65 "name": "flume_agents_healthy_thresholds",
66 "value": "{\"critical\":\"never\",\"warning\":\"95.0\"}"
67 },
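Threshold values here mix percentages with the sentinels "never" (check disabled) and "any" (fires on any occurrence), as used elsewhere in these files. A sketch of that convention as inferred from the descriptions, not from Cloudera Manager source:

    import json

    def breach(threshold, healthy_pct):
        """True when healthy_pct falls below a percentage threshold.
        'never' disables the check; 'any' always fires."""
        if threshold == "never":
            return False
        if threshold == "any":
            return True
        return healthy_pct < float(threshold)

    t = json.loads('{"critical":"never","warning":"95.0"}')
    print(breach(t["warning"], 90.0))   # True  -> Concerning
    print(breach(t["critical"], 90.0))  # False -> never goes unhealthy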
68 {
69 "desc": "The group that this service's processes should run as.",
70 "display_name": "System Group",
71 "name": "process_groupname",
72 "value": "flume"
73 },
74 {
75 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
76 "display_name": "Enable Configuration Change Alerts",
77 "name": "enable_config_alerts",
78 "value": "false"
79 },
80 {
81 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
82 "display_name": "Suppress Parameter Validation: Service Triggers",
83 "name": "service_config_suppression_service_triggers",
84 "value": "false"
85 },
86 {
87 "desc": "The frequency in which the log4j event publication appender will retry sending undelivered log events to the Event server, in seconds",
88 "display_name": "Log Event Retry Frequency",
89 "name": "log_event_retry_frequency",
90 "value": "30"
91 },
92 {
93 "desc": "<p>The configured triggers for this service. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
94 "display_name": "Service Triggers",
95 "name": "service_triggers",
96 "value": "[]"
97 },
98 {
99 "desc": "When set, each role identifies important log events and forwards them to Cloudera Manager.",
100 "display_name": "Enable Log Event Capture",
101 "name": "catch_events",
102 "value": "true"
103 },
104 {
105 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
106 "display_name": "Suppress Parameter Validation: System User",
107 "name": "service_config_suppression_process_username",
108 "value": "false"
109 },
110 {
111 "desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
112 "display_name": "Enable Service Level Health Alerts",
113 "name": "enable_alerts",
114 "value": "true"
115 },
116 {
117 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kerberos Principal parameter.",
118 "display_name": "Suppress Parameter Validation: Kerberos Principal",
119 "name": "service_config_suppression_kerberos_princ_name",
120 "value": "false"
121 },
122 {
123 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
124 "display_name": "Suppress Parameter Validation: System Group",
125 "name": "service_config_suppression_process_groupname",
126 "value": "false"
127 },
128 {
129 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume TLS/SSL Certificate Trust Store Password parameter.",
130 "display_name": "Suppress Parameter Validation: Flume TLS/SSL Certificate Trust Store Password",
131 "name": "service_config_suppression_flume_truststore_password",
132 "value": "false"
133 },
134 {
135 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
136 "display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
137 "name": "service_config_suppression_smon_derived_configs_safety_valve",
138 "value": "false"
139 },
140 {
141 "desc": "The password for the Flume TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
142 "display_name": "Flume TLS/SSL Certificate Trust Store Password",
143 "name": "flume_truststore_password",
144 "value": null
145 },
146 {
147 "desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
148 "display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
149 "name": "smon_derived_configs_safety_valve",
150 "value": null
151 },
152 {
153 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume TLS/SSL Certificate Trust Store File parameter.",
154 "display_name": "Suppress Parameter Validation: Flume TLS/SSL Certificate Trust Store File",
155 "name": "service_config_suppression_flume_truststore_file",
156 "value": "false"
157 },
158 {
159 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Flume Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
160 "display_name": "Suppress Parameter Validation: Flume Service Environment Advanced Configuration Snippet (Safety Valve)",
161 "name": "service_config_suppression_flume_env_safety_valve",
162 "value": "false"
163 }
164] \ No newline at end of file
diff --git a/sahara/plugins/cdh/v5_13_0/resources/hbase-gateway.json b/sahara/plugins/cdh/v5_13_0/resources/hbase-gateway.json
new file mode 100644
index 0000000..e419705
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/hbase-gateway.json
@@ -0,0 +1,122 @@
1[
2 {
3 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Deploy Directory parameter.",
4 "display_name": "Suppress Parameter Validation: Deploy Directory",
5 "name": "role_config_suppression_client_config_root_dir",
6 "value": "false"
7 },
8 {
9 "desc": "Scanner Timeout, in milliseconds, for HBase Clients. Scanner related RPCs will apply this timeout against the RegionServers they talk to.",
10 "display_name": "HBase Client Scanner Timeout",
11 "name": "hbase_client_scanner_timeout_period",
12 "value": "60000"
13 },
14 {
15 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
16 "display_name": "Gateway Logging Advanced Configuration Snippet (Safety Valve)",
17 "name": "log4j_safety_valve",
18 "value": null
19 },
20 {
21 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
22 "display_name": "Suppress Parameter Validation: HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
23 "name": "role_config_suppression_hbase_client_config_safety_valve",
24 "value": "false"
25 },
26 {
27 "desc": "For advanced use only, a string to be inserted into the client configuration for <strong>hbase-site.xml</strong>.",
28 "display_name": "HBase Client Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
29 "name": "hbase_client_config_safety_valve",
30 "value": null
31 },
32 {
33 "desc": "If a multiget operation is performed with 'Consistency.TIMELINE', the read RPC is sent to the primary RegionServer first. After this timeout, a parallel RPC for secondary region replicas is also sent if the primary does not respond. After this, the result is returned from whichever RPC is finished first. If the response returns from the primary region replica, that the data is the most recent. Result.isStale() API has been added to inspect the staleness. If the result is from a secondary region, Result.isStale() is set to true.",
34 "display_name": "HBase Client Multiget Timeout For Secondary Region Replicas",
35 "name": "hbase_client_primaryCallTimeout_multiget",
36 "value": "10"
37 },
38 {
39 "desc": "The directory where the client configs will be deployed",
40 "display_name": "Deploy Directory",
41 "name": "client_config_root_dir",
42 "value": "/etc/hbase"
43 },
44 {
45 "desc": "Whether to enable interruption of RPC threads at the client. The default value of true enables primary RegionServers to access data from other regions' secondary replicas.",
46 "display_name": "Enable Client RPC Threads Interruption",
47 "name": "hbase_ipc_client_allowsInterrupt",
48 "value": "true"
49 },
50 {
51 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into the client configuration for <strong>hbase-env.sh</strong>",
52 "display_name": "HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh",
53 "name": "hbase_client_env_safety_valve",
54 "value": null
55 },
56 {
57 "desc": "The priority level that the client configuration will have in the Alternatives system on the hosts. Higher priority levels will cause Alternatives to prefer this configuration over any others.",
58 "display_name": "Alternatives Priority",
59 "name": "client_config_priority",
60 "value": "90"
61 },
62 {
63 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
64 "display_name": "Enable Configuration Change Alerts",
65 "name": "enable_config_alerts",
66 "value": "false"
67 },
68 {
69 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh parameter.",
70 "display_name": "Suppress Parameter Validation: HBase Client Environment Advanced Configuration Snippet (Safety Valve) for hbase-env.sh",
71 "name": "role_config_suppression_hbase_client_env_safety_valve",
72 "value": "false"
73 },
74 {
75 "desc": "Whether to suppress configuration warnings produced by the HBase Client Scanner Timeout exceeds Lease Period Validator configuration validator.",
76 "display_name": "Suppress Configuration Validator: HBase Client Scanner Timeout exceeds Lease Period Validator",
77 "name": "role_config_suppression_client_server_scanner_rpc_timeout_validator",
78 "value": "false"
79 },
80 {
81 "desc": "If a get operation is performed with 'Consistency.TIMELINE', the read RPC is sent to the primary RegionServer first. After this timeout, parallel RPC for secondary region replicas is also sent if the primary does not respond. After this, the result is returned from whichever RPC is finished first. If the response returns from the primary region replica, that data is the most recent. Result.isStale() API has been added to inspect the staleness. If the result is from a secondary region, Result.isStale() is set to true.",
82 "display_name": "HBase Client Get Timeout For Secondary Region Replicas",
83 "name": "hbase_client_primaryCallTimeout_get",
84 "value": "10"
85 },
86 {
87 "desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
88 "display_name": "Suppress Configuration Validator: CDH Version Validator",
89 "name": "role_config_suppression_cdh_version_validator",
90 "value": "false"
91 },
92 {
93 "desc": "These are Java command-line arguments. Commonly, garbage collection flags, PermGen, or extra debugging flags would be passed here.",
94 "display_name": "Client Java Configuration Options",
95 "name": "hbase_client_java_opts",
96 "value": "-XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -Djava.net.preferIPv4Stack=true"
97 },
98 {
99 "desc": "Maximum size in bytes for the Java process heap memory. Passed to Java -Xmx.",
100 "display_name": "Client Java Heap Size in Bytes",
101 "name": "hbase_client_java_heapsize",
102 "value": "268435456"
103 },
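Heap sizes are stored as raw byte counts (268435456 is 256 MiB) and, per the descriptions, are passed to Java as -Xmx. An illustrative sketch of composing the client command-line options under that assumption; the assembly is not the actual CDH launcher:

    # Defaults copied from hbase_client_java_opts / hbase_client_java_heapsize.
    java_opts = ("-XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC "
                 "-XX:+CMSIncrementalMode -Djava.net.preferIPv4Stack=true")
    heapsize = 268435456  # 256 MiB

    # Append the heap limit the way the description says it is passed.
    cmdline_opts = "%s -Xmx%d" % (java_opts, heapsize)
    print(cmdline_opts)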
104 {
105 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Gateway Logging Advanced Configuration Snippet (Safety Valve) parameter.",
106 "display_name": "Suppress Parameter Validation: Gateway Logging Advanced Configuration Snippet (Safety Valve)",
107 "name": "role_config_suppression_log4j_safety_valve",
108 "value": "false"
109 },
110 {
111 "desc": "The minimum log level for Gateway logs",
112 "display_name": "Gateway Logging Threshold",
113 "name": "log_threshold",
114 "value": "INFO"
115 },
116 {
117 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Client Java Configuration Options parameter.",
118 "display_name": "Suppress Parameter Validation: Client Java Configuration Options",
119 "name": "role_config_suppression_hbase_client_java_opts",
120 "value": "false"
121 }
122] \ No newline at end of file
diff --git a/sahara/plugins/cdh/v5_13_0/resources/hbase-hbaserestserver.json b/sahara/plugins/cdh/v5_13_0/resources/hbase-hbaserestserver.json
new file mode 100644
index 0000000..6b88e0e
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/hbase-hbaserestserver.json
@@ -0,0 +1,452 @@
1[
2 {
3 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
4 "display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
5 "name": "log_directory_free_space_percentage_thresholds",
6 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
7 },
8 {
9 "desc": "<p>This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message. If a log message matches multiple rules, the first matching rule is used. </p><p>Each rule has some or all of the following fields:</p><ul><li><code>alert</code> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><code>rate</code> <b>(mandatory)</b> - the maximum number of log messages matching this rule that can be sent as events every minute. If more than <code>rate</code> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><code>periodminutes</code> - the number of minutes during which the publisher will only publish <code>rate</code> events or fewer. If not specified, the default is <b>one minute</b></li><li><code>threshold</code> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><code>content</code> - match only those messages for which contents match this regular expression.</li><li><code>exceptiontype</code> - match only those messages that are part of an exception message. The exception type must match this regular expression.</li></ul><p>Example:</p><ul><li><pre>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</pre>This rule sends events to Cloudera Manager for every <code>StringIndexOutOfBoundsException</code>, up to a maximum of 10 every minute.</li><li><pre>{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"exceptiontype\": \".*\"}, {\"alert\": true, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"ERROR\"}</pre>In this example, an event generated may not be promoted to alert if an exception is in the ERROR log message, because the first rule with alert = false will match.</li></ul>",
10 "display_name": "Rules to Extract Events from Log Files",
11 "name": "log_event_whitelist",
12 "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
13 },
14 {
15 "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags, PermGen, or extra debugging flags would be passed here.",
16 "display_name": "Java Configuration Options for HBase REST Server",
17 "name": "hbase_restserver_java_opts",
18 "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
19 },
20 {
21 "desc": "Whether or not periodic stacks collection is enabled.",
22 "display_name": "Stacks Collection Enabled",
23 "name": "stacks_collection_enabled",
24 "value": "false"
25 },
26 {
27 "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
28 "display_name": "Maximum Process File Descriptors",
29 "name": "rlimit_fds",
30 "value": null
31 },
32 {
33 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore File Location parameter.",
34 "display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore File Location",
35 "name": "role_config_suppression_hbase_restserver_keystore_file",
36 "value": "false"
37 },
38 {
39 "desc": "Maximum size of the HBase REST Server thread pool. The server can process this number of concurrent requests. Setting this too high can lead to out of memory errors.",
40 "display_name": "HBase REST Server Maximum Threads",
41 "name": "hbase_restserver_threads_max",
42 "value": "100"
43 },
44 {
45 "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
46 "display_name": "File Descriptor Monitoring Thresholds",
47 "name": "hbaserestserver_fd_thresholds",
48 "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
49 },
50 {
51 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Log Directory parameter.",
52 "display_name": "Suppress Parameter Validation: HBase REST Server Log Directory",
53 "name": "role_config_suppression_hbase_restserver_log_dir",
54 "value": "false"
55 },
56 {
57 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
58 "display_name": "HBase REST Server Environment Advanced Configuration Snippet (Safety Valve)",
59 "name": "HBASERESTSERVER_role_env_safety_valve",
60 "value": null
61 },
62 {
63 "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
64 "display_name": "Cgroup I/O Weight",
65 "name": "rm_io_weight",
66 "value": "500"
67 },
68 {
69 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
70 "display_name": "Suppress Parameter Validation: HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
71 "name": "role_config_suppression_hbase_restserver_config_safety_valve",
72 "value": "false"
73 },
74 {
75 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore File Password parameter.",
76 "display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore File Password",
77 "name": "role_config_suppression_hbase_restserver_keystore_password",
78 "value": "false"
79 },
80 {
81 "desc": "The frequency with which stacks are collected.",
82 "display_name": "Stacks Collection Frequency",
83 "name": "stacks_collection_frequency",
84 "value": "5.0"
85 },
86 {
87 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
88 "display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
89 "name": "heap_dump_directory_free_space_absolute_thresholds",
90 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
91 },
92 {
93 "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
94 "display_name": "Unexpected Exits Thresholds",
95 "name": "unexpected_exits_thresholds",
96 "value": "{\"critical\":\"any\",\"warning\":\"never\"}"
97 },
98 {
99 "desc": "The health test thresholds on the swap memory usage of the process.",
100 "display_name": "Process Swap Memory Thresholds",
101 "name": "process_swap_memory_thresholds",
102 "value": "{\"critical\":\"never\",\"warning\":\"any\"}"
103 },
104 {
105 "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
106 "display_name": "Stacks Collection Method",
107 "name": "stacks_collection_method",
108 "value": "jstack"
109 },
110 {
111 "desc": "Whether to suppress the results of the Audit Pipeline Test heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
112 "display_name": "Suppress Health Test: Audit Pipeline Test",
113 "name": "role_health_suppression_hbase_rest_server_audit_health",
114 "value": "false"
115 },
116 {
117 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
118 "display_name": "Suppress Parameter Validation: HBase REST Server Environment Advanced Configuration Snippet (Safety Valve)",
119 "name": "role_config_suppression_hbaserestserver_role_env_safety_valve",
120 "value": "false"
121 },
122 {
123 "desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
124 "display_name": "HBase REST Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
125 "name": "hbase_restserver_config_safety_valve",
126 "value": null
127 },
128 {
129 "desc": "The password for the HBase REST Server JKS keystore file.",
130 "display_name": "HBase REST Server TLS/SSL Server JKS Keystore File Password",
131 "name": "hbase_restserver_keystore_password",
132 "value": null
133 },
134 {
135 "desc": "The maximum number of rolled log files to keep for HBase REST Server logs. Typically used by log4j or logback.",
136 "display_name": "HBase REST Server Maximum Log File Backups",
137 "name": "max_log_backup_index",
138 "value": "10"
139 },
140 {
141 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
142 "display_name": "Suppress Parameter Validation: HBase REST Server Logging Advanced Configuration Snippet (Safety Valve)",
143 "name": "role_config_suppression_log4j_safety_valve",
144 "value": "false"
145 },
146 {
147 "desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
148 "display_name": "Suppress Health Test: Process Status",
149 "name": "role_health_suppression_hbase_rest_server_scm_health",
150 "value": "false"
151 },
152 {
153 "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
154 "display_name": "Cgroup Memory Soft Limit",
155 "name": "rm_memory_soft_limit",
156 "value": "-1"
157 },
158 {
159 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
160 "display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
161 "name": "log_directory_free_space_absolute_thresholds",
162 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
163 },
164 {
165 "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
166 "display_name": "Java Heap Size of HBase REST Server in Bytes",
167 "name": "hbase_restserver_java_heapsize",
168 "value": "1073741824"
169 },
170 {
171 "desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
172 "display_name": "Suppress Health Test: Heap Dump Directory Free Space",
173 "name": "role_health_suppression_hbase_rest_server_heap_dump_directory_free_space",
174 "value": "false"
175 },
176 {
177 "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
178 "display_name": "Automatically Restart Process",
179 "name": "process_auto_restart",
180 "value": "false"
181 },
182 {
183 "desc": "Encrypt communication between clients and HBase REST Server using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
184 "display_name": "Enable TLS/SSL for HBase REST Server",
185 "name": "hbase_restserver_ssl_enable",
186 "value": "false"
187 },
188 {
189 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
190 "display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
191 "name": "role_config_suppression_log_event_whitelist",
192 "value": "false"
193 },
194 {
195 "desc": "Enables the health test that the HBase REST Server's process state is consistent with the role configuration",
196 "display_name": "HBase REST Server Process Health Test",
197 "name": "hbaserestserver_scm_health_enabled",
198 "value": "true"
199 },
200 {
201 "desc": "The port that HBase REST Server binds to.",
202 "display_name": "HBase REST Server Port",
203 "name": "hbase_restserver_port",
204 "value": "20550"
205 },
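With these defaults the REST server listens on 0.0.0.0:20550. A hedged standard-library probe of the server's version resource; the host name is a placeholder, and /version is assumed to be the stock HBase REST status path:

    from urllib.request import urlopen

    # Placeholder host; 20550 is the default hbase_restserver_port above.
    url = "http://rest-server.example.com:20550/version"
    with urlopen(url, timeout=10) as resp:
        print(resp.read().decode())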
206 {
207 "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
208 "display_name": "Cgroup Memory Hard Limit",
209 "name": "rm_memory_hard_limit",
210 "value": "-1"
211 },
212 {
213 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server DNS Name Server parameter.",
214 "display_name": "Suppress Parameter Validation: HBase REST Server DNS Name Server",
215 "name": "role_config_suppression_hbase_restserver_dns_nameserver",
216 "value": "false"
217 },
218 {
219 "desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
220 "display_name": "Suppress Configuration Validator: CDH Version Validator",
221 "name": "role_config_suppression_cdh_version_validator",
222 "value": "false"
223 },
224 {
225 "desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
226 "display_name": "Suppress Health Test: File Descriptors",
227 "name": "role_health_suppression_hbase_rest_server_file_descriptor",
228 "value": "false"
229 },
230 {
231 "desc": "If true, HBase REST Server Web UI will bind to a wildcard address (0.0.0.0). Otherwise it will bind to a host name. Only available in CDH 4.3 and later.",
232 "display_name": "HBase REST Server Web UI Bind to Wildcard Address",
233 "name": "hbase_restserver_info_bind_to_wildcard",
234 "value": "true"
235 },
236 {
237 "desc": "Cloudera Manager agent monitors each service and each of its role by publishing metrics to the Cloudera Manager Service Monitor. Setting it to false will stop Cloudera Manager agent from publishing any metric for corresponding service/roles. This is usually helpful for services that generate large amount of metrics which Service Monitor is not able to process.",
238 "display_name": "Enable Metric Collection",
239 "name": "process_should_monitor",
240 "value": "true"
241 },
242 {
243 "desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
244 "display_name": "Stacks Collection Data Retention",
245 "name": "stacks_collection_data_retention",
246 "value": "104857600"
247 },
248 {
249 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
250 "display_name": "Suppress Parameter Validation: Heap Dump Directory",
251 "name": "role_config_suppression_oom_heap_dump_dir",
252 "value": "false"
253 },
254 {
255 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
256 "display_name": "HBase REST Server Logging Advanced Configuration Snippet (Safety Valve)",
257 "name": "log4j_safety_valve",
258 "value": null
259 },
260 {
261 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
262 "display_name": "Suppress Parameter Validation: Role Triggers",
263 "name": "role_config_suppression_role_triggers",
264 "value": "false"
265 },
266 {
267 "desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when HBase REST Server is acting as a TLS/SSL server. The keystore must be in JKS format.",
268 "display_name": "HBase REST Server TLS/SSL Server JKS Keystore File Location",
269 "name": "hbase_restserver_keystore_file",
270 "value": null
271 },
272 {
273 "desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
274 "display_name": "Suppress Health Test: Swap Memory Usage",
275 "name": "role_health_suppression_hbase_rest_server_swap_memory_usage",
276 "value": "false"
277 },
278 {
279 "desc": "When false, all HTTP methods are permitted (GET/PUT/POST/DELETE). When true, only GET is permitted.",
280 "display_name": "Enable HBase REST Server Read Only Mode",
281 "name": "hbase_restserver_readonly",
282 "value": "false"
283 },
284 {
285 "desc": "The port that HBase REST Server Web UI binds to.",
286 "display_name": "HBase REST Server Web UI Port",
287 "name": "hbase_restserver_info_port",
288 "value": "8085"
289 },
290 {
291 "desc": "Directory where HBase REST Server will place its log files.",
292 "display_name": "HBase REST Server Log Directory",
293 "name": "hbase_restserver_log_dir",
294 "value": "/var/log/hbase"
295 },
296 {
297 "desc": "When computing the overall HBase REST Server health, consider the host's health.",
298 "display_name": "HBase REST Server Host Health Test",
299 "name": "hbaserestserver_host_health_enabled",
300 "value": "true"
301 },
302 {
303 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
304 "display_name": "Enable Configuration Change Alerts",
305 "name": "enable_config_alerts",
306 "value": "false"
307 },
308 {
309 "desc": "The password that protects the private key contained in the JKS keystore used when HBase REST Server is acting as a TLS/SSL server.",
310 "display_name": "HBase REST Server TLS/SSL Server JKS Keystore Key Password",
311 "name": "hbase_restserver_keystore_keypassword",
312 "value": null
313 },
314 {
315 "desc": "The maximum size, in megabytes, per log file for HBase REST Server logs. Typically used by log4j or logback.",
316 "display_name": "HBase REST Server Max Log Size",
317 "name": "max_log_size",
318 "value": "200"
319 },
320 {
321 "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
322 "display_name": "Kill When Out of Memory",
323 "name": "oom_sigkill_enabled",
324 "value": "true"
325 },
326 {
327 "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
328 "display_name": "Dump Heap When Out of Memory",
329 "name": "oom_heap_dump_enabled",
330 "value": "true"
331 },
332 {
333 "desc": "The host name or IP address of the DNS name server which an HBase REST Server should use to determine the host name used for communication and display purposes.",
334 "display_name": "HBase REST Server DNS Name Server",
335 "name": "hbase_restserver_dns_nameserver",
336 "value": null
337 },
338 {
339 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase REST Server parameter.",
340 "display_name": "Suppress Parameter Validation: Java Configuration Options for HBase REST Server",
341 "name": "role_config_suppression_hbase_restserver_java_opts",
342 "value": "false"
343 },
344 {
345 "desc": "HBase REST Server will bind to this address.",
346 "display_name": "HBase REST Server Host Address",
347 "name": "hbase_restserver_host",
348 "value": "0.0.0.0"
349 },
350 {
351 "desc": "<p>The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
352 "display_name": "Role Triggers",
353 "name": "role_triggers",
354 "value": "[]"
355 },
356 {
357 "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
358 "display_name": "Enable Health Alerts for this Role",
359 "name": "enable_alerts",
360 "value": "true"
361 },
362 {
363 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server Host Address parameter.",
364 "display_name": "Suppress Parameter Validation: HBase REST Server Host Address",
365 "name": "role_config_suppression_hbase_restserver_host",
366 "value": "false"
367 },
368 {
369 "desc": "The name of the DNS network interface from which an HBase REST Server should report its IP address.",
370 "display_name": "HBase REST Server DNS Network Interface",
371 "name": "hbase_restserver_dns_interface",
372 "value": null
373 },
374 {
375 "desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <code>stacks</code> subdirectory of the role's log directory.",
376 "display_name": "Stacks Collection Directory",
377 "name": "stacks_collection_directory",
378 "value": null
379 },
380 {
381 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server DNS Network Interface parameter.",
382 "display_name": "Suppress Parameter Validation: HBase REST Server DNS Network Interface",
383 "name": "role_config_suppression_hbase_restserver_dns_interface",
384 "value": "false"
385 },
386 {
387 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
388 "display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
389 "name": "heap_dump_directory_free_space_percentage_thresholds",
390 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
391 },
392 {
393 "desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
394 "display_name": "Suppress Health Test: Log Directory Free Space",
395 "name": "role_health_suppression_hbase_rest_server_log_directory_free_space",
396 "value": "false"
397 },
398 {
399 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase REST Server TLS/SSL Server JKS Keystore Key Password parameter.",
400 "display_name": "Suppress Parameter Validation: HBase REST Server TLS/SSL Server JKS Keystore Key Password",
401 "name": "role_config_suppression_hbase_restserver_keystore_keypassword",
402 "value": "false"
403 },
404 {
405 "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
406 "display_name": "Cgroup CPU Shares",
407 "name": "rm_cpu_shares",
408 "value": "1024"
409 },
410 {
411 "desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
412 "display_name": "Suppress Health Test: Host Health",
413 "name": "role_health_suppression_hbase_rest_server_host_health",
414 "value": "false"
415 },
416 {
417 "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
418 "display_name": "Heap Dump Directory",
419 "name": "oom_heap_dump_dir",
420 "value": "/tmp"
421 },
422 {
423 "desc": "The period to review when computing unexpected exits.",
424 "display_name": "Unexpected Exits Monitoring Period",
425 "name": "unexpected_exits_window",
426 "value": "5"
427 },
428 {
429 "desc": "Minimum size of the HBase REST Server thread pool. The server will maintain at least this number of threads in the pool at all times. The thread pool can grow up to the maximum size set by hbase.rest.threads.max.",
430 "display_name": "HBase REST Server Minimum Threads",
431 "name": "hbase_restserver_threads_min",
432 "value": "2"
433 },
434 {
435 "desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
436 "display_name": "Suppress Health Test: Unexpected Exits",
437 "name": "role_health_suppression_hbase_rest_server_unexpected_exits",
438 "value": "false"
439 },
440 {
441 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
442 "display_name": "Suppress Parameter Validation: Stacks Collection Directory",
443 "name": "role_config_suppression_stacks_collection_directory",
444 "value": "false"
445 },
446 {
447 "desc": "The minimum log level for HBase REST Server logs",
448 "display_name": "HBase REST Server Logging Threshold",
449 "name": "log_threshold",
450 "value": "INFO"
451 }
452] \ No newline at end of file
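
[Editorial note] The role_triggers entries above document Cloudera Manager's trigger schema (triggerName, triggerExpression, streamThreshold, enabled). As a hedged illustration only -- not Sahara or Cloudera Manager code -- here is a minimal Python sketch of building and serializing one such trigger, reusing the fd_open example from the desc text; Cloudera Manager stores the value as a JSON string holding a list of trigger objects:

    import json

    # Illustrative sketch: the documented example trigger, serialized the
    # way the "role_triggers" config value is stored (a JSON string).
    trigger = {
        "triggerName": "sample-trigger",
        "triggerExpression": (
            "IF (SELECT fd_open WHERE roleName=$ROLENAME "
            "and last(fd_open) > 1500) DO health:bad"),
        "streamThreshold": 0,
        "enabled": "true",
    }
    role_triggers_value = json.dumps([trigger])
    print(role_triggers_value)
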
diff --git a/sahara/plugins/cdh/v5_13_0/resources/hbase-hbasethriftserver.json b/sahara/plugins/cdh/v5_13_0/resources/hbase-hbasethriftserver.json
new file mode 100644
index 0000000..84838cc
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/hbase-hbasethriftserver.json
@@ -0,0 +1,458 @@
1[
2 {
3 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
4 "display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
5 "name": "log_directory_free_space_percentage_thresholds",
6 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
7 },
8 {
9 "desc": "<p>This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message. If a log message matches multiple rules, the first matching rule is used. </p><p>Each rule has some or all of the following fields:</p><ul><li><code>alert</code> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><code>rate</code> <b>(mandatory)</b> - the maximum number of log messages matching this rule that can be sent as events every minute. If more than <code>rate</code> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><code>periodminutes</code> - the number of minutes during which the publisher will only publish <code>rate</code> events or fewer. If not specified, the default is <b>one minute</b></li><li><code>threshold</code> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><code>content</code> - match only those messages for which contents match this regular expression.</li><li><code>exceptiontype</code> - match only those messages that are part of an exception message. The exception type must match this regular expression.</li></ul><p>Example:</p><ul><li><pre>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</pre>This rule sends events to Cloudera Manager for every <code>StringIndexOutOfBoundsException</code>, up to a maximum of 10 every minute.</li><li><pre>{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"exceptiontype\": \".*\"}, {\"alert\": true, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"ERROR\"}</pre>In this example, an event generated may not be promoted to alert if an exception is in the ERROR log message, because the first rule with alert = false will match.</li></ul>",
10 "display_name": "Rules to Extract Events from Log Files",
11 "name": "log_event_whitelist",
12 "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
13 },
14 {
15 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve) parameter.",
16 "display_name": "Suppress Parameter Validation: HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve)",
17 "name": "role_config_suppression_hbasethriftserver_role_env_safety_valve",
18 "value": "false"
19 },
20 {
21 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase Thrift Server parameter.",
22 "display_name": "Suppress Parameter Validation: Java Configuration Options for HBase Thrift Server",
23 "name": "role_config_suppression_hbase_thriftserver_java_opts",
24 "value": "false"
25 },
26 {
27 "desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
28 "display_name": "Suppress Health Test: Unexpected Exits",
29 "name": "role_health_suppression_hbase_thrift_server_unexpected_exits",
30 "value": "false"
31 },
32 {
33 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server DNS Network Interface parameter.",
34 "display_name": "Suppress Parameter Validation: HBase Thrift Server DNS Network Interface",
35 "name": "role_config_suppression_hbase_thriftserver_dns_interface",
36 "value": "false"
37 },
38 {
39 "desc": "The \"core size\" of the thread pool. New threads are created on every connection until this many threads are created.",
40 "display_name": "HBase Thrift Server Min Worker Threads",
41 "name": "hbase_thriftserver_min_worker_threads",
42 "value": "200"
43 },
44 {
45 "desc": "The period to review when computing unexpected exits.",
46 "display_name": "Unexpected Exits Monitoring Period",
47 "name": "unexpected_exits_window",
48 "value": "5"
49 },
50 {
51 "desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
52 "display_name": "Suppress Health Test: Host Health",
53 "name": "role_health_suppression_hbase_thrift_server_host_health",
54 "value": "false"
55 },
56 {
57 "desc": "The port that HBase Thrift Server binds to.",
58 "display_name": "HBase Thrift Server Port",
59 "name": "hbase_thriftserver_port",
60 "value": "9090"
61 },
62 {
63 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
64 "display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
65 "name": "heap_dump_directory_free_space_percentage_thresholds",
66 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
67 },
68 {
69 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Bind Address parameter.",
70 "display_name": "Suppress Parameter Validation: HBase Thrift Server Bind Address",
71 "name": "role_config_suppression_hbase_thriftserver_bindaddress",
72 "value": "false"
73 },
74 {
75 "desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
76 "display_name": "Suppress Health Test: File Descriptors",
77 "name": "role_health_suppression_hbase_thrift_server_file_descriptor",
78 "value": "false"
79 },
80 {
81 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server DNS Name Server parameter.",
82 "display_name": "Suppress Parameter Validation: HBase Thrift Server DNS Name Server",
83 "name": "role_config_suppression_hbase_thriftserver_dns_nameserver",
84 "value": "false"
85 },
86 {
87 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
88 "display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
89 "name": "heap_dump_directory_free_space_absolute_thresholds",
90 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
91 },
92 {
93 "desc": "The health test thresholds on the swap memory usage of the process.",
94 "display_name": "Process Swap Memory Thresholds",
95 "name": "process_swap_memory_thresholds",
96 "value": "{\"critical\":\"never\",\"warning\":\"any\"}"
97 },
98 {
99 "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
100 "display_name": "Stacks Collection Method",
101 "name": "stacks_collection_method",
102 "value": "jstack"
103 },
104 {
105 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Log Directory parameter.",
106 "display_name": "Suppress Parameter Validation: HBase Thrift Server Log Directory",
107 "name": "role_config_suppression_hbase_thriftserver_log_dir",
108 "value": "false"
109 },
110 {
111 "desc": "Whether or not periodic stacks collection is enabled.",
112 "display_name": "Stacks Collection Enabled",
113 "name": "stacks_collection_enabled",
114 "value": "false"
115 },
116 {
117 "desc": "The maximum number of rolled log files to keep for HBase Thrift Server logs. Typically used by log4j or logback.",
118 "display_name": "HBase Thrift Server Maximum Log File Backups",
119 "name": "max_log_backup_index",
120 "value": "10"
121 },
122 {
123 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve) parameter.",
124 "display_name": "Suppress Parameter Validation: HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve)",
125 "name": "role_config_suppression_log4j_safety_valve",
126 "value": "false"
127 },
128 {
129 "desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
130 "display_name": "Suppress Health Test: Log Directory Free Space",
131 "name": "role_health_suppression_hbase_thrift_server_log_directory_free_space",
132 "value": "false"
133 },
134 {
135 "desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when HBase Thrift Server over HTTP is acting as a TLS/SSL server. The keystore must be in JKS format.",
136 "display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location",
137 "name": "hbase_thriftserver_http_keystore_file",
138 "value": null
139 },
140 {
141 "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
142 "display_name": "Java Heap Size of HBase Thrift Server in Bytes",
143 "name": "hbase_thriftserver_java_heapsize",
144 "value": "1073741824"
145 },
146 {
147 "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
148 "display_name": "Cgroup Memory Soft Limit",
149 "name": "rm_memory_soft_limit",
150 "value": "-1"
151 },
152 {
153 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
154 "display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
155 "name": "log_directory_free_space_absolute_thresholds",
156 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
157 },
158 {
159 "desc": "The password for the HBase Thrift Server over HTTP JKS keystore file.",
160 "display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password",
161 "name": "hbase_thriftserver_http_keystore_password",
162 "value": null
163 },
164 {
165 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
166 "display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
167 "name": "role_config_suppression_log_event_whitelist",
168 "value": "false"
169 },
170 {
171 "desc": "If true, HBase Thrift Server Web UI will bind to a wildcard address (0.0.0.0). Otherwise it will bind to a host name. Only available in CDH 4.3 and later.",
172 "display_name": "HBase Thrift Server Web UI Bind to Wildcard Address",
173 "name": "hbase_thriftserver_info_bind_to_wildcard",
174 "value": "true"
175 },
176 {
177 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location parameter.",
178 "display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Location",
179 "name": "role_config_suppression_hbase_thriftserver_http_keystore_file",
180 "value": "false"
181 },
182 {
183 "desc": "Address to bind the HBase Thrift Server to. When using the THsHaServer or the TNonblockingServer, always binds to 0.0.0.0 irrespective of this configuration value.",
184 "display_name": "HBase Thrift Server Bind Address",
185 "name": "hbase_thriftserver_bindaddress",
186 "value": "0.0.0.0"
187 },
188 {
189 "desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
190 "display_name": "Suppress Configuration Validator: CDH Version Validator",
191 "name": "role_config_suppression_cdh_version_validator",
192 "value": "false"
193 },
194 {
195 "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
196 "display_name": "Cgroup Memory Hard Limit",
197 "name": "rm_memory_hard_limit",
198 "value": "-1"
199 },
200 {
201 "desc": "Type of HBase Thrift Server.",
202 "display_name": "HBase Thrift Server Type",
203 "name": "hbase_thriftserver_type",
204 "value": "threadpool"
205 },
206 {
207 "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
208 "display_name": "Automatically Restart Process",
209 "name": "process_auto_restart",
210 "value": "false"
211 },
212 {
213 "desc": "Cloudera Manager agent monitors each service and each of its role by publishing metrics to the Cloudera Manager Service Monitor. Setting it to false will stop Cloudera Manager agent from publishing any metric for corresponding service/roles. This is usually helpful for services that generate large amount of metrics which Service Monitor is not able to process.",
214 "display_name": "Enable Metric Collection",
215 "name": "process_should_monitor",
216 "value": "true"
217 },
218 {
219 "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
220 "display_name": "Cgroup I/O Weight",
221 "name": "rm_io_weight",
222 "value": "500"
223 },
224 {
225 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password parameter.",
226 "display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore File Password",
227 "name": "role_config_suppression_hbase_thriftserver_http_keystore_password",
228 "value": "false"
229 },
230 {
231 "desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
232 "display_name": "Stacks Collection Data Retention",
233 "name": "stacks_collection_data_retention",
234 "value": "104857600"
235 },
236 {
237 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
238 "display_name": "Suppress Parameter Validation: Heap Dump Directory",
239 "name": "role_config_suppression_oom_heap_dump_dir",
240 "value": "false"
241 },
242 {
243 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
244 "display_name": "HBase Thrift Server Environment Advanced Configuration Snippet (Safety Valve)",
245 "name": "HBASETHRIFTSERVER_role_env_safety_valve",
246 "value": null
247 },
248 {
249 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
250 "display_name": "HBase Thrift Server Logging Advanced Configuration Snippet (Safety Valve)",
251 "name": "log4j_safety_valve",
252 "value": null
253 },
254 {
255 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
256 "display_name": "Suppress Parameter Validation: Role Triggers",
257 "name": "role_config_suppression_role_triggers",
258 "value": "false"
259 },
260 {
261 "desc": "When computing the overall HBase Thrift Server health, consider the host's health.",
262 "display_name": "HBase Thrift Server Host Health Test",
263 "name": "hbasethriftserver_host_health_enabled",
264 "value": "true"
265 },
266 {
267 "desc": "The password that protects the private key contained in the JKS keystore used when HBase Thrift Server over HTTP is acting as a TLS/SSL server.",
268 "display_name": "HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password",
269 "name": "hbase_thriftserver_http_keystore_keypassword",
270 "value": null
271 },
272 {
273 "desc": "Enables the health test that the HBase Thrift Server's process state is consistent with the role configuration",
274 "display_name": "HBase Thrift Server Process Health Test",
275 "name": "hbasethriftserver_scm_health_enabled",
276 "value": "true"
277 },
278 {
279 "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags, PermGen, or extra debugging flags would be passed here.",
280 "display_name": "Java Configuration Options for HBase Thrift Server",
281 "name": "hbase_thriftserver_java_opts",
282 "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
283 },
284 {
285 "desc": "Use framed transport. When using the THsHaServer or TNonblockingServer, framed transport is always used irrespective of this configuration value.",
286 "display_name": "Enable HBase Thrift Server Framed Transport",
287 "name": "hbase_thriftserver_framed",
288 "value": "false"
289 },
290 {
291 "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
292 "display_name": "Maximum Process File Descriptors",
293 "name": "rlimit_fds",
294 "value": null
295 },
296 {
297 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
298 "display_name": "Enable Configuration Change Alerts",
299 "name": "enable_config_alerts",
300 "value": "false"
301 },
302 {
303 "desc": "The maximum size, in megabytes, per log file for HBase Thrift Server logs. Typically used by log4j or logback.",
304 "display_name": "HBase Thrift Server Max Log Size",
305 "name": "max_log_size",
306 "value": "200"
307 },
308 {
309 "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
310 "display_name": "Kill When Out of Memory",
311 "name": "oom_sigkill_enabled",
312 "value": "true"
313 },
314 {
315 "desc": "The port that HBase Thrift Server Web UI binds to.",
316 "display_name": "HBase Thrift Server Web UI Port",
317 "name": "hbase_thriftserver_info_port",
318 "value": "9095"
319 },
320 {
321 "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
322 "display_name": "Dump Heap When Out of Memory",
323 "name": "oom_heap_dump_enabled",
324 "value": "true"
325 },
326 {
327 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password parameter.",
328 "display_name": "Suppress Parameter Validation: HBase Thrift Server over HTTP TLS/SSL Server JKS Keystore Key Password",
329 "name": "role_config_suppression_hbase_thriftserver_http_keystore_keypassword",
330 "value": "false"
331 },
332 {
333 "desc": "Whether to suppress the results of the Audit Pipeline Test heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
334 "display_name": "Suppress Health Test: Audit Pipeline Test",
335 "name": "role_health_suppression_hbase_thrift_server_audit_health",
336 "value": "false"
337 },
338 {
339 "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
340 "display_name": "Unexpected Exits Thresholds",
341 "name": "unexpected_exits_thresholds",
342 "value": "{\"critical\":\"any\",\"warning\":\"never\"}"
343 },
344 {
345 "desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
346 "display_name": "Suppress Health Test: Swap Memory Usage",
347 "name": "role_health_suppression_hbase_thrift_server_swap_memory_usage",
348 "value": "false"
349 },
350 {
351 "desc": "<p>The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
352 "display_name": "Role Triggers",
353 "name": "role_triggers",
354 "value": "[]"
355 },
356 {
357 "desc": "The host name or IP address of the DNS name server which an HBase Thrift Server should use to determine the host name used for communication and display purposes.",
358 "display_name": "HBase Thrift Server DNS Name Server",
359 "name": "hbase_thriftserver_dns_nameserver",
360 "value": null
361 },
362 {
363 "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
364 "display_name": "Enable Health Alerts for this Role",
365 "name": "enable_alerts",
366 "value": "true"
367 },
368 {
369 "desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
370 "display_name": "Suppress Health Test: Process Status",
371 "name": "role_health_suppression_hbase_thrift_server_scm_health",
372 "value": "false"
373 },
374 {
375 "desc": "Use the TCompactProtocol instead of the default TBinaryProtocol. TCompactProtocol is a binary protocol that is more compact than the default and typically more efficient.",
376 "display_name": "Enable HBase Thrift Server Compact Protocol",
377 "name": "hbase_thriftserver_compact",
378 "value": "false"
379 },
380 {
381 "desc": "The name of the DNS network interface from which an HBase Thrift Server should report its IP address.",
382 "display_name": "HBase Thrift Server DNS Network Interface",
383 "name": "hbase_thriftserver_dns_interface",
384 "value": null
385 },
386 {
387 "desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <code>stacks</code> subdirectory of the role's log directory.",
388 "display_name": "Stacks Collection Directory",
389 "name": "stacks_collection_directory",
390 "value": null
391 },
392 {
393 "desc": "Directory where HBase Thrift Server will place its log files.",
394 "display_name": "HBase Thrift Server Log Directory",
395 "name": "hbase_thriftserver_log_dir",
396 "value": "/var/log/hbase"
397 },
398 {
399 "desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
400 "display_name": "HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
401 "name": "hbase_thriftserver_config_safety_valve",
402 "value": null
403 },
404 {
405 "desc": "Whether to suppress the results of the Heap Dump Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
406 "display_name": "Suppress Health Test: Heap Dump Directory Free Space",
407 "name": "role_health_suppression_hbase_thrift_server_heap_dump_directory_free_space",
408 "value": "false"
409 },
410 {
411 "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
412 "display_name": "File Descriptor Monitoring Thresholds",
413 "name": "hbasethriftserver_fd_thresholds",
414 "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
415 },
416 {
417 "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
418 "display_name": "Cgroup CPU Shares",
419 "name": "rm_cpu_shares",
420 "value": "1024"
421 },
422 {
423 "desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
424 "display_name": "Heap Dump Directory",
425 "name": "oom_heap_dump_dir",
426 "value": "/tmp"
427 },
428 {
429 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
430 "display_name": "Suppress Parameter Validation: HBase Thrift Server Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
431 "name": "role_config_suppression_hbase_thriftserver_config_safety_valve",
432 "value": "false"
433 },
434 {
435 "desc": "The frequency with which stacks are collected.",
436 "display_name": "Stacks Collection Frequency",
437 "name": "stacks_collection_frequency",
438 "value": "5.0"
439 },
440 {
441 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
442 "display_name": "Suppress Parameter Validation: Stacks Collection Directory",
443 "name": "role_config_suppression_stacks_collection_directory",
444 "value": "false"
445 },
446 {
447 "desc": "The minimum log level for HBase Thrift Server logs",
448 "display_name": "HBase Thrift Server Logging Threshold",
449 "name": "log_threshold",
450 "value": "INFO"
451 },
452 {
453 "desc": "Encrypt communication between clients and HBase Thrift Server over HTTP using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
454 "display_name": "Enable TLS/SSL for HBase Thrift Server over HTTP",
455 "name": "hbase_thriftserver_http_use_ssl",
456 "value": "false"
457 }
458] \ No newline at end of file
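
[Editorial note] The log_event_whitelist values in these files encode first-match-wins rule evaluation: every log message is checked against the rules in order, and the first matching rule decides whether an event (and possibly an alert) is emitted. Below is a minimal Python sketch of that selection logic, assuming the field names from the desc text (threshold, content) and simplifying away rate/periodminutes throttling and exceptiontype matching; it is illustrative only, not Cloudera Manager's implementation:

    import re

    LEVELS = ("TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL")

    def first_matching_rule(rules, severity, message):
        """Return the first rule that matches, or None (no event is sent)."""
        for rule in rules:
            threshold = rule.get("threshold")
            if threshold and LEVELS.index(severity) < LEVELS.index(threshold):
                continue  # message is below this rule's severity threshold
            content = rule.get("content")
            if content and not re.search(content, message):
                continue  # message body does not match the rule's regex
            return rule  # first match wins; later rules are never consulted
        return None

    rules = [
        {"alert": False, "rate": 1, "periodminutes": 1, "threshold": "FATAL"},
        {"alert": False, "rate": 1, "periodminutes": 1, "threshold": "WARN"},
    ]
    # An ERROR message skips the FATAL rule and lands on the WARN rule.
    print(first_matching_rule(rules, "ERROR", "IPC Server Responder, call x output error"))
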
diff --git a/sahara/plugins/cdh/v5_13_0/resources/hbase-master.json b/sahara/plugins/cdh/v5_13_0/resources/hbase-master.json
new file mode 100644
index 0000000..074246b
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/hbase-master.json
@@ -0,0 +1,530 @@
1[
2 {
3 "desc": "Whether or not periodic stacks collection is enabled.",
4 "display_name": "Stacks Collection Enabled",
5 "name": "stacks_collection_enabled",
6 "value": "false"
7 },
8 {
9 "desc": "Directory where Master will place its log files.",
10 "display_name": "Master Log Directory",
11 "name": "hbase_master_log_dir",
12 "value": "/var/log/hbase"
13 },
14 {
15 "desc": "The health test thresholds on the duration of the metrics request to the web server.",
16 "display_name": "Web Metric Collection Duration",
17 "name": "master_web_metric_collection_thresholds",
18 "value": "{\"critical\":\"never\",\"warning\":\"10000.0\"}"
19 },
20 {
21 "desc": "Whether to suppress the results of the Log Directory Free Space heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
22 "display_name": "Suppress Health Test: Log Directory Free Space",
23 "name": "role_health_suppression_master_log_directory_free_space",
24 "value": "false"
25 },
26 {
27 "desc": "Maximum size in bytes for the Java Process heap memory. Passed to Java -Xmx.",
28 "display_name": "Java Heap Size of HBase Master in Bytes",
29 "name": "hbase_master_java_heapsize",
30 "value": "1073741824"
31 },
32 {
33 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
34 "display_name": "Master Logging Advanced Configuration Snippet (Safety Valve)",
35 "name": "log4j_safety_valve",
36 "value": null
37 },
38 {
39 "desc": "When true, HBase Master will bind to 0.0.0.0. Only available with CDH 4.3 and later.",
40 "display_name": "HBase Master Bind to Wildcard Address",
41 "name": "hbase_master_bind_to_wildcard_address",
42 "value": "true"
43 },
44 {
45 "desc": "The period to review when computing unexpected exits.",
46 "display_name": "Unexpected Exits Monitoring Period",
47 "name": "unexpected_exits_window",
48 "value": "5"
49 },
50 {
51 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master Web UI Address parameter.",
52 "display_name": "Suppress Parameter Validation: HBase Master Web UI Address",
53 "name": "role_config_suppression_hbase_master_info_bindaddress",
54 "value": "false"
55 },
56 {
57 "desc": "The port for the HBase Master web UI. Set to -1 to disable the HBase Master web UI.",
58 "display_name": "HBase Master Web UI Port",
59 "name": "hbase_master_info_port",
60 "value": "60010"
61 },
62 {
63 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
64 "display_name": "Suppress Parameter Validation: Heap Dump Directory",
65 "name": "role_config_suppression_oom_heap_dump_dir",
66 "value": "false"
67 },
68 {
69 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
70 "display_name": "Enable Configuration Change Alerts",
71 "name": "enable_config_alerts",
72 "value": "false"
73 },
74 {
75 "desc": "Maximum time an HLog remains in the .oldlogdir directory until an HBase Master thread deletes it.",
76 "display_name": "Maximum Time to Keep HLogs",
77 "name": "hbase_master_logcleaner_ttl",
78 "value": "60000"
79 },
80 {
81 "desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
82 "display_name": "Dump Heap When Out of Memory",
83 "name": "oom_heap_dump_enabled",
84 "value": "true"
85 },
86 {
87 "desc": "Whether to suppress the results of the HBase Master Canary heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
88 "display_name": "Suppress Health Test: HBase Master Canary",
89 "name": "role_health_suppression_master_canary_health",
90 "value": "false"
91 },
92 {
93 "desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
94 "display_name": "Master Environment Advanced Configuration Snippet (Safety Valve)",
95 "name": "MASTER_role_env_safety_valve",
96 "value": null
97 },
98 {
99 "desc": "The address for the HBase Master web UI",
100 "display_name": "HBase Master Web UI Address",
101 "name": "hbase_master_info_bindAddress",
102 "value": null
103 },
104 {
105 "desc": "A comma-separated list of LogCleanerDelegate(s) that are used in LogsCleaner. WAL/HLog cleaner(s) are called in order, so put the log cleaner that prunes the most log files in the front. To implement your own LogCleanerDelegate, add it to HBase's classpath and add the fully-qualified class name here. You should always add the above default log cleaners in the list, unless you have a special reason not to.",
106 "display_name": "HBase Master Log Cleaner Plugins",
107 "name": "hbase_master_logcleaner_plugins",
108 "value": null
109 },
110 {
111 "desc": "Whether to suppress the results of the Web Server Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
112 "display_name": "Suppress Health Test: Web Server Status",
113 "name": "role_health_suppression_master_web_metric_collection",
114 "value": "false"
115 },
116 {
117 "desc": "The host name or IP address of the DNS name server which an HBase Master should use to determine the host name used for communication and display purposes.",
118 "display_name": "HBase Master DNS Name Server",
119 "name": "hbase_master_dns_nameserver",
120 "value": null
121 },
122 {
123 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
124 "display_name": "Suppress Parameter Validation: Stacks Collection Directory",
125 "name": "role_config_suppression_stacks_collection_directory",
126 "value": "false"
127 },
128 {
129 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Environment Advanced Configuration Snippet (Safety Valve) parameter.",
130 "display_name": "Suppress Parameter Validation: Master Environment Advanced Configuration Snippet (Safety Valve)",
131 "name": "role_config_suppression_master_role_env_safety_valve",
132 "value": "false"
133 },
134 {
135 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
136 "display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
137 "name": "log_directory_free_space_percentage_thresholds",
138 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
139 },
140 {
141 "desc": "<p>This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message. If a log message matches multiple rules, the first matching rule is used. </p><p>Each rule has some or all of the following fields:</p><ul><li><code>alert</code> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><code>rate</code> <b>(mandatory)</b> - the maximum number of log messages matching this rule that can be sent as events every minute. If more than <code>rate</code> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><code>periodminutes</code> - the number of minutes during which the publisher will only publish <code>rate</code> events or fewer. If not specified, the default is <b>one minute</b></li><li><code>threshold</code> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><code>content</code> - match only those messages for which contents match this regular expression.</li><li><code>exceptiontype</code> - match only those messages that are part of an exception message. The exception type must match this regular expression.</li></ul><p>Example:</p><ul><li><pre>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</pre>This rule sends events to Cloudera Manager for every <code>StringIndexOutOfBoundsException</code>, up to a maximum of 10 every minute.</li><li><pre>{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"exceptiontype\": \".*\"}, {\"alert\": true, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"ERROR\"}</pre>In this example, an event generated may not be promoted to alert if an exception is in the ERROR log message, because the first rule with alert = false will match.</li></ul>",
142 "display_name": "Rules to Extract Events from Log Files",
143 "name": "log_event_whitelist",
144 "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"IPC Server handler.*ClosedChannelException\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"IPC Server Responder, call.*output error\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Daughter regiondir does not exist: .*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"File.*might still be open.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"File.*might still be open.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Moving table .+ state to enabled but was already enabled\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Received OPENED for region.*but region was in the state.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Unknown job [^ ]+ being deleted.*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}\n"
145 },
146 {
147 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Log Directory parameter.",
148 "display_name": "Suppress Parameter Validation: Master Log Directory",
149 "name": "role_config_suppression_hbase_master_log_dir",
150 "value": "false"
151 },
152 {
153 "desc": "The amount of time allowed after this role is started that failures of health tests that rely on communication with this role will be tolerated.",
154 "display_name": "Health Test Startup Tolerance",
155 "name": "master_startup_tolerance",
156 "value": "5"
157 },
158 {
159 "desc": "Number of pooled threads to handle region closing in the master.",
160 "display_name": "Region Closing Threads",
161 "name": "hbase_master_executor_closeregion_threads",
162 "value": "5"
163 },
164 {
165 "desc": "When computing the overall Master health, consider the host's health.",
166 "display_name": "Master Host Health Test",
167 "name": "master_host_health_enabled",
168 "value": "true"
169 },
170 {
171 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory.",
172 "display_name": "Heap Dump Directory Free Space Monitoring Absolute Thresholds",
173 "name": "heap_dump_directory_free_space_absolute_thresholds",
174 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
175 },
176 {
177 "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
178 "display_name": "Stacks Collection Method",
179 "name": "stacks_collection_method",
180 "value": "jstack"
181 },
182 {
183 "desc": "Enables the health test that the Cloudera Manager Agent can successfully contact and gather metrics from the web server.",
184 "display_name": "Web Metric Collection",
185 "name": "master_web_metric_collection_enabled",
186 "value": "true"
187 },
188 {
189 "desc": "The maximum size, in megabytes, per log file for Master logs. Typically used by log4j or logback.",
190 "display_name": "Master Max Log Size",
191 "name": "max_log_size",
192 "value": "200"
193 },
194 {
195 "desc": "<p>The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
196 "display_name": "Role Triggers",
197 "name": "role_triggers",
198 "value": "[]"
199 },
200 {
201 "desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
202 "display_name": "Garbage Collection Duration Thresholds",
203 "name": "master_gc_duration_thresholds",
204 "value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
205 },
206 {
207 "desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
208 "display_name": "Suppress Health Test: Process Status",
209 "name": "role_health_suppression_master_scm_health",
210 "value": "false"
211 },
212 {
213 "desc": "Advanced Configuration Snippet (Safety Valve) for Hadoop Metrics2. Properties will be inserted into <strong>hadoop-metrics2.properties</strong>.",
214 "display_name": "Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
215 "name": "hadoop_metrics2_safety_valve",
216 "value": null
217 },
218 {
219 "desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
220 "display_name": "Stacks Collection Data Retention",
221 "name": "stacks_collection_data_retention",
222 "value": "104857600"
223 },
224 {
225 "desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
226 "display_name": "Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
227 "name": "hbase_master_config_safety_valve",
228 "value": null
229 },
230 {
231 "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags, PermGen, or extra debugging flags would be passed here.",
232 "display_name": "Java Configuration Options for HBase Master",
233 "name": "hbase_master_java_opts",
234 "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled -XX:ReservedCodeCacheSize=256m"
235 },
236 {
237 "desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
238 "display_name": "Automatically Restart Process",
239 "name": "process_auto_restart",
240 "value": "false"
241 },
242 {
243 "desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
244 "display_name": "Unexpected Exits Thresholds",
245 "name": "unexpected_exits_thresholds",
246 "value": "{\"critical\":\"any\",\"warning\":\"never\"}"
247 },
248 {
249 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master DNS Network Interface parameter.",
250 "display_name": "Suppress Parameter Validation: HBase Master DNS Network Interface",
251 "name": "role_config_suppression_hbase_master_dns_interface",
252 "value": "false"
253 },
254 {
255 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve) parameter.",
256 "display_name": "Suppress Parameter Validation: Hadoop Metrics2 Advanced Configuration Snippet (Safety Valve)",
257 "name": "role_config_suppression_hadoop_metrics2_safety_valve",
258 "value": "false"
259 },
260 {
261 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's heap dump directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Heap Dump Directory Free Space Monitoring Absolute Thresholds setting is configured.",
262 "display_name": "Heap Dump Directory Free Space Monitoring Percentage Thresholds",
263 "name": "heap_dump_directory_free_space_percentage_thresholds",
264 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
265 },
266 {
267 "desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
268 "display_name": "Cgroup CPU Shares",
269 "name": "rm_cpu_shares",
270 "value": "1024"
271 },
272 {
273 "desc": "The minimum log level for Master logs",
274 "display_name": "Master Logging Threshold",
275 "name": "log_threshold",
276 "value": "INFO"
277 },
278 {
279 "desc": "Whether to suppress the results of the Audit Pipeline Test heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
280 "display_name": "Suppress Health Test: Audit Pipeline Test",
281 "name": "role_health_suppression_master_audit_health",
282 "value": "false"
283 },
284 {
285 "desc": "Whether to suppress the results of the HBase Regions In Transition Over Threshold heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
286 "display_name": "Suppress Health Test: HBase Regions In Transition Over Threshold",
287 "name": "role_health_suppression_master_regions_in_transition_over_threshold",
288 "value": "false"
289 },
290 {
291 "desc": "Number of pooled threads to handle the recovery of the RegionServers in the master.",
292 "display_name": "RegionServer Recovery Threads",
293 "name": "hbase_master_executor_serverops_threads",
294 "value": "5"
295 },
296 {
297 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Logging Advanced Configuration Snippet (Safety Valve) parameter.",
298 "display_name": "Suppress Parameter Validation: Master Logging Advanced Configuration Snippet (Safety Valve)",
299 "name": "role_config_suppression_log4j_safety_valve",
300 "value": "false"
301 },
302 {
303 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Rules to Extract Events from Log Files parameter.",
304 "display_name": "Suppress Parameter Validation: Rules to Extract Events from Log Files",
305 "name": "role_config_suppression_log_event_whitelist",
306 "value": "false"
307 },
308 {
309 "desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
310 "display_name": "Suppress Configuration Validator: CDH Version Validator",
311 "name": "role_config_suppression_cdh_version_validator",
312 "value": "false"
313 },
314 {
315     "desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous pages and page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default, processes not managed by Cloudera Manager have no limit.",
316 "display_name": "Cgroup Memory Hard Limit",
317 "name": "rm_memory_hard_limit",
318 "value": "-1"
319 },
320 {
321     "desc": "Whether to suppress the results of the GC Duration health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
322 "display_name": "Suppress Health Test: GC Duration",
323 "name": "role_health_suppression_master_gc_duration",
324 "value": "false"
325 },
326 {
327 "desc": "The period to review when computing the moving average of garbage collection time.",
328 "display_name": "Garbage Collection Duration Monitoring Period",
329 "name": "master_gc_duration_window",
330 "value": "5"
331 },
332 {
333 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory.",
334 "display_name": "Log Directory Free Space Monitoring Absolute Thresholds",
335 "name": "log_directory_free_space_absolute_thresholds",
336 "value": "{\"critical\":\"5.36870912E9\",\"warning\":\"1.073741824E10\"}"
337 },
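The threshold values above are raw byte counts serialized in scientific notation. A short decoding sketch (illustration only) shows the defaults are 5 GiB critical and 10 GiB warning:

    import json

    thresholds = json.loads('{"critical":"5.36870912E9","warning":"1.073741824E10"}')
    for level, raw in sorted(thresholds.items()):
        # 5.36870912E9 bytes == 5 GiB; 1.073741824E10 bytes == 10 GiB
        print(level, float(raw) / 2 ** 30, "GiB")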
338 {
339 "desc": "Enable the health test that checks if there are regions in transition over the threshold configured in HBase.",
340 "display_name": "HBase Regions In Transition Over Threshold Health Test",
341 "name": "master_regions_in_transition_health_enabled",
342 "value": "true"
343 },
344 {
345 "desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
346 "display_name": "Maximum Process File Descriptors",
347 "name": "rlimit_fds",
348 "value": null
349 },
350 {
351 "desc": "Number of RPC Server instances spun up on HBase Master.",
352 "display_name": "HBase Master Handler Count",
353 "name": "hbase_master_handler_count",
354 "value": "25"
355 },
356 {
357     "desc": "Whether to suppress the results of the Unexpected Exits health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
358 "display_name": "Suppress Health Test: Unexpected Exits",
359 "name": "role_health_suppression_master_unexpected_exits",
360 "value": "false"
361 },
362 {
363 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master DNS Name Server parameter.",
364 "display_name": "Suppress Parameter Validation: HBase Master DNS Name Server",
365 "name": "role_config_suppression_hbase_master_dns_nameserver",
366 "value": "false"
367 },
368 {
369     "desc": "Path to the directory where heap dumps are generated when a java.lang.OutOfMemoryError is thrown. This directory is automatically created if it does not exist. If this directory already exists, the role user must have write access to it. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java process heap size configured for this role.",
370 "display_name": "Heap Dump Directory",
371 "name": "oom_heap_dump_dir",
372 "value": "/tmp"
373 },
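A hedged sketch of the JVM option string this setting implies; the exact wiring is internal to Cloudera Manager, but heap dumps on OutOfMemoryError use the stock HotSpot flags shown here (assumption: CM maps oom_heap_dump_dir into -XX:HeapDumpPath):

    def heap_dump_java_opts(oom_heap_dump_dir="/tmp"):
        # Standard HotSpot flags for dumping the heap on OutOfMemoryError;
        # assumption: CM plumbs the configured directory into these flags.
        return ("-XX:+HeapDumpOnOutOfMemoryError "
                "-XX:HeapDumpPath=%s" % oom_heap_dump_dir)

    print(heap_dump_java_opts())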
374 {
375 "desc": "Number of pooled threads to handle region opening in the master.",
376 "display_name": "Region Opening Threads",
377 "name": "hbase_master_executor_openregion_threads",
378 "value": "5"
379 },
380 {
381 "desc": "The frequency with which stacks are collected.",
382 "display_name": "Stacks Collection Frequency",
383 "name": "stacks_collection_frequency",
384 "value": "5.0"
385 },
386 {
387 "desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
388 "display_name": "Enable Health Alerts for this Role",
389 "name": "enable_alerts",
390 "value": "true"
391 },
392 {
393 "desc": "The name of the DNS network interface from which an HBase Master should report its IP address.",
394 "display_name": "HBase Master DNS Network Interface",
395 "name": "hbase_master_dns_interface",
396 "value": null
397 },
398 {
399     "desc": "Enables the health test that checks whether the Master's process state is consistent with the role configuration.",
400 "display_name": "Master Process Health Test",
401 "name": "master_scm_health_enabled",
402 "value": "true"
403 },
404 {
405 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Coprocessor Master Classes parameter.",
406 "display_name": "Suppress Parameter Validation: HBase Coprocessor Master Classes",
407 "name": "role_config_suppression_hbase_coprocessor_master_classes",
408 "value": "false"
409 },
410 {
411 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml parameter.",
412 "display_name": "Suppress Parameter Validation: Master Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
413 "name": "role_config_suppression_hbase_master_config_safety_valve",
414 "value": "false"
415 },
416 {
417     "desc": "Time period in seconds to reset long-running metrics (e.g. compactions). This is an HBase-specific configuration.",
418 "display_name": "Extended Period",
419 "name": "hbase_metrics_extended_period",
420 "value": "3600"
421 },
422 {
423 "desc": "The health test thresholds on the swap memory usage of the process.",
424 "display_name": "Process Swap Memory Thresholds",
425 "name": "process_swap_memory_thresholds",
426 "value": "{\"critical\":\"never\",\"warning\":\"any\"}"
427 },
428 {
429 "desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
430 "display_name": "File Descriptor Monitoring Thresholds",
431 "name": "master_fd_thresholds",
432 "value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
433 },
434 {
435 "desc": "The port that the HBase Master binds to.",
436 "display_name": "HBase Master Port",
437 "name": "hbase_master_port",
438 "value": "60000"
439 },
440 {
441 "desc": "The maximum number of rolled log files to keep for Master logs. Typically used by log4j or logback.",
442 "display_name": "Master Maximum Log File Backups",
443 "name": "max_log_backup_index",
444 "value": "10"
445 },
446 {
447 "desc": "List of org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are loaded by default on the active HMaster process. For any implemented coprocessor methods, the listed classes will be called in order. After implementing your own MasterObserver, just put it in HBase's classpath and add the fully qualified class name here.",
448 "display_name": "HBase Coprocessor Master Classes",
449 "name": "hbase_coprocessor_master_classes",
450 "value": ""
451 },
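The value is a comma-separated list of fully qualified class names, surfacing as the hbase.coprocessor.master.classes property. A tiny sketch with a hypothetical observer class (org.example.MyMasterObserver is illustrative, not a real class):

    # Hypothetical class name for illustration; a real MasterObserver must be
    # compiled and placed on the HMaster classpath first.
    observers = ["org.example.MyMasterObserver"]
    print({"hbase.coprocessor.master.classes": ",".join(observers)})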
452 {
453     "desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous pages and page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default, processes not managed by Cloudera Manager have no limit.",
454 "display_name": "Cgroup Memory Soft Limit",
455 "name": "rm_memory_soft_limit",
456 "value": "-1"
457 },
458 {
459     "desc": "Whether to suppress the results of the File Descriptors health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
460 "display_name": "Suppress Health Test: File Descriptors",
461 "name": "role_health_suppression_master_file_descriptor",
462 "value": "false"
463 },
464 {
465     "desc": "Enables the health test that checks whether a client can connect to the HBase Master.",
466 "display_name": "HBase Master Canary Health Test",
467 "name": "master_canary_health_enabled",
468 "value": "true"
469 },
470 {
471     "desc": "The Cloudera Manager Agent monitors each service and each of its roles by publishing metrics to the Cloudera Manager Service Monitor. Setting this to false stops the Cloudera Manager Agent from publishing any metrics for the corresponding service or roles. This is usually helpful for services that generate large amounts of metrics that the Service Monitor is unable to process.",
472 "display_name": "Enable Metric Collection",
473 "name": "process_should_monitor",
474 "value": "true"
475 },
476 {
477     "desc": "Whether to suppress the results of the Swap Memory Usage health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
478 "display_name": "Suppress Health Test: Swap Memory Usage",
479 "name": "role_health_suppression_master_swap_memory_usage",
480 "value": "false"
481 },
482 {
483 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
484 "display_name": "Suppress Parameter Validation: Role Triggers",
485 "name": "role_config_suppression_role_triggers",
486 "value": "false"
487 },
488 {
489     "desc": "Whether to suppress the results of the Heap Dump Directory Free Space health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
490 "display_name": "Suppress Health Test: Heap Dump Directory Free Space",
491 "name": "role_health_suppression_master_heap_dump_directory_free_space",
492 "value": "false"
493 },
494 {
495 "desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
496 "display_name": "Kill When Out of Memory",
497 "name": "oom_sigkill_enabled",
498 "value": "true"
499 },
500 {
501 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase Master Log Cleaner Plugins parameter.",
502 "display_name": "Suppress Parameter Validation: HBase Master Log Cleaner Plugins",
503 "name": "role_config_suppression_hbase_master_logcleaner_plugins",
504 "value": "false"
505 },
506 {
507     "desc": "The directory in which stack logs are placed. If not set, stacks are logged into a <code>stacks</code> subdirectory of the role's log directory.",
508 "display_name": "Stacks Collection Directory",
509 "name": "stacks_collection_directory",
510 "value": null
511 },
512 {
513 "desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
514 "display_name": "Cgroup I/O Weight",
515 "name": "rm_io_weight",
516 "value": "500"
517 },
518 {
519 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Configuration Options for HBase Master parameter.",
520 "display_name": "Suppress Parameter Validation: Java Configuration Options for HBase Master",
521 "name": "role_config_suppression_hbase_master_java_opts",
522 "value": "false"
523 },
524 {
525     "desc": "Whether to suppress the results of the Host Health health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
526 "display_name": "Suppress Health Test: Host Health",
527 "name": "role_health_suppression_master_host_health",
528 "value": "false"
529 }
530] \ No newline at end of file
diff --git a/sahara/plugins/cdh/v5_13_0/resources/hbase-regionserver.json b/sahara/plugins/cdh/v5_13_0/resources/hbase-regionserver.json
new file mode 100644
index 0000000..8912a47
--- /dev/null
+++ b/sahara/plugins/cdh/v5_13_0/resources/hbase-regionserver.json
@@ -0,0 +1,884 @@
1[
2 {
3 "desc": "For advanced use only. A string to be inserted into <strong>hbase-site.xml</strong> for this role only.",
4 "display_name": "RegionServer Advanced Configuration Snippet (Safety Valve) for hbase-site.xml",
5 "name": "hbase_regionserver_config_safety_valve",
6 "value": null
7 },
8 {
9 "desc": "Whether to suppress configuration warnings produced by the HBase RegionServer Multiple HDFS WAL with Replication Validator configuration validator.",
10 "display_name": "Suppress Configuration Validator: HBase RegionServer Multiple HDFS WAL with Replication Validator",
11 "name": "role_config_suppression_hbase_regionserver_multiwal_replication_validator",
12 "value": "false"
13 },
14 {
15 "desc": "Whether or not periodic stacks collection is enabled.",
16 "display_name": "Stacks Collection Enabled",
17 "name": "stacks_collection_enabled",
18 "value": "false"
19 },
20 {
21 "desc": "Maximum number of HStoreFiles to compact per minor compaction.",
22 "display_name": "Maximum Number of HStoreFiles Compaction",
23 "name": "hbase_hstore_compaction_max",
24 "value": null
25 },
26 {
27 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the RegionServer Environment Advanced Configuration Snippet (Safety Valve) parameter.",
28 "display_name": "Suppress Parameter Validation: RegionServer Environment Advanced Configuration Snippet (Safety Valve)",
29 "name": "role_config_suppression_regionserver_role_env_safety_valve",
30 "value": "false"
31 },
32 {
33 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HBase HRegion Major Compaction parameter.",
34 "display_name": "Suppress Parameter Validation: HBase HRegion Major Compaction",
35 "name": "role_config_suppression_hbase_hregion_majorcompaction",
36 "value": "false"
37 },
38 {
39 "desc": "The address for the HBase RegionServer web UI",
40 "display_name": "HBase RegionServer Web UI Address",
41 "name": "hbase_regionserver_info_bindAddress",
42 "value": null
43 },
44 {
45     "desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stack traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
46 "display_name": "Stacks Collection Method",
47 "name": "stacks_collection_method",
48 "value": "jstack"
49 },
50 {
51 "desc": "Number of threads to use while loading and unloading regions to or from a RegionServer. Can be used to increase the speed of decommissioning or rolling restart operations.",
52 "display_name": "Region Mover Threads",
53 "name": "hbase_regionserver_regionmover_thread_count",
54 "value": "1"
55 },
56 {
57 "desc": "The health test thresholds of the average size of the HBase RegionServer flush queue over a recent period. See HBase RegionServer Flush Queue Monitoring Period.",
58 "display_name": "HBase RegionServer Flush Queue Monitoring Thresholds",
59 "name": "regionserver_flush_queue_thresholds",
60 "value": "{\"critical\":\"never\",\"warning\":\"10.0\"}"
61 },
62 {
63 "desc": "The amount of time to wait for the HBase Region Server to fully start up and connect to the HBase Master before enforcing the connectivity check.",
64 "display_name": "HBase Region Server Connectivity Tolerance at Startup",
65 "name": "regionserver_connectivity_tolerance",
66 "value": "180"
67 },
68 {
69 "desc": "Where to store the contents of the BucketCache. Either \"offheap\" or file:/path/to/file -- this should be a file in the local file system (not HDFS), and is generally a file on ramdisk or SSD (not spinning disk). If this is set to \"offheap\" then Java's -XX:MaxDirectMemorySize is set to the value of hbase.bucketcache.size plus 1GB for short-circuit reads.",
70 "display_name": "BucketCache IOEngine",
71 "name": "hbase_bucketcache_ioengine",
72 "value": null
73 },
74 {
75     "desc": "Whether to suppress the results of the Store File Index Size health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
76 "display_name": "Suppress Health Test: Store File Index Size",
77 "name": "role_health_suppression_region_server_store_file_idx_size",
78 "value": "false"
79 },
80 {
81 "desc": "The hashing algorithm for use in HashFunction. Two values are supported: 'murmur' (for MurmurHash) and 'jenkins' (for JenkinsHash).",
82 "display_name": "HBase Hash Type",
83 "name": "hbase_hash_type",
84 "value": "murmur"
85 },
86 {
87 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the RegionServer DNS Network Interface parameter.",
88 "display_name": "Suppress Parameter Validation: RegionServer DNS Network Interface",
89 "name": "role_config_suppression_hbase_regionserver_dns_interface",
90 "value": "false"
91 },
92 {
93 "desc": "The port for the HBase RegionServer web UI. Set to -1 to disable RegionServer web UI.",
94 "display_name": "HBase RegionServer Web UI port",
95 "name": "hbase_regionserver_info_port",
96 "value": "60030"
97 },
98 {
99 "desc": "The period to review when computing unexpected exits.",
100 "display_name": "Unexpected Exits Monitoring Period",
101 "name": "unexpected_exits_window",
102 "value": "5"
103 },
104 {
105 "desc": "The health test thresholds for the weighted average time spent in Java garbage collection. Specified as a percentage of elapsed wall clock time.",
106 "display_name": "Garbage Collection Duration Thresholds",
107 "name": "regionserver_gc_duration_thresholds",
108 "value": "{\"critical\":\"60.0\",\"warning\":\"30.0\"}"
109 },
110 {
111 "desc": "The period over which to compute the moving average of the compaction queue size.",
112 "display_name": "HBase RegionServer Compaction Queue Monitoring Period",
113 "name": "regionserver_compaction_queue_window",
114 "value": "5"
115 },
116 {
117     "desc": "These arguments will be passed as part of the Java command line. Commonly, garbage collection flags, PermGen size settings, or extra debugging flags would be passed here.",
118 "display_name": "Java Configuration Options for HBase RegionServer",
119 "name": "hbase_regionserver_java_opts",
120 "value": "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled -XX:ReservedCodeCacheSize=256m"
121 },
122 {
123 "desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
124 "display_name": "RegionServer Logging Advanced Configuration Snippet (Safety Valve)",
125 "name": "log4j_safety_valve",
126 "value": null
127 },
128 {
129 "desc": "The health test thresholds of the latency that the RegionServer sees for HDFS read operations",
130 "display_name": "HBase RegionServer HDFS Read Latency Thresholds",
131 "name": "regionserver_read_latency_thresholds",
132 "value": "{\"critical\":\"100.0\",\"warning\":\"50.0\"}"
133 },
134 {
135 "desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
136 "display_name": "Enable Configuration Change Alerts",
137 "name": "enable_config_alerts",
138 "value": "false"
139 },
140 {
141     "desc": "When set, generates a heap dump file when java.lang.OutOfMemoryError is thrown.",
142 "display_name": "Dump Heap When Out of Memory",
143 "name": "oom_heap_dump_enabled",
144 "value": "true"
145 },
146 {
147 "desc": "The health test thresholds of the size used by the HStoreFile index. Specified as a percentage of the total heap size.",
148 "display_name": "Percentage of Heap Used by HStoreFile Index",
149 "name": "regionserver_store_file_idx_size_thresholds",
150 "value": "{\"critical\":\"never\",\"warning\":\"10.0\"}"
151 },
152 {
153 "desc": "Maximum number of Write-Ahead Log (WAL) files. This value multiplied by HDFS Block Size (dfs.blocksize) is the size of the WAL that will need to be replayed when a server crashes. This value is inversely proportional to the frequency of flushes to disk.",
154 "display_name": "Maximum number of Write-Ahead Log (WAL) files",
155 "name": "hbase_regionserver_maxlogs",
156 "value": "32"
157 },
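Because each WAL file is bounded by the HDFS block size, this cap also bounds the worst-case amount of WAL data replayed after a RegionServer crash. A worked example, assuming the common 128 MiB dfs.blocksize:

    maxlogs = 32                   # the hbase_regionserver_maxlogs default above
    dfs_blocksize = 128 * 2 ** 20  # assumption: dfs.blocksize of 128 MiB
    # Worst case to replay after a crash: 32 * 128 MiB = 4 GiB of WAL.
    print(maxlogs * dfs_blocksize / 2 ** 30, "GiB")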
158 {
159     "desc": "List of coprocessors that are loaded by default on all tables. For any overridden coprocessor method, these classes will be called in order. After implementing your own coprocessor, just put it in HBase's classpath and add the fully qualified class name here. A coprocessor can also be loaded on demand by setting HTableDescriptor.",
160 "display_name": "HBase Coprocessor Region Classes",
161 "name": "hbase_coprocessor_region_classes",
162 "value": ""
163 },
164 {
165 "desc": "When BucketCache is enabled, use it as an L2 cache for LruBlockCache. If set to true, indexes and Bloom filters are kept in the LruBlockCache and the data blocks are kept in the BucketCache.",
166 "display_name": "Enable Combined BucketCache",
167 "name": "hbase_bucketcache_combinedcache_enabled",
168 "value": "true"
169 },
170 {
171     "desc": "Whether to suppress the results of the Web Server Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
172 "display_name": "Suppress Health Test: Web Server Status",
173 "name": "role_health_suppression_region_server_web_metric_collection",
174 "value": "false"
175 },
176 {
177     "desc": "Whether to suppress the results of the Process Status health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
178 "display_name": "Suppress Health Test: Process Status",
179 "name": "role_health_suppression_region_server_scm_health",
180 "value": "false"
181 },
182 {
183 "desc": "The Hierarchical Storage Management policy that should be used by the RegionServer for the write-ahead-log. Using an SSD policy will have no effect unless HDFS HSM is configured to know which drives are SSDs. See <a class=\"bold\" href=\"http://tiny.cloudera.com/cm-hbase-hsm\" target=\"_blank\">Enabling HSM with HBase<i class=\"externalLink\"></i></a>.",
184 "display_name": "WAL HSM Storage Policy",
185 "name": "hbase_regionserver_wal_storage_policy",
186 "value": "NONE"
187 },
188 {
189     "desc": "Blocks writes if the size of the memstore increases to the value of 'hbase.hregion.memstore.block.multiplier' multiplied by the value of 'hbase.hregion.memstore.flush.size' bytes. This setting is useful for preventing runaway memstore growth during spikes in update traffic. Without an upper bound, the memstore fills such that when it flushes, the resulting flush files take a long time to compact or split, or worse, an \"out of memory\" error occurs.",
190 "display_name": "HBase Memstore Block Multiplier",
191 "name": "hbase_hregion_memstore_block_multiplier",
192 "value": "2"
193 },
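In other words, writes to a region block once its memstore reaches the multiplier times the flush size. A worked example, assuming the usual 128 MiB default for the memstore flush size:

    multiplier = 2                 # hbase_hregion_memstore_block_multiplier above
    flush_size = 128 * 2 ** 20     # assumption: default memstore flush size
    # Writes block at 2 * 128 MiB = 256 MiB of memstore per region.
    print(multiplier * flush_size / 2 ** 20, "MiB")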
194 {
195 "desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
196 "display_name": "Suppress Parameter Validation: Stacks Collection Directory",
197 "name": "role_config_suppression_stacks_collection_directory",
198 "value": "false"
199 },
200 {
201 "desc": "Sync the HLog to HDFS after this interval, in milliseconds, if it has not accumulated the number of HLog Entries specified to trigger a sync.",
202 "display_name": "Synch Interval of HLog Entries",
203 "name": "hbase_regionserver_optionallogflushinterval",
204 "value": "1000"
205 },
206 {
207 "desc": "The health test thresholds for monitoring of free space on the filesystem that contains this role's log directory. Specified as a percentage of the capacity on that filesystem. This setting is not used if a Log Directory Free Space Monitoring Absolute Thresholds setting is configured.",
208 "display_name": "Log Directory Free Space Monitoring Percentage Thresholds",
209 "name": "log_directory_free_space_percentage_thresholds",
210 "value": "{\"critical\":\"never\",\"warning\":\"never\"}"
211 },
212 {
213 "desc": "<p>This file contains the rules that govern how log messages are turned into events by the custom log4j appender that this role loads. It is in JSON format, and is composed of a list of rules. Every log message is evaluated against each of these rules in turn to decide whether or not to send an event for that message. If a log message matches multiple rules, the first matching rule is used. </p><p>Each rule has some or all of the following fields:</p><ul><li><code>alert</code> - whether or not events generated from this rule should be promoted to alerts. A value of \"true\" will cause alerts to be generated. If not specified, the default is \"false\".</li><li><code>rate</code> <b>(mandatory)</b> - the maximum number of log messages matching this rule that can be sent as events every minute. If more than <code>rate</code> matching log messages are received in a single minute, the extra messages are ignored. If rate is less than 0, the number of messages per minute is unlimited.</li><li><code>periodminutes</code> - the number of minutes during which the publisher will only publish <code>rate</code> events or fewer. If not specified, the default is <b>one minute</b></li><li><code>threshold</code> - apply this rule only to messages with this log4j severity level or above. An example is \"WARN\" for warning level messages or higher.</li><li><code>content</code> - match only those messages for which contents match this regular expression.</li><li><code>exceptiontype</code> - match only those messages that are part of an exception message. The exception type must match this regular expression.</li></ul><p>Example:</p><ul><li><pre>{\"alert\": false, \"rate\": 10, \"exceptiontype\": \"java.lang.StringIndexOutOfBoundsException\"}</pre>This rule sends events to Cloudera Manager for every <code>StringIndexOutOfBoundsException</code>, up to a maximum of 10 every minute.</li><li><pre>{\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"exceptiontype\": \".*\"}, {\"alert\": true, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"ERROR\"}</pre>In this example, an event generated may not be promoted to alert if an exception is in the ERROR log message, because the first rule with alert = false will match.</li></ul>",
214 "display_name": "Rules to Extract Events from Log Files",
215 "name": "log_event_whitelist",
216     "value": "{\n \"version\": \"0\",\n \"rules\": [\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"FATAL\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Instead, use .*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \".* is deprecated. Use .* instead\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.IOException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.net.SocketClosedException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.io.EOFException\"},\n {\"alert\": false, \"rate\": 0, \"exceptiontype\": \"java.nio.channels.CancelledKeyException\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\": \"IPC Server handler.*took.*appending an edit to hlog.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"ABORTING region server serverName.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"DFSOutputStream ResponseProcessor exception.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Error Recovery for block blk.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Failed init\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Problem renewing lease for DFSClient.*\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"remote error telling master we are up\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\", \"content\": \"Session.*for server.*closing socket connection and attempting reconnect.\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 2, \"exceptiontype\": \".*\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"Error executing shell command .+ No such process.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\".*attempt to override final parameter.+\"},\n {\"alert\": false, \"rate\": 0, \"threshold\":\"WARN\", \"content\":\"[^ ]+ is a deprecated filesystem name. Use.*\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Starting .*compaction on region (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_REGION_STARTED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^completed compaction on region (.+) after (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_REGION_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"DURATION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Starting compaction on (.+) in region (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_COLUMN_FAMILY_STARTED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"COLUMN_FAMILY\", \"group1\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^completed compaction: regionName=(.+), storeName=(.+), fileCount=(.+), fileSize=(.+), priority=(.+), time=(.+); duration=(.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_COLUMN_FAMILY_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"COLUMN_FAMILY\", \"group2\": \"FILE_COUNT\", \"group3\": \"FILE_SIZE\", \"group4\": \"PRIORITY\", \"group5\": \"REQUEST_TIME_NANOS\", \"group6\": \"DURATION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Completed compaction: Request = regionName=(.+), storeName=(.+), fileCount=(.+), fileSize=(.+), priority=(.+), time=(.+); duration=(.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_COLUMN_FAMILY_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"COLUMN_FAMILY\", \"group2\": \"FILE_COUNT\", \"group3\": \"FILE_SIZE\", \"group4\": \"PRIORITY\", \"group5\": \"REQUEST_TIME_NANOS\", \"group6\": \"DURATION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^aborted compaction: regionName=(.+), storeName=(.+), fileCount=(.+), fileSize=(.+), priority=(.+), time=(.+); duration=(.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_COMPACTION_COLUMN_FAMILY_ABORTED\", \"attribute:SEVERITY\": \"IMPORTANT\", \"group0\": \"REGION\", \"group1\": \"COLUMN_FAMILY\", \"group2\": \"FILE_COUNT\", \"group3\": \"FILE_SIZE\", \"group4\": \"PRIORITY\", \"group5\": \"REQUEST_TIME_NANOS\", \"group6\": \"DURATION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Finished memstore flush of .+ for region (.+) in (.+), sequenceid=(.+), compaction requested=(.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_FLUSH_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"DURATION\", \"group2\": \"SEQUENCE_ID\", \"group3\": \"COMPACTION_REQUESTED\" },\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Flush of region (.+) due to global heap pressure$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_FLUSH_DUE_TO_HEAP_PRESSURE\", \"attribute:SEVERITY\": \"IMPORTANT\", \"group0\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"WARN\", \"content\": \"^Region (.+) has too many store files; delaying flush up to 90000ms$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_FLUSH_DELAYED_TOO_MANY_STORE_FILES\", \"attribute:SEVERITY\": \"CRITICAL\", \"group0\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Starting split of region (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_SPLIT_STARTED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Running rollback/cleanup of failed split of (.+);.+$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_SPLIT_ABORTED\", \"attribute:SEVERITY\": \"IMPORTANT\", \"group0\": \"REGION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Region split, hbase:meta updated, and report to master. Parent=(.+), new regions: (.+,.*,.+), (.+,.*,.+). Split took (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_SPLIT_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"DAUGHTER_REGIONS\", \"group2\": \"DAUGHTER_REGIONS\", \"group3\": \"DURATION\"},\n {\"alert\": false, \"rate\": -1, \"threshold\":\"INFO\", \"content\": \"^Region split, META updated, and report to master. Parent=(.+), new regions: (.+,.*,.+), (.+,.*,.+). Split took (.+)$\", \"attribute:CATEGORY\": \"HBASE\", \"attribute:EVENTCODE\": \"EV_HBASE_SPLIT_COMPLETED\", \"attribute:SEVERITY\": \"INFORMATIONAL\", \"group0\": \"REGION\", \"group1\": \"DAUGHTER_REGIONS\", \"group2\": \"DAUGHTER_REGIONS\", \"group3\": \"DURATION\"},\n {\"alert\": false, \"rate\": 1, \"periodminutes\": 1, \"threshold\":\"WARN\"}\n ]\n}"
217 },
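The rule list above is evaluated with first-match-wins semantics, as the description notes. A minimal Python sketch of that evaluation order (not the Cloudera Manager agent's implementation; field handling is simplified to threshold, content, and exceptiontype):

    import re

    LEVELS = ["TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"]

    def first_matching_rule(rules, level, message, exceptiontype=None):
        # Each message is checked against the rules in order; only the first
        # matching rule decides whether an event (or alert) is emitted.
        for rule in rules:
            if "threshold" in rule and LEVELS.index(level) < LEVELS.index(rule["threshold"]):
                continue
            if "content" in rule and not re.search(rule["content"], message):
                continue
            if "exceptiontype" in rule and (exceptiontype is None or
                                            not re.match(rule["exceptiontype"], exceptiontype)):
                continue
            return rule
        return None

    rules = [
        {"alert": False, "rate": 0, "exceptiontype": "java.io.IOException"},
        {"alert": True, "rate": 1, "threshold": "ERROR"},
    ]
    # An ERROR carrying an IOException hits the first, non-alerting rule, so it
    # is never promoted to an alert -- the behavior the description calls out.
    print(first_matching_rule(rules, "ERROR", "read failed", "java.io.IOException"))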
218 {
219 "desc": "The total size of the BucketCache, in megabytes. The size to configure depends on the amount of memory available to HBase, or the size of a local SSD. If hbase.bucketcache.ioengine is set to \"offheap\", then the bucketcache consumes the configured amount of memory from Java's Direct Memory.",
220 "display_name": "BucketCache Size",
221 "name": "hbase_bucketcache_size",
222 "value": "1024"
223 },
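Per the BucketCache IOEngine entry earlier in this file, the "offheap" engine sizes Java's direct memory as the BucketCache size plus 1 GB for short-circuit reads. A sketch of that arithmetic at the default size (illustration only):

    bucketcache_mib = 1024  # the hbase_bucketcache_size default above
    # IOEngine description: direct memory = bucketcache size + 1 GB.
    max_direct_mib = bucketcache_mib + 1024
    print("-XX:MaxDirectMemorySize=%dm" % max_direct_mib)  # ...=2048m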
224 {
225     "desc": "Whether to suppress the results of the Cluster Connectivity health test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
226 "display_name": "Suppress Health Test: Cluster Connectivity",
227 "name": "role_health_suppression_region_server_master_connectivity",
228 "value": "false"
229 },
230 {
231 "desc": "The host name or IP address of the DNS name server which a RegionServer should use to determine the host name used by the HBase Master for communication and display purposes.",
232 "display_name": "RegionServer DNS Nameserver",
233 "name": "hbase_regionserver_dns_nameserver",
234 "value": null
235 },
236 {