sahara/sahara/plugins/cdh/v5_11_0/resources/kafka-kafka_broker.json

[
{
"desc": "The log for a topic partition is stored as a directory of segment files. This setting controls the size to which a segment file can grow before a new segment is rolled over in the log. This value should be larger than message.max.bytes.",
"display_name": "Segment File Size",
"name": "log.segment.bytes",
"value": "1073741824"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka-monitoring.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "kafka-monitoring.properties_role_safety_valve",
"value": null
},
{
"desc": "Whether or not periodic stacks collection is enabled.",
"display_name": "Stacks Collection Enabled",
"name": "stacks_collection_enabled",
"value": "false"
},
{
"desc": "The method used to collect stacks. The jstack option involves periodically running the jstack command against the role's daemon process. The servlet method is available for those roles that have an HTTP server endpoint exposing the current stacks traces of all threads. When the servlet method is selected, that HTTP endpoint is periodically scraped.",
"display_name": "Stacks Collection Method",
"name": "stacks_collection_method",
"value": "jstack"
},
{
"desc": "The maximum time before a new log segment is rolled out. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded. The special value of -1 is interpreted as unlimited. This property is used in Kafka 1.4.0 and later in place of log.retention.hours.",
"display_name": "Data Retention Time",
"name": "log.retention.ms",
"value": null
},
{
"desc": "The amount of data to retain in the log for each topic-partition. This is the limit per partition: multiply by the number of partitions to get the total data retained for the topic. The special value of -1 is interpreted as unlimited. If both log.retention.ms and log.retention.bytes are set, a segment is deleted when either limit is exceeded.",
"display_name": "Data Retention Size",
"name": "log.retention.bytes",
"value": "-1"
},
{
"desc": "Enables the health test that the Kafka Broker's process state is consistent with the role configuration",
"display_name": "Kafka Broker Process Health Test",
"name": "kafka_broker_scm_health_enabled",
"value": "true"
},
{
"desc": "Kafka broker secure port.",
"display_name": "TLS/SSL Port",
"name": "ssl_port",
"value": "9093"
},
{
"desc": "The period to review when computing unexpected exits.",
"display_name": "Unexpected Exits Monitoring Period",
"name": "unexpected_exits_window",
"value": "5"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_kafka_broker_role_env_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "role_config_suppression_ssl_server_keystore_password",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Heap Dump Directory parameter.",
"display_name": "Suppress Parameter Validation: Heap Dump Directory",
"name": "role_config_suppression_oom_heap_dump_dir",
"value": "false"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this role reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Health Alerts for this Role",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "The password for the Kafka Broker TLS/SSL Certificate Trust Store File. This password is not required to access the trust store; this field can be left blank. This password provides optional integrity checking of the file. The contents of trust stores are certificates, and certificates are public information.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "ssl_client_truststore_password",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Log Directory parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Log Directory",
"name": "role_config_suppression_log_dir",
"value": "false"
},
{
"desc": "When set, generates heap dump file when java.lang.OutOfMemoryError is thrown.",
"display_name": "Dump Heap When Out of Memory",
"name": "oom_heap_dump_enabled",
"value": "true"
},
{
"desc": "The path to the TLS/SSL keystore file containing the server certificate and private key used for TLS/SSL. Used when Kafka Broker is acting as a TLS/SSL server. The keystore must be in JKS format.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "ssl_server_keystore_location",
"value": ""
},
{
"desc": "Host the HTTP metric reporter binds to.",
"display_name": "HTTP Metric Report Host",
"name": "kafka.http.metrics.host",
"value": "0.0.0.0"
},
{
"desc": "The amount of stacks data that is retained. After the retention limit is reached, the oldest data is deleted.",
"display_name": "Stacks Collection Data Retention",
"name": "stacks_collection_data_retention",
"value": "104857600"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Stacks Collection Directory parameter.",
"display_name": "Suppress Parameter Validation: Stacks Collection Directory",
"name": "role_config_suppression_stacks_collection_directory",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Advertised Host parameter.",
"display_name": "Suppress Parameter Validation: Advertised Host",
"name": "role_config_suppression_advertised.host.name",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). Secondary to the log.retention.ms property. The special value of -1 is interpreted as unlimited. This property is deprecated in Kafka 1.4.0. Use log.retention.ms.",
"display_name": "Data Retention Hours",
"name": "log.retention.hours",
"value": "168"
},
{
"desc": "The health test thresholds for unexpected exits encountered within a recent period specified by the unexpected_exits_window configuration for the role.",
"display_name": "Unexpected Exits Thresholds",
"name": "unexpected_exits_thresholds",
"value": "{\"critical\":\"any\",\"warning\":\"never\"}"
},
{
"desc": "The password that protects the private key contained in the JKS keystore used when Kafka Broker is acting as a TLS/SSL server.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "ssl_server_keystore_keypassword",
"value": ""
},
{
"desc": "The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.",
"display_name": "Number of I/O Threads",
"name": "num.io.threads",
"value": "8"
},
{
"desc": "Whether to suppress the results of the Audit Pipeline Test heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Audit Pipeline Test",
"name": "role_health_suppression_kafka_kafka_broker_audit_health",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "role_config_suppression_kafka.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Host Health heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Host Health",
"name": "role_health_suppression_kafka_kafka_broker_host_health",
"value": "false"
},
{
"desc": "Kafka broker port.",
"display_name": "TCP Port",
"name": "port",
"value": "9092"
},
{
"desc": "The maximum size, in megabytes, per log file for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Max Log Size",
"name": "max_log_size",
"value": "200"
},
{
"desc": "The password for the Kafka Broker JKS keystore file.",
"display_name": "Kafka Broker TLS/SSL Server JKS Keystore File Password",
"name": "ssl_server_keystore_password",
"value": ""
},
{
"desc": "The log directory for log files of the role Kafka Broker.",
"display_name": "Kafka Broker Log Directory",
"name": "log_dir",
"value": "/var/log/kafka"
},
{
"desc": "Whether to suppress the results of the File Descriptors heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: File Descriptors",
"name": "role_health_suppression_kafka_kafka_broker_file_descriptor",
"value": "false"
},
{
"desc": "The location on disk of the trust store, in .jks format, used to confirm the authenticity of TLS/SSL servers that Kafka Broker might connect to. This is used when Kafka Broker is the client in a TLS/SSL connection. This trust store must contain the certificate(s) used to sign the service(s) connected to. If this parameter is not provided, the default list of well-known certificate authorities is used instead.",
"display_name": "Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "ssl_client_truststore_location",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka-monitoring.properties",
"name": "role_config_suppression_kafka-monitoring.properties_role_safety_valve",
"value": "false"
},
{
"desc": "The maximum time before a new log segment is rolled out (in hours). This property is deprecated in Cloudera Kafka 1.4.0; use log.roll.ms.",
"display_name": "Data Log Roll Hours",
"name": "log.roll.hours",
"value": "168"
},
{
"desc": "Port for JMX.",
"display_name": "JMX Port",
"name": "jmx_port",
"value": "9393"
},
{
"desc": "The frequency, in milliseconds, that the log cleaner checks whether any log segment is eligible for deletion, per retention policies.",
"display_name": "Data Retention Check Interval",
"name": "log.retention.check.interval.ms",
"value": "300000"
},
{
"desc": "The timeout in milliseconds to wait for graceful shutdown to complete.",
"display_name": "Graceful Shutdown Timeout",
"name": "graceful_stop_timeout",
"value": "30000"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>ssl.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "ssl.properties_role_safety_valve",
"value": null
},
{
"desc": "Protocol to be used for inter-broker communication. INFERRED will use the same protocol configured for external clients.",
"display_name": "Inter Broker Protocol",
"name": "security.inter.broker.protocol",
"value": "INFERRED"
},
{
"desc": "ID uniquely identifying each broker. Never set this property at the group level; it should always be overridden on instance level.",
"display_name": "Broker ID",
"name": "broker.id",
"value": null
},
{
"desc": "The maximum time before a new log segment is rolled out. This property is used in Cloudera Kafka 1.4.0 and later in place of log.roll.hours.",
"display_name": "Data Log Roll Time",
"name": "log.roll.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of I/O Threads parameter.",
"display_name": "Suppress Parameter Validation: Number of I/O Threads",
"name": "role_config_suppression_num.io.threads",
"value": "false"
},
{
"desc": "Number of CPU shares to assign to this role. The greater the number of shares, the larger the share of the host's CPUs that will be given to this role when the host experiences CPU contention. Must be between 2 and 262144. Defaults to 1024 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup CPU Shares",
"name": "rm_cpu_shares",
"value": "1024"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore File Location parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore File Location",
"name": "role_config_suppression_ssl_server_keystore_location",
"value": "false"
},
{
"desc": "The minimum log level for Kafka Broker logs",
"display_name": "Kafka Broker Logging Threshold",
"name": "log_threshold",
"value": "INFO"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Data Directories parameter.",
"display_name": "Suppress Parameter Validation: Data Directories",
"name": "role_config_suppression_log.dirs",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store File parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store File",
"name": "role_config_suppression_ssl_client_truststore_location",
"value": "false"
},
{
"desc": "Whether to suppress the results of the Process Status heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Process Status",
"name": "role_health_suppression_kafka_kafka_broker_scm_health",
"value": "false"
},
{
"desc": "When set, this role's process is automatically (and transparently) restarted in the event of an unexpected failure.",
"display_name": "Automatically Restart Process",
"name": "process_auto_restart",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Logging Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "role_config_suppression_log4j_safety_valve",
"value": "false"
},
{
"desc": "Port the HTTP metric reporter listens on.",
"display_name": "HTTP Metric Report Port",
"name": "kafka.http.metrics.port",
"value": "24042"
},
{
"desc": "Whether to suppress configuration warnings produced by the CDH Version Validator configuration validator.",
"display_name": "Suppress Configuration Validator: CDH Version Validator",
"name": "role_config_suppression_cdh_version_validator",
"value": "false"
},
{
"desc": "Hard memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Hard Limit",
"name": "rm_memory_hard_limit",
"value": "-1"
},
{
"desc": "Client authentication mode for SSL connections. Default is none, could be set to \"required\", i.e., client authentication is required or to \"requested\", i.e., client authentication is requested and client without certificates can still connect.",
"display_name": "SSL Client Authentication",
"name": "ssl.client.auth",
"value": "none"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Certificate Trust Store Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Certificate Trust Store Password",
"name": "role_config_suppression_ssl_client_truststore_password",
"value": "false"
},
{
"desc": "Encrypt communication between clients and Kafka Broker using Transport Layer Security (TLS) (formerly known as Secure Socket Layer (SSL)).",
"display_name": "Enable TLS/SSL for Kafka Broker",
"name": "ssl_enabled",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker TLS/SSL Server JKS Keystore Key Password parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker TLS/SSL Server JKS Keystore Key Password",
"name": "role_config_suppression_ssl_server_keystore_keypassword",
"value": "false"
},
{
"desc": "These arguments are passed as part of the Java command line. Commonly, garbage collection flags or extra debugging flags are passed here.",
"display_name": "Additional Broker Java Options",
"name": "broker_java_opts",
"value": "-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
},
{
"desc": "The health test thresholds on the swap memory usage of the process.",
"display_name": "Process Swap Memory Thresholds",
"name": "process_swap_memory_thresholds",
"value": "{\"critical\":\"never\",\"warning\":\"any\"}"
},
{
"desc": "If configured, overrides the process soft and hard rlimits (also called ulimits) for file descriptors to the configured value.",
"display_name": "Maximum Process File Descriptors",
"name": "rlimit_fds",
"value": null
},
{
"desc": "The health test thresholds of the number of file descriptors used. Specified as a percentage of file descriptor limit.",
"display_name": "File Descriptor Monitoring Thresholds",
"name": "kafka_broker_fd_thresholds",
"value": "{\"critical\":\"70.0\",\"warning\":\"50.0\"}"
},
{
"desc": "Path to directory where heap dumps are generated when java.lang.OutOfMemoryError error is thrown. This directory is automatically created if it does not exist. If this directory already exists, role user must have write access to this directory. If this directory is shared among multiple roles, it should have 1777 permissions. The heap dump files are created with 600 permissions and are owned by the role user. The amount of free space in this directory should be greater than the maximum Java Process heap size configured for this role.",
"display_name": "Heap Dump Directory",
"name": "oom_heap_dump_dir",
"value": "/tmp"
},
{
"desc": "The frequency with which stacks are collected.",
"display_name": "Stacks Collection Frequency",
"name": "stacks_collection_frequency",
"value": "5.0"
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of this role except client configuration.",
"display_name": "Kafka Broker Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_BROKER_role_env_safety_valve",
"value": null
},
{
"desc": "When computing the overall Kafka Broker health, consider the host's health.",
"display_name": "Kafka Broker Host Health Test",
"name": "kafka_broker_host_health_enabled",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Additional Broker Java Options parameter.",
"display_name": "Suppress Parameter Validation: Additional Broker Java Options",
"name": "role_config_suppression_broker_java_opts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Broker ID parameter.",
"display_name": "Suppress Parameter Validation: Broker ID",
"name": "role_config_suppression_broker.id",
"value": "false"
},
{
"desc": "Maximum number of connections allowed from each IP address.",
"display_name": "Maximum Connections per IP Address",
"name": "max.connections.per.ip",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Segment File Size parameter.",
"display_name": "Suppress Parameter Validation: Segment File Size",
"name": "role_config_suppression_log.segment.bytes",
"value": "false"
},
{
"desc": "For advanced use only, a string to be inserted into <strong>log4j.properties</strong> for this role only.",
"display_name": "Kafka Broker Logging Advanced Configuration Snippet (Safety Valve)",
"name": "log4j_safety_valve",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the HTTP Metric Report Host parameter.",
"display_name": "Suppress Parameter Validation: HTTP Metric Report Host",
"name": "role_config_suppression_kafka.http.metrics.host",
"value": "false"
},
{
"desc": "Maximum size for the Java process heap memory. Passed to Java -Xmx. Measured in megabytes. Kafka does not generally require setting large heap sizes. It is better to let the file system cache utilize the available memory.",
"display_name": "Java Heap Size of Broker",
"name": "broker_max_heap_size",
"value": "1024"
},
{
"desc": "For advanced use only. A string to be inserted into <strong>kafka.properties</strong> for this role only.",
"display_name": "Kafka Broker Advanced Configuration Snippet (Safety Valve) for kafka.properties",
"name": "kafka.properties_role_safety_valve",
"value": null
},
{
"desc": "If set, this is the hostname given out to producers, consumers, and other brokers to use in establishing connections. Never set this property at the group level; it should always be overriden on instance level.",
"display_name": "Advertised Host",
"name": "advertised.host.name",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Java Heap Size of Broker parameter.",
"display_name": "Suppress Parameter Validation: Java Heap Size of Broker",
"name": "role_config_suppression_broker_max_heap_size",
"value": "false"
},
{
"desc": "Users who are allowed to perform any action on the Kafka cluster.",
"display_name": "Super users",
"name": "super.users",
"value": "kafka"
},
{
"desc": "Cloudera Manager agent monitors each service and each of its role by publishing metrics to the Cloudera Manager Service Monitor. Setting it to false will stop Cloudera Manager agent from publishing any metric for corresponding service/roles. This is usually helpful for services that generate large amount of metrics which Service Monitor is not able to process.",
"display_name": "Enable Metric Collection",
"name": "process_should_monitor",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Super users parameter.",
"display_name": "Suppress Parameter Validation: Super users",
"name": "role_config_suppression_super.users",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Role Triggers parameter.",
"display_name": "Suppress Parameter Validation: Role Triggers",
"name": "role_config_suppression_role_triggers",
"value": "false"
},
{
"desc": "Authenticate a SASL connection with zookeeper, if Kerberos authentication is enabled. It also allows a broker to set SASL ACL on zookeeper nodes which locks these nodes down so that only kafka broker can modify.",
"display_name": "Authenticate Zookeeper Connection",
"name": "authenticate.zookeeper.connection",
"value": "true"
},
{
"desc": "The maximum number of rolled log files to keep for Kafka Broker logs. Typically used by log4j or logback.",
"display_name": "Kafka Broker Maximum Log File Backups",
"name": "max_log_backup_index",
"value": "10"
},
{
"desc": "When set, a SIGKILL signal is sent to the role process when java.lang.OutOfMemoryError is thrown.",
"display_name": "Kill When Out of Memory",
"name": "oom_sigkill_enabled",
"value": "true"
},
{
"desc": "Whether to suppress the results of the Unexpected Exits heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Unexpected Exits",
"name": "role_health_suppression_kafka_kafka_broker_unexpected_exits",
"value": "false"
},
{
"desc": "<p>The configured triggers for this role. This is a JSON-formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <b>(mandatory)</b> - The name of the trigger. This value must be unique for the specific role. </li><li><code>triggerExpression</code> <b>(mandatory)</b> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <b>(optional)</b> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <b> (optional)</b> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <b> (optional)</b> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul><p>For example, the following JSON formatted trigger configured for a DataNode fires if the DataNode has more than 1500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleName=$ROLENAME and last(fd_open) > 1500) DO health:bad\",\n \"streamThreshold\": 0, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Role Triggers",
"name": "role_triggers",
"value": "[]"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "The port to give out to producers, consumers, and other brokers to use in establishing connections. This only needs to be set if this port is different from the port the server should bind to.",
"display_name": "Advertised Port",
"name": "advertised.port",
"value": null
},
{
"desc": "The directory in which stacks logs are placed. If not set, stacks are logged into a <code>stacks</code> subdirectory of the role's log directory.",
"display_name": "Stacks Collection Directory",
"name": "stacks_collection_directory",
"value": null
},
{
"desc": "Whether to suppress the results of the Swap Memory Usage heath test. The results of suppressed health tests are ignored when computing the overall health of the associated host, role or service, so suppressed health tests will not generate alerts.",
"display_name": "Suppress Health Test: Swap Memory Usage",
"name": "role_health_suppression_kafka_kafka_broker_swap_memory_usage",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties parameter.",
"display_name": "Suppress Parameter Validation: Kafka Broker Advanced Configuration Snippet (Safety Valve) for ssl.properties",
"name": "role_config_suppression_ssl.properties_role_safety_valve",
"value": "false"
},
{
"desc": "Weight for the read I/O requests issued by this role. The greater the weight, the higher the priority of the requests when the host experiences I/O contention. Must be between 100 and 1000. Defaults to 1000 for processes not managed by Cloudera Manager.",
"display_name": "Cgroup I/O Weight",
"name": "rm_io_weight",
"value": "500"
},
{
"desc": "A list of one or more directories in which Kafka data is stored. Each new partition created is placed in the directory that currently has the fewest partitions. Each directory should be on its own separate drive.",
"display_name": "Data Directories",
"name": "log.dirs",
"value": "/var/local/kafka/data"
},
{
"desc": "Soft memory limit to assign to this role, enforced by the Linux kernel. When the limit is reached, the kernel will reclaim pages charged to the process if and only if the host is facing memory pressure. If reclaiming fails, the kernel may kill the process. Both anonymous as well as page cache pages contribute to the limit. Use a value of -1 B to specify no limit. By default processes not managed by Cloudera Manager will have no limit.",
"display_name": "Cgroup Memory Soft Limit",
"name": "rm_memory_soft_limit",
"value": "-1"
}
]
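
Note: this resource file is a flat list of {desc, display_name, name, value} records shipped with Sahara's CDH plugin as the Kafka Broker role defaults. As a minimal sketch only (the function name and path below are illustrative, not Sahara's actual loader API), such a file can be consumed by loading the JSON and keeping the non-null defaults; entries whose value is null (for example broker.id and advertised.host.name) carry no default and must be set per instance:

# Minimal sketch, assuming the file sits in the working directory;
# load_defaults is an illustrative helper, not part of Sahara's API.
import json

def load_defaults(path="kafka-kafka_broker.json"):
    with open(path) as f:
        params = json.load(f)
    # Skip null-valued entries (e.g. broker.id): they have no default
    # and must be supplied per broker instance.
    return {p["name"]: p["value"] for p in params if p["value"] is not None}

defaults = load_defaults()
print(defaults["log.segment.bytes"])  # -> 1073741824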