sahara/sahara/plugins/cdh/v5_5_0/resources/kafka-service.json

[
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Controlled Shutdown Maximum Attempts parameter.",
"display_name": "Suppress Parameter Validation: Controlled Shutdown Maximum Attempts",
"name": "service_config_suppression_controlled.shutdown.max.retries",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Kafka Service Environment Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_kafka_service_env_safety_valve",
"value": "false"
},
{
"desc": "The default number of partitions for automatically created topics.",
"display_name": "Default Number of Partitions",
"name": "num.partitions",
"value": "1"
},
{
"desc": "The amount of time to retain delete messages for log compacted topics. Once a consumer has seen an original message you need to ensure it also sees the delete message. If you removed the delete message too quickly, this might not happen. As a result there is a configurable delete retention time.",
"display_name": "Log Compaction Delete Record Retention Time",
"name": "log.cleaner.delete.retention.ms",
"value": "604800000"
},
{
"desc": "Enables auto creation of topics on the server. If this is set to true, then attempts to produce, consume, or fetch metadata for a non-existent topic automatically create the topic with the default replication factor and number of partitions.",
"display_name": "Topic Auto Creation",
"name": "auto.create.topics.enable",
"value": "true"
},
{
"desc": "When set, Cloudera Manager will send alerts when the health of this service reaches the threshold specified by the EventServer setting eventserver_health_events_alert_threshold",
"display_name": "Enable Service Level Health Alerts",
"name": "enable_alerts",
"value": "true"
},
{
"desc": "Number of threads used to replicate messages from leaders. Increasing this value increases the degree of I/O parallelism in the follower broker.",
"display_name": "Number of Replica Fetchers",
"name": "num.replica.fetchers",
"value": "1"
},
{
"desc": "Enables Kafka monitoring.",
"display_name": "Enable Kafka Monitoring (Note: Requires Kafka-1.3.0 parcel or higher)",
"name": "monitoring.enabled",
"value": "true"
},
{
"desc": "If automatic leader rebalancing is enabled, the controller tries to balance leadership for partitions among the brokers by periodically returning leadership for each partition to the preferred replica, if it is available.",
"display_name": "Enable Automatic Leader Rebalancing",
"name": "auto.leader.rebalance.enable",
"value": "true"
},
{
"desc": "Number of unsuccessful controlled shutdown attempts before executing an unclean shutdown. For example, the default value of 3 means that the system will attempt a controlled shutdown 3 times before executing an unclean shutdown.",
"display_name": "Controlled Shutdown Maximum Attempts",
"name": "controlled.shutdown.max.retries",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System User parameter.",
"display_name": "Suppress Parameter Validation: System User",
"name": "service_config_suppression_process_username",
"value": "false"
},
{
"desc": "The number of partitions for the offset commit topic. Since changing this after deployment is currently unsupported, we recommend using a higher setting for production (for example, 100-200).",
"display_name": "Offset Commit Topic Number of Partitions",
"name": "offsets.topic.num.partitions",
"value": "50"
},
{
"desc": "If a follower has not sent any fetch requests, nor has it consumed up to the leader's log end offset during this time, the leader removes the follower from the ISR set.",
"display_name": "Allowed Replica Time Lag",
"name": "replica.lag.time.max.ms",
"value": "10000"
},
{
"desc": "The user that this service's processes should run as.",
"display_name": "System User",
"name": "process_username",
"value": "kafka"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Replica Maximum Fetch Size parameter.",
"display_name": "Suppress Parameter Validation: Replica Maximum Fetch Size",
"name": "service_config_suppression_replica.fetch.max.bytes",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Triggers parameter.",
"display_name": "Suppress Parameter Validation: Service Triggers",
"name": "service_config_suppression_service_triggers",
"value": "false"
},
{
"desc": "The replication factor for the offset commit topic. A higher setting is recommended in order to ensure higher availability (for example, 3 or 4) . If the offsets topic is created when there are fewer brokers than the replication factor, then the offsets topic is created with fewer replicas.",
"display_name": "Offset Commit Topic Replication Factor",
"name": "offsets.topic.replication.factor",
"value": "3"
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka Broker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka Broker Count Validator",
"name": "service_config_suppression_kafka_broker_count_validator",
"value": "false"
},
{
"desc": "Controls how frequently the log cleaner will attempt to clean the log. This ratio bounds the maximum space wasted in the log by duplicates. For example, at 0.5 at most 50% of the log could be duplicates. A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log.",
"display_name": "Log Cleaner Clean Ratio",
"name": "log.cleaner.min.cleanable.ratio",
"value": "0.5"
},
{
"desc": "The group that this service's processes should run as.",
"display_name": "System Group",
"name": "process_groupname",
"value": "kafka"
},
{
"desc": "Enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so might result in data loss.",
"display_name": "Enable Unclean Leader Election",
"name": "unclean.leader.election.enable",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Default Replication Factor",
"name": "service_config_suppression_default.replication.factor",
"value": "false"
},
{
"desc": "Enables controlled shutdown of the broker. If enabled, the broker moves all leaders on it to other brokers before shutting itself down. This reduces the unavailability window during shutdown.",
"display_name": "Enable Controlled Shutdown",
"name": "controlled.shutdown.enable",
"value": "true"
},
{
"desc": "The frequency with which to check for leader imbalance.",
"display_name": "Leader Imbalance Check Interval",
"name": "leader.imbalance.check.interval.seconds",
"value": "300"
},
{
"desc": "The maximum number of bytes to fetch for each partition in fetch requests replicas send to the leader. This value should be larger than message.max.bytes.",
"display_name": "Replica Maximum Fetch Size",
"name": "replica.fetch.max.bytes",
"value": "1048576"
},
{
"desc": "When set, Cloudera Manager will send alerts when this entity's configuration changes.",
"display_name": "Enable Configuration Change Alerts",
"name": "enable_config_alerts",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Session Timeout parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Session Timeout",
"name": "service_config_suppression_zookeeper.session.timeout.ms",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Number of Replica Fetchers parameter.",
"display_name": "Suppress Parameter Validation: Number of Replica Fetchers",
"name": "service_config_suppression_num.replica.fetchers",
"value": "false"
},
{
"desc": "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Producer Quota",
"name": "quota.producer.default",
"value": null
},
{
"desc": "For advanced use only, key-value pairs (one on each line) to be inserted into a role's environment. Applies to configurations of all roles in this service except client configuration.",
"display_name": "Kafka Service Environment Advanced Configuration Snippet (Safety Valve)",
"name": "KAFKA_service_env_safety_valve",
"value": null
},
{
"desc": "The maximum size of a message that the server can receive. It is important that this property be in sync with the maximum fetch size the consumers use, or else an unruly producer could publish messages too large for consumers to consume.",
"display_name": "Maximum Message Size",
"name": "message.max.bytes",
"value": "1000000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Allowed Per Broker parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Allowed Per Broker",
"name": "service_config_suppression_leader.imbalance.per.broker.percentage",
"value": "false"
},
{
"desc": "For advanced use only, a list of derived configuration properties that will be used by the Service Monitor instead of the default ones.",
"display_name": "Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "smon_derived_configs_safety_valve",
"value": null
},
{
"desc": "Enables the log cleaner to compact topics with cleanup.policy=compact on this cluster.",
"display_name": "Enable Log Compaction",
"name": "log.cleaner.enable",
"value": "true"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Minimum Number of Replicas in ISR parameter.",
"display_name": "Suppress Parameter Validation: Minimum Number of Replicas in ISR",
"name": "service_config_suppression_min.insync.replicas",
"value": "false"
},
{
"desc": "ZNode in ZooKeeper that should be used as a root for this Kafka cluster.",
"display_name": "ZooKeeper Root",
"name": "zookeeper.chroot",
"value": ""
},
{
"desc": "Whether to suppress configuration warnings produced by the Kafka MirrorMaker Count Validator configuration validator.",
"display_name": "Suppress Configuration Validator: Kafka MirrorMaker Count Validator",
"name": "service_config_suppression_kafka_mirror_maker_count_validator",
"value": "false"
},
{
"desc": "The number of messages written to a log partition before triggering an fsync on the log. Setting this lower syncs data to disk more often, but has a major impact on performance. We recommend use of replication for durability rather than depending on single-server fsync; however, this setting can be used to be extra certain. If used in conjunction with log.flush.interval.ms, the log is flushed when either criteria is met.",
"display_name": "Log Flush Message Interval",
"name": "log.flush.interval.messages",
"value": null
},
{
"desc": "The number of background threads to use for log cleaning.",
"display_name": "Number of Log Cleaner Threads",
"name": "log.cleaner.threads",
"value": "1"
},
{
"desc": "Enable Kerberos authentication for this KAFKA service.",
"display_name": "Enable Kerberos Authentication",
"name": "kerberos.auth.enable",
"value": "false"
},
{
"desc": "List of metric reporter class names. HTTP reporter is included by default.",
"display_name": "List of Metric Reporters",
"name": "kafka.metrics.reporters",
"value": "nl.techop.kafka.KafkaHttpMetricsReporter"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the ZooKeeper Root parameter.",
"display_name": "Suppress Parameter Validation: ZooKeeper Root",
"name": "service_config_suppression_zookeeper.chroot",
"value": "false"
},
{
"desc": "The frequency, in ms, with which the log flusher checks whether any log is eligible to be flushed to disk.",
"display_name": "Log Flush Scheduler Interval",
"name": "log.flush.scheduler.interval.ms",
"value": null
},
{
"desc": "The minimum number of replicas in the in-sync replica needed to satisfy a produce request where required.acks=-1 (that is, all).",
"display_name": "Minimum Number of Replicas in ISR",
"name": "min.insync.replicas",
"value": "1"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Replication Factor parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Replication Factor",
"name": "service_config_suppression_offsets.topic.replication.factor",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Leader Imbalance Check Interval parameter.",
"display_name": "Suppress Parameter Validation: Leader Imbalance Check Interval",
"name": "service_config_suppression_leader.imbalance.check.interval.seconds",
"value": "false"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve) parameter.",
"display_name": "Suppress Parameter Validation: Service Monitor Derived Configs Advanced Configuration Snippet (Safety Valve)",
"name": "service_config_suppression_smon_derived_configs_safety_valve",
"value": "false"
},
{
"desc": "If the server fails to send a heartbeat to ZooKeeper within this period of time, it is considered dead. If set too low, ZooKeeper might falsely consider a server dead; if set too high, ZooKeeper might take too long to recognize a dead server.",
"display_name": "ZooKeeper Session Timeout",
"name": "zookeeper.session.timeout.ms",
"value": "6000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Maximum Message Size parameter.",
"display_name": "Suppress Parameter Validation: Maximum Message Size",
"name": "service_config_suppression_message.max.bytes",
"value": "false"
},
{
"desc": "Enables topic deletion using admin tools. When delete topic is disabled, deleting topics through the admin tools has no effect.",
"display_name": "Enable Delete Topic",
"name": "delete.topic.enable",
"value": "true"
},
{
"desc": "<p>The configured triggers for this service. This is a JSON formatted list of triggers. These triggers are evaluated as part as the health system. Every trigger expression is parsed, and if the trigger condition is met, the list of actions provided in the trigger expression is executed.</p><p>Each trigger has the following fields:</p><ul><li><code>triggerName</code> <strong>(mandatory)</strong> - The name of the trigger. This value must be unique for the specific service. </li><li><code>triggerExpression</code> <strong>(mandatory)</strong> - A tsquery expression representing the trigger. </li><li><code>streamThreshold</code> <strong>(optional)</strong> - The maximum number of streams that can satisfy a condition of a trigger before the condition fires. By default set to 0, and any stream returned causes the condition to fire. </li><li><code>enabled</code> <strong> (optional)</strong> - By default set to 'true'. If set to 'false', the trigger is not evaluated.</li><li><code>expressionEditorConfig</code> <strong> (optional)</strong> - Metadata for the trigger editor. If present, the trigger should only be edited from the Edit Trigger page; editing the trigger here can lead to inconsistencies.</li></ul></p><p>For example, the followig JSON formatted trigger fires if there are more than 10 DataNodes with more than 500 file descriptors opened:</p><p><pre>[{\"triggerName\": \"sample-trigger\",\n \"triggerExpression\": \"IF (SELECT fd_open WHERE roleType = DataNode and last(fd_open) > 500) DO health:bad\",\n \"streamThreshold\": 10, \"enabled\": \"true\"}]</pre></p><p>See the trigger rules documentation for more details on how to write triggers using tsquery.</p><p>The JSON format is evolving and may change and, as a result, backward compatibility is not guaranteed between releases.</p>",
"display_name": "Service Triggers",
"name": "service_triggers",
"value": "[]"
},
{
"desc": "If a replica falls more than this number of messages behind the leader, the leader removes the follower from the ISR and treats it as dead. This property is deprecated in Kafka 1.4.0; higher versions use only replica.lag.time.max.ms.",
"display_name": "Allowed Replica Message Lag",
"name": "replica.lag.max.messages",
"value": "4000"
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Default Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Default Number of Partitions",
"name": "service_config_suppression_num.partitions",
"value": "false"
},
{
"desc": "Name of the ZooKeeper service that this Kafka service instance depends on",
"display_name": "ZooKeeper Service",
"name": "zookeeper_service",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the System Group parameter.",
"display_name": "Suppress Parameter Validation: System Group",
"name": "service_config_suppression_process_groupname",
"value": "false"
},
{
"desc": "The total memory used for log deduplication across all cleaner threads. This memory is statically allocated and will not cause GC problems.",
"display_name": "Log Cleaner Deduplication Buffer Size",
"name": "log.cleaner.dedupe.buffer.size",
"value": "134217728"
},
{
"desc": "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second. Only respected by Kafka 2.0 or later.",
"display_name": "Default Consumer Quota",
"name": "quota.consumer.default",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the Offset Commit Topic Number of Partitions parameter.",
"display_name": "Suppress Parameter Validation: Offset Commit Topic Number of Partitions",
"name": "service_config_suppression_offsets.topic.num.partitions",
"value": "false"
},
{
"desc": "The default replication factor for automatically created topics.",
"display_name": "Default Replication Factor",
"name": "default.replication.factor",
"value": "1"
},
{
"desc": "The maximum time between fsync calls on the log. If used in conjuction with log.flush.interval.messages, the log is flushed when either criteria is met.",
"display_name": "Log Flush Time Interval",
"name": "log.flush.interval.ms",
"value": null
},
{
"desc": "Whether to suppress configuration warnings produced by the built-in parameter validation for the List of Metric Reporters parameter.",
"display_name": "Suppress Parameter Validation: List of Metric Reporters",
"name": "service_config_suppression_kafka.metrics.reporters",
"value": "false"
},
{
"desc": "The percentage of leader imbalance allowed per broker. The controller rebalances leadership if this ratio goes above the configured value per broker.",
"display_name": "Leader Imbalance Allowed Per Broker",
"name": "leader.imbalance.per.broker.percentage",
"value": "10"
}
]
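
Each entry above pairs a Cloudera Manager service-level Kafka configuration key with the default value that the CDH 5.5.0 plugin ships for it. As a rough, hypothetical illustration only (the function name and file path below are assumptions for the sketch, not Sahara's actual loader API), a resource file with this structure can be read and turned into a name-to-default map like so:

# Minimal sketch, assuming only the JSON structure shown above; the helper
# name and path are hypothetical and not part of Sahara's real code.
import json

def load_service_defaults(path="kafka-service.json"):
    """Return a {name: value} map built from the config entries in the file."""
    with open(path) as src:
        entries = json.load(src)
    return {entry["name"]: entry["value"] for entry in entries}

if __name__ == "__main__":
    defaults = load_service_defaults()
    # Prints "3", the shipped default for the offset commit topic replication factor.
    print(defaults["offsets.topic.replication.factor"])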