diff --git a/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 b/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 index 764ab3d5..c61df4ff 100644 --- a/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 +++ b/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 @@ -1,114 +1,788 @@ {% import 'templates/_macros.j2' as elk_macros %} -######################## APM Server Configuration ############################# +######################### APM Server Configuration ######################### -############################# APM Server ###################################### +################################ APM Server ################################ apm-server: - # Defines the host and port the server is listening on + # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket. host: "{{ apm_interface }}:{{ apm_port }}" - # Maximum permitted size in bytes of an unzipped request accepted by the server to be processed. - #max_unzipped_size: 52428800 # Maximum permitted size in bytes of a request's header accepted by the server to be processed. #max_header_size: 1048576 - # Maximum permitted duration in seconds for reading an entire request. - #read_timeout: 2s - # Maximum permitted duration in seconds for writing a response. - #write_timeout: 2s + # Maximum amount of time to wait for the next incoming request before underlying connection is closed. + #idle_timeout: 45s - # Maximum duration in seconds before releasing resources when shutting down the server. + # Maximum permitted duration for reading an entire request. + #read_timeout: 30s + + # Maximum permitted duration for writing a response. + #write_timeout: 30s + + # Maximum duration before releasing resources when shutting down the server. #shutdown_timeout: 5s - # Maximum number of requests permitted to be sent to the server concurrently. - #concurrent_requests: 40 + # Maximum permitted size in bytes of an event accepted by the server to be processed. + #max_event_size: 307200 - # Authorization token to be checked. If a token is set here the agents must - # send their token in the following format: Authorization: Bearer . - # It is recommended to use an authorization token in combination with SSL enabled. + # Maximum number of new connections to accept simultaneously (0 means unlimited). + #max_connections: 0 + + # Custom HTTP headers to add to all HTTP responses, e.g. for security policy compliance. + #response_headers: + # X-My-Header: Contents of the header + + # If true (default), APM Server captures the IP of the instrumented service + # or the IP and User Agent of the real user (RUM requests). + #capture_personal_data: true + + # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/). + #expvar: + #enabled: false + + # Url to expose expvar. + #url: "/debug/vars" + + # A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch. + # Using pipelines involves two steps: + # (1) registering a pipeline + # (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipeline`) + # + # You can manually register a pipeline, or use this configuration option to ensure + # the pipeline is loaded and registered at the configured Elasticsearch instances. + # Find the default pipeline configuration at `ingest/pipeline/definition.json`. + # Automatic pipeline registration requires the `output.elasticsearch` to be enabled and configured. 
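+  # As a hedged sketch of step (2): the registered pipeline can be applied at
+  # ingest time through the Elasticsearch output. This assumes the default
+  # pipeline id `apm`; verify the id actually registered in your deployment
+  # before relying on it.
+  #
+  #output.elasticsearch:
+  #  pipeline: "apm"
+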
+  #register.ingest.pipeline:
+    # Registers APM pipeline definition in Elasticsearch on APM Server startup. Defaults to true.
+    #enabled: true
+    # Overwrites existing APM pipeline definition in Elasticsearch. Defaults to false.
+    #overwrite: false
+
+
+  #---------------------------- APM Server - Secure Communication with Agents ----------------------------
+
+  # Enable secure communication between APM agents and the server. By default ssl is disabled.
+  #ssl:
+    #enabled: false
+
+    # Path to file containing the certificate for server authentication.
+    # Needs to be configured when ssl is enabled.
+    #certificate: ''
+
+    # Path to file containing server certificate key.
+    # Needs to be configured when ssl is enabled.
+    #key: ''
+
+    # Optional configuration options for ssl communication.
+
+    # Passphrase for decrypting the Certificate Key.
+    # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+    #key_passphrase: ''
+
+    # List of supported/valid protocol versions. By default TLS versions 1.1 up to 1.3 are enabled.
+    #supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+    # Configure cipher suites to be used for SSL connections.
+    # Note that cipher suites are not configurable for TLS 1.3.
+    #cipher_suites: []
+
+    # Configure curve types for ECDHE based cipher suites.
+    #curve_types: []
+
+  # The APM Server endpoints can be secured by configuring a secret token or enabling the usage of API keys. Both
+  # options can be enabled in parallel, allowing Elastic APM agents to choose whichever mechanism they support.
+  # As soon as one of the options is enabled, requests without a valid token are denied by the server. An exception
+  # to this is requests to any enabled RUM endpoint. RUM endpoints are generally not secured by any token.
+  #
+  # Configure authorization via a common `secret_token`. By default it is disabled.
+  # Agents include the token in the following format: Authorization: Bearer <secret-token>.
+  # It is recommended to use an authorization token in combination with SSL enabled,
+  # and save the token in the apm-server keystore.
   secret_token: {{ apm_token }}
-  #ssl.enabled: false
-  #ssl.certificate : "path/to/cert"
-  #ssl.key : "path/to/private_key"
-  # Please be aware that frontend support is an experimental feature at the moment!
-  frontend:
-    # To enable experimental frontend support set this to true.
+  # Enable API key authorization by setting enabled to true. By default API key support is disabled.
+  # Agents include a valid API key in the following format: Authorization: ApiKey <token>.
+  # The key must be the base64 encoded representation of the API key's "id:key".
+  # This is an experimental feature, use with care.
+  #api_key:
+    #enabled: false
+
+    # Restrict how many unique API keys are allowed per minute. Should be set to at least the number of different
+    # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch.
+    #limit: 100
+
+    # API keys need to be fetched from Elasticsearch. If nothing is configured, configuration settings from the
+    # output section will be reused.
+    # Note that configuration needs to point to a secured Elasticsearch cluster that is able to serve API key requests.
+    #elasticsearch:
+      #hosts: ["localhost:9200"]
+
+      #protocol: "http"
+
+      # Username and password are only needed for the apm-server apikey sub-command, and they are ignored otherwise.
+      # See `apm-server apikey --help` for details.
+      #username: "elastic"
+      #password: "changeme"
+
+      # Optional HTTP Path.
+      #path: ""
+
+      # Proxy server URL.
+      #proxy_url: ""
+      #proxy_disable: false
+
+      # Configure HTTP request timeout before failing a request to Elasticsearch.
+      #timeout: 5s
+
+      # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+      #ssl.enabled: true
+
+      # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+      # Configure SSL verification mode. If `none` is configured, all server hosts
+      # and certificates will be accepted. In this mode, SSL based connections are
+      # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+      # `full`.
+      #ssl.verification_mode: full
+
+      # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+      # 1.2 are enabled.
+      #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+      # List of root certificates for HTTPS server verifications.
+      #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+      # Certificate for SSL client authentication.
+      #ssl.certificate: "/etc/pki/client/cert.pem"
+
+      # Client Certificate Key
+      #ssl.key: "/etc/pki/client/cert.key"
+
+      # Optional passphrase for decrypting the Certificate Key.
+      # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+      #ssl.key_passphrase: ''
+
+      # Configure cipher suites to be used for SSL connections.
+      #ssl.cipher_suites: []
+
+      # Configure curve types for ECDHE based cipher suites.
+      #ssl.curve_types: []
+
+      # Configure what types of renegotiation are supported. Valid options are
+      # never, once, and freely. Default is never.
+      #ssl.renegotiation: never
+
+
+  #---------------------------- APM Server - RUM Real User Monitoring ----------------------------
+
+  # Enable Real User Monitoring (RUM) Support. By default RUM is disabled.
+  # RUM does not support token based authorization. Enabled RUM endpoints will not require any authorization
+  # token configured for other endpoints.
+  rum:
     enabled: true

-    # Rate limit per second and IP address for requests sent to the frontend endpoint.
-    #rate_limit: 10
+    #event_rate:

-    # Comma separated list of permitted origins for frontend. User-agents will send
-    # a origin header that will be validated against this list.
+      # Defines the maximum amount of events allowed to be sent to the APM Server RUM
+      # endpoint per IP per second. Defaults to 300.
+      #limit: 300
+
+      # An LRU cache is used to keep a rate limit per IP for the most recently seen IPs.
+      # This setting defines the number of unique IPs that can be tracked in the cache.
+      # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
+      #lru_size: 1000
+
+    #-- General RUM settings
+
+    # A list of permitted origins for real user monitoring.
+    # User-agents will send an origin header that will be validated against this list.
     # An origin is made of a protocol scheme, host and port, without the url path.
     # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com)
-    # If an item in the list is a single '*', everything will be allowed
+    # If an item in the list is a single '*', everything will be allowed.
     #allow_origins : ['*']

+    # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type",
+    # "Content-Encoding", and "Accept".
+    #allow_headers : []
+
+    # Custom HTTP headers to add to RUM responses, e.g. for security policy compliance.
+    #response_headers :
+    #  X-My-Header: Contents of the header
+
     # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
     # If the regexp matches, the stacktrace frame is considered to be a library frame.
     #library_pattern: "node_modules|bower_components|~"

     # Regexp to be matched against a stacktrace frame's `file_name`.
     # If the regexp matches, the stacktrace frame is not used for calculating error groups.
-    # The default pattern excludes stacktrace frames that have
-    # - a filename starting with '/webpack'
+    # The default pattern excludes stacktrace frames that have a filename starting with '/webpack'
     #exclude_from_grouping: "^/webpack"

-    # If a source map has previously been uploaded, source mapping is automatically applied
-    # to all error and transaction documents sent to the frontend endpoint.
+    # If a source map has previously been uploaded, source mapping is automatically applied
+    # to all error and transaction documents sent to the RUM endpoint.
     #source_mapping:

-      # Source maps are are fetched from Elasticsearch and then kept in an in-memory cache for a certain time.
+      # Sourcemapping is enabled by default.
+      #enabled: true
+
+      # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration.
+      # A different instance must be configured when using any other output.
+      # This setting only affects sourcemap reads - the output determines where sourcemaps are written.
+      #elasticsearch:
+        # Array of hosts to connect to.
+        # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+        # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+        # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+        # hosts: ["localhost:9200"]
+
+        # Protocol - either `http` (default) or `https`.
+        #protocol: "https"
+
+        # Authentication credentials - either API key or username/password.
+        #api_key: "id:api_key"
+        #username: "elastic"
+        #password: "changeme"
+
       # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch.
       # Note that values configured without a time unit will be interpreted as seconds.
       #cache:
         #expiration: 5m

-      # Source maps are stored in the same index as transaction and error documents.
-      # If the default index pattern at 'outputs.elasticsearch.index' is changed,
-      # a matching index pattern needs to be specified here.
-      #index_pattern: "apm-*"
+      # Source maps are stored in a separate index.
+      # If the default index pattern for source maps at 'outputs.elasticsearch.indices'
+      # is changed, a matching index pattern needs to be specified here.
+      #index_pattern: "apm-*-sourcemap*"

-#================================ General ======================================
+  #---------------------------- APM Server - Agent Configuration ----------------------------

-# Internal queue configuration for buffering events to be published.
+  # When using APM agent configuration, information fetched from Kibana will be cached in memory for some time.
+  # Specify cache key expiration via this setting. Default is 30 seconds.
+  #agent.config.cache.expiration: 30s
+
+{% if (groups['kibana'] | length) > 0 %}
+  kibana:
+    # For APM Agent configuration in Kibana, enabled must be true.
+    enabled: true
+
+    # Scheme and port can be left out and will be set to the default (`http` and `5601`).
+    # In case you specify an additional path, the scheme is required: `http://localhost:5601/path`.
+ # IPv6 addresses should always be defined as: `https://[2001:db8::1]:5601`. + host: {{ hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port }} + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP path. + #path: "" + + # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # List of root certificates for HTTPS server verifications. + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] +{% endif %} + #---------------------------- APM Server - ILM Index Lifecycle Management ---------------------------- + + #ilm: + # Supported values are `auto`, `true` and `false`. + # `true`: Make use of Elasticsearch's Index Lifecycle Management (ILM) for APM indices. If no Elasticsearch output is + # configured or the configured instance does not support ILM, APM Server cannot apply ILM and must create + # unmanaged indices instead. + # `false`: APM Server does not make use of ILM in Elasticsearch. + # `auto`: If an Elasticsearch output is configured with default index and indices settings, and the configured + # Elasticsearch instance supports ILM, `auto` will resolve to `true`. Otherwise `auto` will resolve to `false`. + # Default value is `auto`. + #enabled: "auto" + + #setup: + # Only disable setup if you want to set up everything related to ILM on your own. + # When setup is enabled, the APM Server creates: + # - aliases and ILM policies if `apm-server.ilm.enabled` resolves to `true`. + # - An ILM specific template per event type. This is required to map ILM aliases and policies to indices. In case + # ILM is disabled, the templates will be created without any ILM settings. + # Be aware that if you turn off setup, you need to manually manage event type specific templates on your own. + # If you simply want to disable ILM, use the above setting, `apm-server.ilm.enabled`, instead. + # Defaults to true. + #enabled: true + + # Configure whether or not existing policies and ILM related templates should be updated. This needs to be + # set to true when customizing your policies. + # Defaults to false. + #overwrite: false + + # Set `require_policy` to `false` when policies are set up outside of APM Server but referenced here. + # Default value is `true`. 
+    #require_policy: true
+
+  # Customized mappings will be merged with the default setup, so you only need to configure mappings for the
+  # event types, policies, and index suffixes that you want to customize.
+  # Indices are named in this way: `apm-%{[observer.version]}-%{[event.type]}-{index_suffix}`,
+  # e.g., apm-7.9.0-span-custom*. The `index_suffix` is optional.
+  # NOTE: When configuring an `index_suffix`, ensure that no previously set up templates conflict with the
+  #       newly configured ones. If an index matches multiple templates with the same order, the settings of
+  #       the templates will override each other. Any conflicts need to be cleaned up manually.
+  # NOTE: When customizing `setup.template.name` and `setup.template.pattern`, ensure they still match the indices.
+  #mapping:
+    #- event_type: "error"
+    #  policy_name: "apm-rollover-30-days"
+    #  index_suffix: ""
+    #- event_type: "span"
+    #  policy_name: "apm-rollover-30-days"
+    #  index_suffix: ""
+    #- event_type: "transaction"
+    #  policy_name: "apm-rollover-30-days"
+    #  index_suffix: ""
+    #- event_type: "metric"
+    #  policy_name: "apm-rollover-30-days"
+    #  index_suffix: ""
+
+  # Configured policies are added to pre-defined default policies.
+  # If a policy with the same name as a default policy is configured, the configured policy overwrites the default policy.
+  #policies:
+    #- name: "apm-rollover-30-days"
+      #policy:
+        #phases:
+          #hot:
+            #actions:
+              #rollover:
+                #max_size: "50gb"
+                #max_age: "30d"
+              #set_priority:
+                #priority: 100
+          #warm:
+            #min_age: "30d"
+            #actions:
+              #set_priority:
+                #priority: 50
+              #readonly: {}
+
+
+
+  #---------------------------- APM Server - Experimental Jaeger integration ----------------------------
+
+  # When enabling Jaeger integration, APM Server acts as a Jaeger collector. It supports jaeger.thrift over HTTP
+  # and gRPC. This is an experimental feature, use with care.
+  #jaeger:
+    #grpc:
+      # Set to true to enable the Jaeger gRPC collector service.
+      #enabled: false
+
+      # Defines the gRPC host and port the server is listening on.
+      # Defaults to the standard Jaeger gRPC collector port 14250.
+      #host: "localhost:14250"
+
+      # Set to the name of a process tag to use for authorizing
+      # Jaeger agents.
+      #
+      # The tag value should have the same format as an HTTP
+      # Authorization header, i.e. "Bearer <secret-token>" or
+      # "ApiKey <base64(id:api_key)>".
+      #
+      # By default (if the auth_tag value is empty), authorization
+      # does not apply to Jaeger agents.
+      #auth_tag: ""
+
+    #http:
+      # Set to true to enable the Jaeger HTTP collector endpoint.
+      #enabled: false
+
+      # Defines the HTTP host and port the server is listening on.
+      # Defaults to the standard Jaeger HTTP collector port 14268.
+      #host: "localhost:14268"
+
+#================================= General =================================
+
+# Data is buffered in a memory queue before it is published to the configured output.
+# The memory queue will present all available events (up to the outputs
+# bulk_max_size) to the output, the moment the output is ready to serve
+# another batch of events.
 #queue:
-  # Queue type by name (default 'mem')
-  # The memory queue will present all available events (up to the outputs
-  # bulk_max_size) to the output, the moment the output is ready to server
-  # another batch of events.
+  # Queue type by name (default 'mem').
   #mem:
     # Max number of events the queue can buffer.
    #events: 4096

    # Hints the minimum number of events stored in the queue,
    # before providing a batch of events to the outputs.
-    # A value of 0 (the default) ensures events are immediately available
+    # The default value is set to 2048.
+    # A value of 0 ensures events are immediately available
     # to be sent to the outputs.
    #flush.min_events: 2048

    # Maximum duration after which events are available to the outputs,
-    # if the number of events stored in the queue is < min_flush_events.
+    # if the number of events stored in the queue is < `flush.min_events`.
    #flush.timeout: 1s

 # Sets the maximum number of CPUs that can be executing simultaneously. The
 # default is the number of logical CPUs available in the system.
 #max_procs:

-#================================ Outputs ======================================
+#================================= Template =================================
+{{ elk_macros.setup_template('apm', inventory_hostname, data_nodes, elasticsearch_beat_settings) }}

-# Configure what output to use when sending the data collected by the beat.
+#============================= Elastic Cloud =============================

-#----------------------------- Logstash output ---------------------------------
-{{ elk_macros.output_elasticsearch(inventory_hostname, elasticsearch_data_hosts) }}
+# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/).

-#================================= Paths ======================================
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` option.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =================================
+
+# Configure the output to use when sending the data collected by apm-server.
+
+#-------------------------- Elasticsearch output --------------------------
+{{ elk_macros.output_elasticsearch('apm-server', inventory_hostname, elasticsearch_data_hosts) }}
+#----------------------------- Console output -----------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: false
+
+  # Configure JSON encoding.
+  #codec.json:
+    # Pretty-print JSON event.
+    #pretty: false
+
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false
+
+#---------------------------- Logstash output -----------------------------
+#output.logstash:
+  # Boolean flag to enable or disable the output module.
+  #enabled: false
+
+  # The Logstash hosts.
+  #hosts: ["localhost:5044"]
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Configure escaping html symbols in strings.
+  #escape_html: true
+
+  # Optional maximum time to live for a connection to Logstash, after which the
+  # connection will be re-established. A value of `0s` (the default) will
+  # disable this feature.
+  #
+  # Not yet supported for async connections (i.e. with the "pipelining" option set).
+  #ttl: 30s
+
+  # Optionally load balance the events between the Logstash hosts. Default is false.
+  #loadbalance: false
+
+  # Number of batches to be sent asynchronously to Logstash while processing
+  # new batches.
+  #pipelining: 2
+
+  # If enabled, only a subset of events in a batch of events is transferred per
+  # group. The number of events to be sent increases up to `bulk_max_size`
+  # if no error is encountered.
+ #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, apm-server + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to apm + # in all lowercase. + #index: 'apm' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + #ssl.enabled: false + + # Optional SSL configuration options. SSL is off by default. + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # List of root certificates for HTTPS server verifications. + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------ Kafka output ------------------------------ +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: false + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. 
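+  # As a hedged sketch, these credentials can also be resolved from the
+  # environment or the beats keystore instead of being stored in plain text;
+  # the variable names below are illustrative, not defaults:
+  #username: '${KAFKA_USERNAME}'
+  #password: '${KAFKA_PASSWORD}'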
+ #username: '' + #password: '' + + # Kafka version libbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding. + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + #ssl.enabled: false + + # Optional SSL configuration options. SSL is off by default. + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # List of root certificates for HTTPS server verifications. + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication. 
+ #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/krb5kdc/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/path/config + + # The service principal name. + #kerberos.service_name: HTTP/my-service@realm + + # Name of the Kerberos user. It is used when auth_type is set to password. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +#============================= Instrumentation ============================= + +# Instrumentation support for the server's HTTP endpoints and event publisher. +#instrumentation: + + # Set to true to enable instrumentation of the APM Server itself. + #enabled: false + + # Environment in which the APM Server is running on (eg: staging, production, etc.) + #environment: "" + + # Hosts to report instrumentation results to. + # For reporting to itself, leave this field commented + #hosts: + # - http://remote-apm-server:8200 + + # API Key for the remote APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the remote APM Server(s). + #secret_token: + + # Enable profiling of the server, recording profile samples as events. + # + # This feature is experimental. + #profiling: + #cpu: + # Set to true to enable CPU profiling. + #enabled: false + #interval: 60s + #duration: 10s + #heap: + # Set to true to enable heap profiling. + #enabled: false + #interval: 60s + +#================================= Paths ================================== # The home path for the apm-server installation. This is the default base path # for all other path settings and for miscellaneous files that come with the -# distribution (for example, the sample dashboards). +# distribution. # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: @@ -125,21 +799,153 @@ apm-server: # subdirectory inside the home path. #path.data: ${path.home}/data -# The logs path for a apm-server installation. This is the default location for -# the Beat's log files. If not set by a CLI flag or in the configuration file, -# the default for the logs path is a logs subdirectory inside the home path. +# The logs path for an apm-server installation. If not set by a CLI flag or in the +# configuration file, the default is a logs subdirectory inside the home path. 
 #path.logs: ${path.home}/logs

-#============================== Dashboards =====================================
-{{ elk_macros.setup_dashboards('apm') }}
-
-#=============================== Template ======================================
-{{ elk_macros.setup_template('apm', inventory_hostname, data_nodes, elasticsearch_beat_settings) }}
-
-#============================== Kibana =====================================
-{% if (groups['kibana'] | length) > 0 %}
-{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
-{% endif %}
-
-#================================ Logging ======================================
+#================================= Logging =================================
 {{ elk_macros.beat_logging('apm-server') }}

+#=============================== HTTP Endpoint ===============================
+
+# apm-server can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+#============================= X-pack Monitoring =============================
+
+# APM server can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires x-pack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Most settings from the Elasticsearch output are accepted here as well.
+# Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration. This means that if you have the Elasticsearch output configured,
+# you can simply uncomment the following line.
+#monitoring.elasticsearch:
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request.
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server URL.
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, apm-server
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
+  #ssl.enabled: true
+
+  # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # List of root certificates for HTTPS server verifications.
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication.
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  # It is recommended to use the provided keystore instead of entering the passphrase in plain text.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections.
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites.
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+  #metrics.period: 10s
+  #state.period: 1m
diff --git a/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2 b/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2
index 4cb0afa8..734556e6 100644
--- a/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2
+++ b/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2
@@ -8,14 +8,14 @@
 # You can find the full configuration reference here:
 # https://www.elastic.co/guide/en/beats/auditbeat/index.html

-#============================ Config Reloading ================================
+# ============================== Config Reloading ==============================

 # Config reloading allows to dynamically load modules.
Each file which is # monitored must contain one or multiple modules as a list. auditbeat.config.modules: # Glob pattern for configuration reloading - path: ${path.config}/conf.d/*.yml + path: ${path.config}/modules.d/*.yml # Period on which files under path should be checked for changes reload.period: 60s @@ -23,12 +23,13 @@ auditbeat.config.modules: # Set to true to enable config reloading reload.enabled: true -# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# Maximum amount of time to randomly delay the start of a dataset. Use 0 to # disable startup delay. auditbeat.max_start_delay: 10s -#========================== Modules configuration ============================= +# =========================== Modules configuration ============================ auditbeat.modules: + {% if not (containerised | default(false)) %} # The auditd module collects events from the audit framework in the Linux # kernel. You need to specify audit rules for the events that you want to audit. @@ -43,7 +44,11 @@ auditbeat.modules: include_raw_message: false include_warnings: true + # Set to true to publish fields with null values in events. + #keep_null: false + {% if not apply_security_hardening | default(true) | bool %} + # Load audit rules from separate files. Same format as audit.rules(7). audit_rule_files: - '${path.config}/audit.rules.d/*.conf' - '/etc/audit/rules.d/*.rules' @@ -61,18 +66,20 @@ auditbeat.modules: ## Executions. -a always,exit -F arch=b64 -S execve,execveat -k exec - # Things that affect identity. + ## External access (warning: these can be expensive to audit). + #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access + + ## Identity changes. -w /etc/group -p wa -k identity -w /etc/passwd -p wa -k identity -w /etc/gshadow -p wa -k identity -w /etc/shadow -p wa -k identity - # Unauthorized access attempts to files (unsuccessful). + ## Unauthorized access attempts. -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access - {% endif %} {% endif %} @@ -81,6 +88,10 @@ auditbeat.modules: - module: file_integrity paths: - /bin + - /usr/bin + - /sbin + - /usr/sbin + - /etc - /etc/ansible/roles - /etc/apt - /etc/apache2 @@ -96,10 +107,7 @@ auditbeat.modules: - /etc/zypp - /openstack/venvs - /opt/openstack-ansible - - /sbin - - /usr/bin - /usr/local/bin - - /usr/sbin - /var/lib/lxc # List of regular expressions to filter out notifications for unwanted files. @@ -110,6 +118,11 @@ auditbeat.modules: - '~$' - '/\.git($|/)' + # List of regular expressions used to explicitly include files. When configured, + # Auditbeat will ignore files unless they match a pattern. + #include_files: + #- '/\.ssh($|/)' + # Scan over the configured file paths at startup and send events for new or # modified files since the last time Auditbeat was running. scan_at_start: true @@ -124,22 +137,25 @@ auditbeat.modules: # Hash types to compute when the file changes. 
Supported types are # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384, - # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512. + # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64. # Default is sha1. hash_types: [sha1] # Detect changes to files included in subdirectories. Disabled by default. recursive: true + # Set to true to publish fields with null values in events. + #keep_null: false + # The system module collects security related information about a host. # All datasets send both periodic state information (e.g. all currently # running processes) and real-time changes (e.g. when a new process starts # or stops). - module: system datasets: + - package # Installed, updated, and removed packages - host # General host information, e.g. uptime, IPs - login # User logins, logouts, and system boots. - - package # Installed, updated, and removed packages - process # Started and stopped processes {% if not (containerised | default(false)) and not auditbeat_ignore_socket_data %} - socket # Opened and closed sockets @@ -153,16 +169,40 @@ auditbeat.modules: # The state.period can be overridden for any dataset. # host.state.period: 12h + # package.state.period: 12h # process.state.period: 12h # socket.state.period: 12h # user.state.period: 12h + # Average file read rate for hashing of the process executable. Default is "50 MiB". + process.hash.scan_rate_per_sec: 50 MiB + + # Limit on the size of the process executable that will be hashed. Default is "100 MiB". + process.hash.max_file_size: 100 MiB + + # Hash types to compute of the process executable. Supported types are + # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384, + # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64. + # Default is sha1. + process.hash.hash_types: [sha1] + + # Disabled by default. If enabled, the socket dataset will + # report sockets to and from localhost. + # socket.include_localhost: false + # Enabled by default. Auditbeat will read password fields in # /etc/passwd and /etc/shadow and store a hash locally to # detect any changes. user.detect_password_changes: true -#================================ General ====================================== + # File patterns of the login record files. + # wtmp: History of successful logins, logouts, and system shutdowns and boots. + # btmp: Failed login attempts. + login.wtmp_file_pattern: /var/log/wtmp* + login.btmp_file_pattern: /var/log/btmp* + + +# ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. @@ -203,9 +243,44 @@ auditbeat.modules: #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < min_flush_events. + # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s + # The disk queue stores incoming events on disk until the output is + # ready for them. This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. 
+ #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # @@ -246,7 +321,7 @@ auditbeat.modules: # Sets the write buffer size. #buffer_size: 1MiB - # Maximum duration after which events are flushed, if the write buffer + # Maximum duration after which events are flushed if the write buffer # is not full yet. The default value is 1s. #flush.timeout: 1s @@ -260,7 +335,7 @@ auditbeat.modules: #codec: cbor #read: # Reader flush timeout, waiting for more events to become available, so - # to fill a complete batch, as required by the outputs. + # to fill a complete batch as required by the outputs. # If flush_timeout is 0, all available events are forwarded to the # outputs immediately. # The default value is 0s. @@ -270,7 +345,7 @@ auditbeat.modules: # default is the number of logical CPUs available in the system. #max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -288,103 +363,155 @@ auditbeat.modules: # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. 
# #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. # #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] 
+#      process_array: false
+#      max_depth: 1
+#      target: ""
+#      overwrite_keys: false
+#
+#processors:
+#  - decompress_gzip_field:
+#      from: "field1"
+#      to: "field2"
+#      ignore_missing: false
+#      fail_on_error: true
+#
+# The following example copies the value of message to message_copied
+#
+#processors:
+#  - copy_fields:
+#      fields:
+#        - from: message
+#          to: message_copied
+#      fail_on_error: true
+#      ignore_missing: false
+#
+# The following example truncates the value of message to 1024 bytes
+#
+#processors:
+#  - truncate_fields:
+#      fields:
+#        - message
+#      max_bytes: 1024
+#      fail_on_error: false
+#      ignore_missing: true
+#
+# The following example preserves the raw message under event.original
+#
+#processors:
+#  - copy_fields:
+#      fields:
+#        - from: message
+#          to: event.original
+#      fail_on_error: false
+#      ignore_missing: true
+#  - truncate_fields:
+#      fields:
+#        - event.original
+#      max_bytes: 1024
+#      fail_on_error: false
+#      ignore_missing: true
+#
+# The following example URL-decodes the value of field1 to field2
+#
+#processors:
+#  - urldecode:
+#      fields:
+#        - from: "field1"
+#          to: "field2"
+#      ignore_missing: false
+#      fail_on_error: true

-#============================= Elastic Cloud ==================================
-# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
+# =============================== Elastic Cloud ================================
+
+# These settings simplify using Auditbeat with the Elastic Cloud (https://cloud.elastic.co/).

 # The cloud.id setting overwrites the `output.elasticsearch.hosts` and
 # `setup.kibana.host` options.
@@ -395,11 +522,11 @@ auditbeat.modules:
 # `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
 #cloud.auth:

-#================================ Outputs ======================================
+# ================================== Outputs ===================================

 # Configure what output to use when sending the data collected by the beat.

-#-------------------------- Elasticsearch output -------------------------------
+# ---------------------------- Elasticsearch Output ----------------------------
 #output.elasticsearch:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -413,12 +540,18 @@ auditbeat.modules:
   # Set gzip compression level.
   #compression_level: 0

-  # Optional protocol and basic auth credentials.
+  # Configure escaping HTML symbols in strings.
+  #escape_html: false
+
+  # Protocol - either `http` (default) or `https`.
   #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
   #username: "elastic"
   #password: "changeme"

-  # Dictionary of HTTP parameters to pass within the url with index operations.
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
   #parameters:
     #param1: value1
     #param2: value2
@@ -429,21 +562,26 @@ auditbeat.modules:
   # Optional index name. The default is "auditbeat" plus date
   # and generates [auditbeat-]YYYY.MM.DD keys.
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
+  #index: "auditbeat-%{[agent.version]}-%{+yyyy.MM.dd}"

   # Optional ingest node pipeline. By default no pipeline will be used.
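+  # For instance, events could be routed through a custom ingest pipeline;
+  # the pipeline id below is a hypothetical sketch, not a default:
+  #pipeline: "auditbeat-custom"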
#pipeline: "" - # Optional HTTP Path + # Optional HTTP path #path: "/elasticsearch" # Custom HTTP headers to add to each request #headers: # X-My-Header: Contents of the header - # Proxy server url + # Proxy server URL #proxy_url: http://proxy:3128 + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + # The number of times a particular Elasticsearch index operation is attempted. If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. @@ -453,55 +591,99 @@ auditbeat.modules: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # SSL configuration. By default is off. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. 
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

-  # Configure curve types for ECDHE based cipher suites
+  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

+  # Configure a pin that can be used to do extra validation of the verified certificate chain;
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""

-#----------------------------- Logstash output ---------------------------------
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+# ------------------------------ Logstash Output -------------------------------
{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }}

-#------------------------------- Kafka output ----------------------------------
+# -------------------------------- Kafka Output --------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

-  # The list of Kafka broker addresses from where to fetch the cluster metadata.
+  # The list of Kafka broker addresses from which to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  #hosts: ["localhost:9092"]

@@ -510,7 +692,7 @@ auditbeat.modules:
  # using any event field. To set the topic from document type use `%{[type]}`.
  #topic: beats

-  # The Kafka event key setting. Use format string to create unique event key.
+  # The Kafka event key setting. Use format string to create a unique event key.
  # By default no event key will be generated.
  #key: ''

@@ -531,37 +713,66 @@ auditbeat.modules:
  #username: ''
  #password: ''

-  # Kafka version auditbeat is assumed to run against. Defaults to the oldest
-  # supported stable version (currently version 0.8.2.0)
-  #version: 0.8.2
+  # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
+  # Defaults to PLAIN when `username` and `password` are configured.
+  #sasl.mechanism: ''

-  # Metadata update configuration. Metadata do contain leader information
-  # deciding which broker to use when publishing.
+  # Kafka version Auditbeat is assumed to run against. Defaults to "1.0.0".
+  #version: '1.0.0'
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON event
+    #pretty: false
+
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false
+
+  # Metadata update configuration. Metadata contains leader information
+  # used to decide which broker to use when publishing.
  #metadata:
    # Max metadata request retry attempts when cluster is in middle of leader
    # election. Defaults to 3 retries.
#retry.max: 3 - # Waiting time between retries during leader elections. Default is 250ms. + # Wait time between retries during leader elections. Default is 250ms. #retry.backoff: 250ms # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false + # The number of concurrent load-balanced Kafka output workers. #worker: 1 # The number of times to retry publishing an event after a publishing failure. - # After the specified number of retries, the events are typically dropped. + # After the specified number of retries, events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s @@ -581,6 +792,10 @@ auditbeat.modules: # default is gzip. #compression: gzip + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + # The maximum permitted size of JSON-encoded messages. Bigger messages will be # dropped. The default value is 1000000 (bytes). This value should be equal to # or less than the broker's message.max.bytes. @@ -596,61 +811,105 @@ auditbeat.modules: # purposes. The default is "beats". #client_id: beats - # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. 
It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
  #ssl.verification_mode: full

-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

-  # Client Certificate Key
+  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

-  # Optional passphrase for decrypting the Certificate Key.
+  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

-  # Configure curve types for ECDHE based cipher suites
+  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

-#------------------------------- Redis output ----------------------------------
+  # Configure a pin that can be used to do extra validation of the verified certificate chain;
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/security/keytabs/kafka.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # The service name. The service principal name is constructed from
+  # service_name/hostname@realm.
+  #kerberos.service_name: kafka
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+# -------------------------------- Redis Output --------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

-  # The list of Redis servers to connect to. If load balancing is enabled, the
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON event
+    #pretty: false
+
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false
+
+  # The list of Redis servers to connect to. If load-balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
+  # The hosts setting supports redis and rediss URLs with custom password like
+  # redis://:password@localhost:6379.
  #hosts: ["localhost:6379"]

-  # The Redis port to use if hosts does not contain a port number. The default
-  # is 6379.
-  #port: 6379
-
  # The name of the Redis list or channel the events are published to. The
  # default is auditbeat.
#key: auditbeat - # The password to authenticate with. The default is no authentication. + # The password to authenticate to Redis with. The default is no authentication. #password: # The Redis database number where the events are published. The default is 0. @@ -684,6 +943,17 @@ auditbeat.modules: # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Redis request or pipeline. # The default is 2048. #bulk_max_size: 2048 @@ -697,54 +967,68 @@ auditbeat.modules: # occurs on the proxy server. #proxy_use_local_resolver: false - # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. 
  #ssl.renegotiation: never

-#------------------------------- File output -----------------------------------
+  # Configure a pin that can be used to do extra validation of the verified certificate chain;
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+
+# -------------------------------- File Output ---------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
-    # Pretty print json event
+    # Pretty-print JSON event
    #pretty: false

-    # Configure escaping html symbols in strings.
-    #escape_html: true
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false

  # Path to the directory where to save the generated files. The option is
  # mandatory.
@@ -755,7 +1039,7 @@ auditbeat.modules:
  #filename: auditbeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
-  # every auditbeat restart, the files are rotated. The default value is 10240
+  # every Auditbeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

@@ -767,92 +1051,112 @@ auditbeat.modules:
  # Permissions to use for file creation. The default is 0600.
  #permissions: 0600

-
-#----------------------------- Console output ---------------------------------
+# ------------------------------- Console Output -------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
-    # Pretty print json event
+    # Pretty-print JSON event
    #pretty: false

-    # Configure escaping html symbols in strings.
-    #escape_html: true
+    # Configure escaping HTML symbols in strings.
+    #escape_html: false

-#================================= Paths ======================================
+# =================================== Paths ====================================

-# The home path for the auditbeat installation. This is the default base path
+# The home path for the Auditbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

-# The configuration path for the auditbeat installation. This is the default
+# The configuration path for the Auditbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

-# The data path for the auditbeat installation. This is the default base path
-# for all the files in which auditbeat needs to store its data. If not set by a
+# The data path for the Auditbeat installation. This is the default base path
+# for all the files in which Auditbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

-# The logs path for a auditbeat installation. This is the default location for
+# The logs path for an Auditbeat installation. This is the default location for
# the Beat's log files.
If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs -#================================ Keystore ========================================== +# ================================== Keystore ================================== + # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" -#============================== Dashboards ===================================== +# ================================= Dashboards ================================= + {{ elk_macros.setup_dashboards('auditbeat') }} -#=============================== Template ====================================== +# ================================== Template ================================== + {{ elk_macros.setup_template('auditbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} -#============================== Setup ILM ===================================== +# ====================== Index Lifecycle Management (ILM) ====================== -# Configure Index Lifecycle Management Index Lifecycle Management creates a -# write alias and adds additional settings to the template. -# The elasticsearch.output.index setting will be replaced with the write alias -# if ILM is enabled. +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. -# Enabled ILM support. Valid values are true, false, and auto. The beat will -# detect availabilty of Index Lifecycle Management in Elasticsearch and enable -# or disable ILM support. +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. #setup.ilm.enabled: auto -# Configure the ILM write alias name. -#setup.ilm.rollover_alias: "filebeat" +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'auditbeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'auditbeat' -# Configure rollover index pattern. +# Set the rollover index pattern. The default is "%{now/d}-000001". #setup.ilm.pattern: "{now/d}-000001" + {% if ilm_policy_name is defined %} +# Set the lifecycle policy name. The default policy name is +# 'beatname'. setup.ilm.policy_name: "{{ ilm_policy_name }}" + {% endif %} {% if ilm_policy_file_location is defined %} +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}" -{% endif %} -#============================== Kibana ===================================== +{% endif %} +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy +# can be installed. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. 
+#setup.ilm.overwrite: false
+
+# =================================== Kibana ===================================
+
{% if (groups['kibana'] | length) > 0 %}
{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
{% endif %}

-#================================ Logging ======================================
+# ================================== Logging ===================================
+
{{ elk_macros.beat_logging('auditbeat', auditbeat_log_level) }}

-#============================== Xpack Monitoring =====================================
-{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+# ============================= X-Pack Monitoring ==============================
+{{ elk_macros.xpack_monitoring_elasticsearch('auditbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+
+# =============================== HTTP Endpoint ================================

-#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be access through http://localhost:5066/stats . For pretty JSON output
@@ -861,13 +1165,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}
# Defines if the HTTP endpoint is enabled.
#http.enabled: false

-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066

-#============================= Process Security ================================
+# Define which user should be owning the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe. Use the Security
+# Descriptor Definition Language (SDDL) to define the permissions. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
+# ============================== Process Security ==============================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true
+
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for Auditbeat.
+#instrumentation:
+    # Set to true to enable instrumentation of Auditbeat.
+    #enabled: false
+
+    # Environment in which Auditbeat is running (e.g. staging, production, etc.)
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+        #cpu:
+            # Set to true to enable CPU profiling.
+            #enabled: false
+            #interval: 60s
+            #duration: 10s
+        #heap:
+            # Set to true to enable heap profiling.
+ #enabled: false + #interval: 60s + +# ================================= Migration ================================== + +# This allows to enable 6.7 migration aliases +#migration.6_to_7.enabled: false diff --git a/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 b/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 index e57cdee2..9470f765 100644 --- a/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 +++ b/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 @@ -16,15 +16,12 @@ filebeat.modules: - module: system # Syslog syslog: - enabled: "{{ filebeat_syslog_enabled | default(true) }}" + enabled: {{ filebeat_syslog_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -44,11 +41,12 @@ filebeat.modules: # can be added under this section. #input: +#------------------------------- ActiveMQ Module ------------------------------- #-------------------------------- Apache Module -------------------------------- - module: apache # Access logs access: - enabled: "{{ filebeat_httpd_enabled | default(true) }}" + enabled: {{ filebeat_httpd_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -63,7 +61,7 @@ filebeat.modules: # Error logs error: - enabled: "{{ filebeat_httpd_enabled | default(true) }}" + enabled: {{ filebeat_httpd_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -77,9 +75,9 @@ filebeat.modules: #input: #-------------------------------- Auditd Module -------------------------------- -#- module: auditd -# log: -# enabled: "{{ filebeat_auditd_enabled | default(true) }}" +- module: auditd + log: + enabled: {{ filebeat_auditd_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -89,6 +87,17 @@ filebeat.modules: # can be added under this section. #input: +#--------------------------------- AWS Module --------------------------------- +#-------------------------------- Azure Module -------------------------------- +#------------------ Barracuda Web Application Firewall Module ------------------ +#-------------------------- Blue Coat Director Module -------------------------- +#--------------------------------- CEF Module --------------------------------- +#------------------------------ Checkpoint Module ------------------------------ +#-------------------------------- Cisco Module -------------------------------- +#------------------------------- Coredns Module ------------------------------- +#----------------------------- Crowdstrike Module ----------------------------- +#------------------------------ Cyber-Ark Module ------------------------------ +#---------------------------- CylanceProtect Module ---------------------------- #---------------------------- Elasticsearch Module ---------------------------- {% if (elasticsearch_enabled | default(false) | bool ) %} - module: elasticsearch @@ -100,45 +109,39 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Convert the timestamp to UTC. 
Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - gc: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - #audit: - #enabled: true + audit: + enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - - #slowlog: - #enabled: true + slowlog: + enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - - #deprecation: - #enabled: true + deprecation: + enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false {% endif %} - -#------------------------------- Haproxy Module ------------------------------- +#------------------------------ Envoyproxy Module ------------------------------ +#--------------------- Big-IP Access Policy Manager Module --------------------- +#------------------------------- Fortinet Module ------------------------------- +#--------------------- Google Cloud Platform (GCP) Module --------------------- +#--------------------------- Google_workspace Module --------------------------- +#----------------------------- Googlecloud Module ----------------------------- +#-------------------------------- Gsuite Module -------------------------------- +#------------------------------- HAProxy Module ------------------------------- {% if (haproxy_enabled | default(false) | bool ) %} - module: haproxy # All logs @@ -153,6 +156,7 @@ filebeat.modules: #var.paths: {% endif %} +#-------------------------------- Ibmmq Module -------------------------------- #-------------------------------- Icinga Module -------------------------------- #- module: icinga # Main logs @@ -217,16 +221,23 @@ filebeat.modules: # can be added under this section. #input: -#------------------------------ IP Tables Module ------------------------------ +#------------------------- Imperva SecureSphere Module ------------------------- +#---------------------------- Infoblox NIOS Module ---------------------------- +#------------------------------- Iptables Module ------------------------------- {% if (filebeat_iptables_enabled | bool) %} - module: iptables - # Syslog log: - enabled: "true" + enabled: true + + # Set which input to use between syslog (default) or file. var.input: "file" + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. var.paths: {{ filebeat_iptables_log_paths | to_json }} {% endif %} +#---------------------------- Juniper JUNOS Module ---------------------------- #-------------------------------- Kafka Module -------------------------------- #- module: kafka # All logs @@ -241,16 +252,21 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - #-------------------------------- Kibana Module -------------------------------- {% if (kibana_enabled | default(false) | bool ) %} - module: kibana - # All logs + # Server logs log: enabled: true + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Audit logs + audit: + enabled: true + # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: @@ -269,12 +285,15 @@ filebeat.modules: # Slow logs #slowlog: - #enabled: true + #enabled: true + # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: {% endif %} +#------------------------------ Microsoft Module ------------------------------ +#--------------------------------- MISP Module --------------------------------- #------------------------------- Mongodb Module ------------------------------- #- module: mongodb # Logs @@ -289,11 +308,12 @@ filebeat.modules: # can be added under this section. #input: +#-------------------------------- Mssql Module -------------------------------- #-------------------------------- MySQL Module -------------------------------- - module: mysql # Error logs error: - enabled: "{{ filebeat_galera_enabled | default(true) }}" + enabled: {{ filebeat_galera_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -317,11 +337,15 @@ filebeat.modules: # can be added under this section. #input: +#--------------------------- MySQL Enterprise Module --------------------------- +#--------------------------------- NATS Module --------------------------------- +#------------------------------- NetFlow Module ------------------------------- +#-------------------------- Arbor Peakflow SP Module -------------------------- #-------------------------------- Nginx Module -------------------------------- - module: nginx # Access logs access: - enabled: "{{ filebeat_nginx_enabled | default(true) }}" + enabled: {{ filebeat_nginx_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -336,12 +360,9 @@ filebeat.modules: # can be added under this section. #input: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false - # Error logs error: - enabled: "{{ filebeat_nginx_enabled | default(true) }}" + enabled: {{ filebeat_nginx_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. @@ -356,13 +377,21 @@ filebeat.modules: # can be added under this section. #input: - # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. - #var.convert_timezone: false + # Ingress-nginx controller logs. This is disabled by default. It could be used in Kubernetes environments to parse ingress-nginx logs + #ingress_controller: + # enabled: false + # + # # Set custom paths for the log files. If left empty, + # # Filebeat will choose the paths depending on your OS. + # #var.paths: +#------------------------------ Office 365 Module ------------------------------ +#--------------------------------- Okta Module --------------------------------- +#-------------------------------- Oracle Module -------------------------------- #------------------------------- Osquery Module ------------------------------- - module: osquery result: - enabled: "{{ filebeat_osquery_enabled | default(true) }}" + enabled: {{ filebeat_osquery_enabled | default(true) | lower }} # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. 
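+    # For example, to read results from a non-default location you could set
+    # (a hypothetical path, shown only as an illustration; when unset, the
+    # module falls back to the OS-specific default):
+    #var.paths: ["/var/log/osquery/osqueryd.results.log"]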
@@ -373,6 +402,7 @@ filebeat.modules:
  # of the document. The default is true.
  var.use_namespace: true

+#--------------------------------- Panw Module ---------------------------------
#------------------------------ PostgreSQL Module ------------------------------
#- module: postgresql
  # Logs
@@ -387,6 +417,9 @@ filebeat.modules:
    # can be added under this section.
    #input:

+#---------------------- Proofpoint Email Security Module ----------------------
+#------------------------------- RabbitMQ Module -------------------------------
+#-------------------------- Radware DefensePro Module --------------------------
#-------------------------------- Redis Module --------------------------------
#- module: redis
  # Main logs
@@ -415,6 +448,13 @@ filebeat.modules:
    # Filebeat will choose the the default path.
    #var.paths:

+#--------------------------- Snort/Sourcefire Module ---------------------------
+#--------------------------------- Snyk Module ---------------------------------
+#----------------------------- Sonicwall-FW Module -----------------------------
+#-------------------------------- Sophos Module --------------------------------
+#-------------------------------- Squid Module --------------------------------
+#------------------------------- Suricata Module -------------------------------
+#---------------------------- Apache Tomcat Module ----------------------------
#------------------------------- Traefik Module -------------------------------
#- module: traefik
  # Access logs
@@ -429,6 +469,9 @@ filebeat.modules:
    # can be added under this section.
    #input:

+#--------------------------------- Zeek Module ---------------------------------
+#--------------------------------- Zoom Module ---------------------------------
+#----------------------------- Zscaler NSS Module -----------------------------
#=========================== Filebeat inputs =============================

@@ -443,15 +486,16 @@ filebeat.inputs:
#
# Possible options are:
# * log: Reads every line of the log file (default)
+# * filestream: Improved version of log input. Experimental.
# * stdin: Reads the standard in

#------------------------------ Log input --------------------------------

{% for p in filebeat_prospectors %}
- type: {{ p['type'] }}
-  enabled: {{ p['enabled'] }}
+  enabled: {{ p['enabled'] | lower }}
  paths:
{% for path in p['paths'] %}
-  - {{ path }}
+    - {{ path }}
{% endfor %}
{% if 'multiline' in p %}
  multiline.pattern: '{{ p['multiline']['pattern'] }}'
@@ -460,9 +504,367 @@ filebeat.inputs:
{% endif %}
  tags:
{% for tag in p['tags'] %}
-  - {{ tag }}
+    - {{ tag }}
{% endfor %}
+
{% endfor %}

+  # Change to true to enable this input configuration.
+  #enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  #paths:
+    #- /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # are matching any regular expression from the list. By default, no files are dropped.
+  #exclude_files: ['.gz$']
+
+  # Method to determine if two files are the same or not. By default
+  # the Beat considers two files the same if their inode and device id are the same.
+  #file_identity.native: ~
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to store the additional fields as top level fields instead
+  # of under the "fields" sub-dictionary. In case of name conflicts with the
+  # fields added by Filebeat itself, the custom fields overwrite the default
+  # fields.
+  #fields_under_root: false
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # By default, all events contain `host.name`. This option can be set to true
+  # to disable the addition of this field to all events. The default value is
+  # false.
+  #publisher_pipeline.disable_host: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # How often the input checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing Filebeat to scan too frequently. Default: 10s.
+  #scan_frequency: 10s
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have
+  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #max_bytes: 10485760
+
+  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
+  #line_terminator: auto
+
+  ### Recursive glob configuration
+
+  # Expand "**" patterns into regular glob patterns.
+  #recursive_glob.enabled: true
+
+  ### JSON configuration
+
+  # Decode JSON options. Enable this if your logs are structured in JSON.
+  # JSON key on which to apply the line filtering and multiline settings. This key
+  # must be top level and its value must be string, otherwise it is ignored. If
+  # no text key is defined, the line filtering and multiline features cannot be used.
+  #json.message_key:
+
+  # By default, the decoded JSON is placed under a "json" key in the output document.
+  # If you enable this setting, the keys are copied top level in the output document.
+  #json.keys_under_root: false
+
+  # If keys_under_root and this setting are enabled, then the values from the decoded
+  # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+  # in case of conflicts.
+  #json.overwrite_keys: false
+
+  # If this setting is enabled, then keys in the decoded JSON object will be recursively
+  # de-dotted, and expanded into a hierarchical object structure.
+  # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+  #json.expand_keys: false
+
+  # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+  # unmarshaling errors or when a text key is defined in the configuration but cannot
+  # be used.
+  #json.add_error_key: false
+
+  ### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java stack traces or C line continuations.
+
+  # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
+  #multiline.pattern: ^\[
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  #multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
+  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+  #multiline.match: after
+
+  # The maximum number of lines that are combined to one event.
+  # In case there are more than max_lines the additional lines are discarded.
+  # Default is 500
+  #multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+  # Default is 5s.
+  #multiline.timeout: 5s
+
+  # To aggregate constant number of lines into a single event use the count mode of multiline.
+  #multiline.type: count
+
+  # The number of lines to aggregate into a single event.
+  #multiline.count_lines: 3
+
+  # Do not add new line character when concatenating lines.
+  #multiline.skip_newline: false
+
+  # Setting tail_files to true means filebeat starts reading new files at the end
+  # instead of the beginning. If this is used in combination with log rotation
+  # this can mean that the first entries of a new file are skipped.
+  #tail_files: false
+
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the
+  # original for harvesting but will report the symlink name as source.
+  #symlinks: false
+
+  # Backoff values define how aggressively filebeat crawls new files for updates
+  # The default values can be used in most cases. Backoff defines how long it is waited
+  # to check a file again after EOF is reached. Default is 1s which means the file
+  # is checked every second if new lines were added. This leads to a near real time crawling.
+  # Every time a new line appears, backoff is reset to the initial value.
+  #backoff: 1s
+
+  # Max backoff defines what the maximum backoff time is. After having backed off multiple times
+  # from checking the files, the waiting time will never exceed max_backoff independent of the
+  # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
+  # file after having backed off multiple times, it takes a maximum of 10s to read the new line
+  #max_backoff: 10s
+
+  # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
+  # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
+  # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
+  #backoff_factor: 2
+
+  # Max number of harvesters that are started in parallel.
+  # Default is 0 which means unlimited
+  #harvester_limit: 0
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close_inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. In case the file shows up again later, harvesting will continue at the last known position
+  # after scan_frequency.
+  #close_removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_eof: false
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is removed from the registry.
+  # By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state for files which can no longer be found on disk.
+  #clean_removed: true
+
+  # Close timeout closes the harvester after the predefined time.
+  # This is independent if the harvester did finish reading the file or not.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_timeout: 0
+
+  # Defines whether the input is enabled.
+  #enabled: true
+
+#--------------------------- Filestream input ----------------------------
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  ### Prospector options
+
+  # How often the input checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing Filebeat to scan too frequently. Default: 10s.
+  #prospector.scanner.check_interval: 10s
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # are matching any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Expand "**" patterns into regular glob patterns.
+  #prospector.scanner.recursive_glob: true
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the
+  # original for harvesting but will report the symlink name as source.
+  #prospector.scanner.symlinks: false
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is removed from the registry.
+  # By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state for files which can no longer be found on disk.
+  #clean_removed: true
+
+  # Method to determine if two files are the same or not. By default
+  # the Beat considers two files the same if their inode and device id are the same.
+  #file_identity.native: ~
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # By default, all events contain `host.name`. This option can be set to true
+  # to disable the addition of this field to all events. The default value is
+  # false.
+  #publisher_pipeline.disable_host: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have
+  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #message_max_bytes: 10485760
+
+  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
+  #line_terminator: auto
+
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # Backoff values define how aggressively filebeat crawls new files for updates
+  # The default values can be used in most cases. Backoff defines how long it is waited
+  # to check a file again after EOF is reached. Default is 1s which means the file
+  # is checked every second if new lines were added. This leads to a near real time crawling.
+  # Every time a new line appears, backoff is reset to the initial value.
+  #backoff.init: 1s
+
+  # Max backoff defines what the maximum backoff time is. After having backed off multiple times
+  # from checking the files, the waiting time will never exceed max_backoff independent of the
+  # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
+  # file after having backed off multiple times, it takes a maximum of 10s to read the new line
+  #backoff.max: 10s
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close.on_state_change.inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.on_state_change.renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. In case the file shows up again later, harvesting will continue at the last known position
+  # after scan_frequency.
+  #close.on_state_change.removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.eof: false
+
+  # Close timeout closes the harvester after the predefined time.
+  # This is independent if the harvester did finish reading the file or not.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.after_interval: 0

#----------------------------- Stdin input -------------------------------
# Configuration to use stdin input
@@ -499,6 +901,10 @@ filebeat.inputs:
  # Maximum size of the message received over UDP
  #max_message_size: 10KiB

+  # Size of the UDP read buffer in bytes
+  #read_buffer: 0
+
+
#------------------------------ TCP input --------------------------------
# Experimental: Config options for the TCP input
#- type: tcp
@@ -513,6 +919,9 @@ filebeat.inputs:
  # Maximum size in bytes of the message received over TCP
  #max_message_size: 20MiB

+  # Max number of concurrent connections, or 0 for no limit. Default: 0
+  #max_connections: 0
+
  # The number of seconds of inactivity before a remote connection is closed.
  #timeout: 300s

@@ -607,21 +1016,22 @@ filebeat.inputs:
  # default to `required` otherwise it will be set to `none`.
  #ssl.client_authentication: "required"

-#------------------------------ Docker input --------------------------------
-# Experimental: Docker input reads and parses `json-file` logs from Docker
-#- type: docker
+#------------------------------ Container input --------------------------------
+#- type: container
  #enabled: false

-  # Combine partial lines flagged by `json-file` format
-  #combine_partials: true
+  # Paths for container logs that should be crawled and fetched.
+  #paths:
+  #  - /var/lib/docker/containers/*/*.log

-  # Use this to read from all containers, replace * with a container id to read from one:
-  #containers:
-  #  stream: all # can be all, stdout or stderr
-  #  ids:
-  #    - '*'
+  # Configure stream to filter to a specific stream: stdout, stderr or all (default)
+  #stream: all

-#========================== Filebeat autodiscover ==============================
+
+#------------------------------ NetFlow input --------------------------------
+#---------------------------- Google Cloud Pub/Sub Input -----------------------
+#------------------------------ S3 input --------------------------------
+# =========================== Filebeat autodiscover ============================

# Autodiscover allows you to detect changes in the system and spawn new modules
# or inputs as they happen.
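+# For example, a minimal hints-based sketch for Kubernetes (assumes the Beat
+# runs inside the cluster; shown only as an illustration alongside the docker
+# provider example below):
+#filebeat.autodiscover:
+#  providers:
+#    - type: kubernetes
+#      hints.enabled: true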
@@ -634,11 +1044,11 @@ filebeat.inputs: # - condition: # equals.docker.container.image: busybox # config: -# - type: log +# - type: container # paths: # - /var/lib/docker/containers/${data.docker.container.id}/*.log -#========================= Filebeat global options ============================ +# ========================== Filebeat global options =========================== # Registry data path. If a relative path is used, it is considered relative to the # data path. @@ -685,7 +1095,7 @@ filebeat.inputs: #reload.enabled: true #reload.period: 10s -#================================ General ====================================== +# ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. @@ -695,7 +1105,7 @@ filebeat.inputs: # The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. -#tags: ["filebeat",] +#tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the # output. Fields can be scalar values, arrays, dictionaries, or any nested @@ -726,9 +1136,44 @@ filebeat.inputs: #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < min_flush_events. + # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s + # The disk queue stores incoming events on disk until the output is + # ready for them. This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. + #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # @@ -793,7 +1238,7 @@ filebeat.inputs: # default is the number of logical CPUs available in the system. 
#max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -811,103 +1256,155 @@ filebeat.inputs: # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: ~ -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. 
# #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true -#============================= Elastic Cloud ================================== +# =============================== Elastic Cloud ================================ -# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). +# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. @@ -918,11 +1415,11 @@ filebeat.inputs: # `output.elasticsearch.password` settings. The format is `:`. #cloud.auth: -#================================ Outputs ====================================== +# ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------- +# ---------------------------- Elasticsearch Output ---------------------------- #output.elasticsearch: # Boolean flag to enable or disable the output module. #enabled: true @@ -939,8 +1436,11 @@ filebeat.inputs: # Configure escaping HTML symbols in strings. #escape_html: false - # Optional protocol and basic auth credentials. + # Protocol - either `http` (default) or `https`. #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" #username: "elastic" #password: "changeme" @@ -970,6 +1470,11 @@ filebeat.inputs: # Proxy server URL #proxy_url: http://proxy:3128 + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + # The number of times a particular Elasticsearch index operation is attempted. 
If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. @@ -996,15 +1501,23 @@ filebeat.inputs: # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL-based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -1028,10 +1541,37 @@ filebeat.inputs: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" -#----------------------------- Logstash output --------------------------------- + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} -#------------------------------- Kafka output ---------------------------------- + +# -------------------------------- Kafka Output -------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true @@ -1066,7 +1606,11 @@ filebeat.inputs: #username: '' #password: '' - # Kafka version filebeat is assumed to run against. Defaults to the "1.0.0". + # SASL authentication mechanism used. 
Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: '' + + # Kafka version Filebeat is assumed to run against. Defaults to the "1.0.0". #version: '1.0.0' # Configure JSON encoding @@ -1090,8 +1634,8 @@ filebeat.inputs: # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m - # Strategy for fetching the topics metadata from the broker. Default is true. - #full: true + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false # The number of concurrent load-balanced Kafka output workers. #worker: 1 @@ -1103,10 +1647,25 @@ filebeat.inputs: # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s @@ -1145,30 +1704,37 @@ filebeat.inputs: # purposes. The default is "beats". #client_id: beats - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. 
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections @@ -1181,7 +1747,38 @@ filebeat.inputs: # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- Redis output ---------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is contructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# -------------------------------- Redis Output -------------------------------- #output.redis: # Boolean flag to enable or disable the output module. #enabled: true @@ -1197,6 +1794,8 @@ filebeat.inputs: # The list of Redis servers to connect to. If load-balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss urls with custom password like + # redis://:password@localhost:6379. #hosts: ["localhost:6379"] # The name of the Redis list or channel the events are published to. The @@ -1261,43 +1860,57 @@ filebeat.inputs: # occurs on the proxy server. #proxy_use_local_resolver: false - # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. 
This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- File output ----------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# -------------------------------- File Output --------------------------------- #output.file: # Boolean flag to enable or disable the output module. #enabled: true @@ -1319,7 +1932,7 @@ filebeat.inputs: #filename: filebeat # Maximum size in kilobytes of each file. When this size is reached, and on - # every filebeat restart, the files are rotated. The default value is 10240 + # every Filebeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 @@ -1331,8 +1944,7 @@ filebeat.inputs: # Permissions to use for file creation. The default is 0600. #permissions: 0600 - -#----------------------------- Console output --------------------------------- +# ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true @@ -1345,76 +1957,99 @@ filebeat.inputs: # Configure escaping HTML symbols in strings. #escape_html: false -#================================= Paths ====================================== +# =================================== Paths ==================================== -# The home path for the filebeat installation. This is the default base path +# The home path for the Filebeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: -# The configuration path for the filebeat installation. 
This is the default +# The configuration path for the Filebeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} -# The data path for the filebeat installation. This is the default base path -# for all the files in which filebeat needs to store its data. If not set by a +# The data path for the Filebeat installation. This is the default base path +# for all the files in which Filebeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data -# The logs path for a filebeat installation. This is the default location for +# The logs path for a Filebeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs -#================================ Keystore ========================================== +# ================================== Keystore ================================== + # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" -#============================== Dashboards ===================================== +# ================================= Dashboards ================================= + {{ elk_macros.setup_dashboards('filebeat') }} -#============================== Template ====================================== +# ================================== Template ================================== + {{ elk_macros.setup_template('filebeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} -#============================== Setup ILM ===================================== +# ====================== Index Lifecycle Management (ILM) ====================== -# Configure Index Lifecycle Management Index Lifecycle Management creates a -# write alias and adds additional settings to the template. -# The elasticsearch.output.index setting will be replaced with the write alias -# if ILM is enabled. +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. -# Enabled ILM support. Valid values are true, false, and auto. The beat will -# detect availabilty of Index Lifecycle Management in Elasticsearch and enable -# or disable ILM support. +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. #setup.ilm.enabled: auto -# Configure the ILM write alias name. -#setup.ilm.rollover_alias: "filebeat" +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'filebeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'filebeat' -# Configure rollover index pattern. +# Set the rollover index pattern. The default is "%{now/d}-000001". #setup.ilm.pattern: "{now/d}-000001" + {% if ilm_policy_name is defined %} +# Set the lifecycle policy name. The default policy name is +# 'beatname'. 
setup.ilm.policy_name: "{{ ilm_policy_name }}" + {% endif %} {% if ilm_policy_file_location is defined %} +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}" -{% endif %} -#============================== Kibana ====================================== +{% endif %} +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy +# can be installed. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. +#setup.ilm.overwrite: false + +# =================================== Kibana =================================== + {% if (groups['kibana'] | length) > 0 %} {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} {% endif %} -#================================ Logging ====================================== -{{ elk_macros.beat_logging('filebeat', filebeat_log_level) }} -#============================== Xpack Monitoring ===================================== -{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} -#================================ HTTP Endpoint ====================================== +# ================================== Logging =================================== + +{{ elk_macros.beat_logging('filebeat', filebeat_log_level) }} + +# ============================= X-Pack Monitoring ============================== +{{ elk_macros.xpack_monitoring_elasticsearch('filebeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +# =============================== HTTP Endpoint ================================ + # Each beat can expose internal metrics through a HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. # Stats can be access through http://localhost:5066/stats . For pretty JSON output @@ -1423,18 +2058,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }} # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 -#============================= Process Security ================================ +# Define which user should be owning the named pipe. +#http.named_pipe.user: + +# Define which the permissions that should be applied to the named pipe, use the Security +# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + +# ============================== Process Security ============================== # Enable or disable seccomp system call filtering on Linux. Default is enabled. #seccomp.enabled: true -#================================= Migration ================================== +# ============================== Instrumentation =============================== + +# Instrumentation support for the filebeat. +#instrumentation: + # Set to true to enable instrumentation of filebeat. 
+ #enabled: false + + # Environment in which filebeat is running on (eg: staging, production, etc.) + #environment: "" + + # APM Server hosts to report instrumentation results to. + #hosts: + # - http://localhost:8200 + + # API Key for the APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the APM Server(s). + #secret_token: + + # Enable profiling of the server, recording profile samples as events. + # + # This feature is experimental. + #profiling: + #cpu: + # Set to true to enable CPU profiling. + #enabled: false + #interval: 60s + #duration: 10s + #heap: + # Set to true to enable heap profiling. + #enabled: false + #interval: 60s + +# ================================= Migration ================================== # This allows to enable 6.7 migration aliases #migration.6_to_7.enabled: false diff --git a/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 b/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 index f483018d..cfa7f968 100644 --- a/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 +++ b/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 @@ -32,9 +32,15 @@ heartbeat.monitors: - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping # configured hosts - # Monitor name used for job name and document type. + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + #id: my-monitor + + # Human readable display name for this service in Uptime UI and elsewhere name: icmp + # Name of corresponding APM service, if Elastic APM is in use for the monitored service. + # service.name: my-apm-service-name + # Enable/Disable monitor enabled: true @@ -43,6 +49,7 @@ heartbeat.monitors: # List of hosts to ping hosts: {{ (icmp_hosts | default([])) | to_json }} + # Configure IP protocol types to ping on if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true @@ -80,6 +87,17 @@ heartbeat.monitors: # Interval between file file changed checks. #interval: 5s + # The Ingest Node pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # The index name associated with this input. If this is set, it + # overwrites the index option from the Elasticsearch output. + #index: + + # Set to true to publish fields with null values in events. + #keep_null: false + # Define a directory to load monitor definitions from. Definitions take the form # of individual yaml files. # heartbeat.config.monitors: @@ -102,29 +120,17 @@ heartbeat.monitors: {% if hosts | length > 0 %} - type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint # by sending/receiving a custom payload + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + #id: my-monitor - # Monitor name used for job name and document type + # Human readable display name for this service in Uptime UI and elsewhere name: {{ item.name }} # Enable/Disable monitor enabled: true # Configure task schedule - schedule: '@every 30s' # every 30 seconds from start of beat - - # configure hosts to ping. - # Entries can be: - # - plain host name or IP like `localhost`: - # Requires ports configs to be checked. If ssl is configured, - # a SSL/TLS based connection will be established. 
Otherwise plain tcp connection - will be established - name: "{{ item.name }}" - - # Enable/Disable monitor - enabled: true - - # Configure task schedule - schedule: '@every 45s' # every 5 seconds from start of beat + schedule: '@every 45s' # configure hosts to ping. # Entries can be: @@ -178,6 +184,27 @@ heartbeat.monitors: # Required TLS protocols #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] + + # NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE + # Configure a JSON file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file changed checks. + #interval: 5s + + # The Ingest Node pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # The index name associated with this input. If this is set, it + # overwrites the index option from the Elasticsearch output. + #index: + + # Set to true to publish fields with null values in events. + #keep_null: false + {% endif %} {% elif item.type == 'http' %} {% set hosts = [] %} {% for host in item.hosts %} {% endfor %} {% endfor %} {% if hosts | length > 0 %} - # NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE - # Configure file json file to be watched for changes to the monitor: - #watch.poll_file: - # Path to check for updates. - #path: - - # Interval between file file changed checks. - #interval: 5s - - type: http # monitor type `http`. Connect via HTTP and optionally verify response + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + #id: my-http-monitor - # Monitor name used for job name and document type + # Human readable display name for this service in Uptime UI and elsewhere name: "{{ item.name }}" # Enable/Disable monitor enabled: true # Configure task schedule - schedule: '@every 60s' # every 5 seconds from start of beat + schedule: '@every 60s' # Configure URLs to ping urls: {{ (hosts | default([])) | to_json }} @@ -252,13 +272,13 @@ heartbeat.monitors: # Dictionary of additional HTTP headers to send: headers: User-agent: osa-heartbeat-healthcheck + # Optional request body content #body: - # Expected response settings {% if item.check_response is defined %} + # Expected response settings check.response: {{ item.check_response }} - #check.response: # Expected status code. If not configured or set to 0 any status code not # being 404 is accepted. #status: 0 @@ -268,10 +288,6 @@ heartbeat.monitors: # Required response contents. #body: -{% endif %} -{% endif %} -{% endif %} -{% endfor %} # Parses the body as JSON, then checks against the given condition expression #json: # - description: check status # condition: # equals: # myField: expectedValue - +{% endif %} # NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE # Configure a JSON file to be watched for changes to the monitor: #watch.poll_file: # Path to check for updates. #path: @@ -290,16 +306,30 @@ # Interval between file changed checks. #interval: 5s + # The Ingest Node pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # The index name associated with this input. If this is set, it + # overwrites the index option from the Elasticsearch output. + #index: + + # Set to true to publish fields with null values in events. + #keep_null: false + +{% endif %} +{% endif %} +{% endfor %}
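+
+# For illustration only (this block is not rendered by the template): one
+# fully expanded http monitor entry, roughly as the loop above would emit it.
+# The id, name and URL are hypothetical placeholders.
+#
+#- type: http
+#  id: my-http-monitor
+#  name: my-service
+#  enabled: true
+#  schedule: '@every 60s'
+#  urls: ["http://localhost:80/"]
+#  check.response.status: 200
+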
heartbeat.scheduler: # Limit number of concurrent tasks executed by heartbeat. The task limit is # disabled if set to 0. The default is 0. limit: {{ icmp_hosts | length // 4 }} - # Set the scheduler it's timezone + # Set the scheduler's time zone #location: '' -#================================ General ====================================== +# ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. @@ -340,9 +370,44 @@ heartbeat.scheduler: #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < min_flush_events. + # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s + # The disk queue stores incoming events on disk until the output is + # ready for them. This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. + #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # @@ -407,7 +472,7 @@ heartbeat.scheduler: # default is the number of logical CPUs available in the system.
#max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -425,103 +490,155 @@ heartbeat.scheduler: # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. 
# #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true -#============================= Elastic Cloud ================================== +# =============================== Elastic Cloud ================================ -# These settings simplify using heartbeat with the Elastic Cloud (https://cloud.elastic.co/). +# These settings simplify using Heartbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. @@ -532,11 +649,11 @@ heartbeat.scheduler: # `output.elasticsearch.password` settings. The format is `:`. #cloud.auth: -#================================ Outputs ====================================== +# ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------- +# ---------------------------- Elasticsearch Output ---------------------------- #output.elasticsearch: # Boolean flag to enable or disable the output module. #enabled: true @@ -553,8 +670,11 @@ heartbeat.scheduler: # Configure escaping HTML symbols in strings. #escape_html: false - # Optional protocol and basic auth credentials. + # Protocol - either `http` (default) or `https`. #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" #username: "elastic" #password: "changeme" @@ -584,6 +704,11 @@ heartbeat.scheduler: # Proxy server URL #proxy_url: http://proxy:3128 + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + # The number of times a particular Elasticsearch index operation is attempted. 
If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. @@ -610,15 +735,23 @@ heartbeat.scheduler: # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL-based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -636,17 +769,43 @@ heartbeat.scheduler: #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites - # #ssl.curve_types: [] + #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" -#----------------------------- Logstash output --------------------------------- + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} -#------------------------------- Kafka output ---------------------------------- +# -------------------------------- Kafka Output -------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. 
#enabled: true @@ -681,7 +840,11 @@ heartbeat.scheduler: #username: '' #password: '' - # Kafka version heartbeat is assumed to run against. Defaults to the "1.0.0". + # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: '' + + # Kafka version Heartbeat is assumed to run against. Defaults to the "1.0.0". #version: '1.0.0' # Configure JSON encoding @@ -705,8 +868,8 @@ heartbeat.scheduler: # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m - # Strategy for fetching the topics metadata from the broker. Default is true. - #full: true + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false # The number of concurrent load-balanced Kafka output workers. #worker: 1 @@ -718,10 +881,25 @@ heartbeat.scheduler: # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s @@ -760,30 +938,37 @@ heartbeat.scheduler: # purposes. The default is "beats". #client_id: beats - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. 
- #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections @@ -796,7 +981,38 @@ heartbeat.scheduler: # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- Redis output ---------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is contructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# -------------------------------- Redis Output -------------------------------- #output.redis: # Boolean flag to enable or disable the output module. #enabled: true @@ -812,6 +1028,8 @@ heartbeat.scheduler: # The list of Redis servers to connect to. If load-balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss urls with custom password like + # redis://:password@localhost:6379. #hosts: ["localhost:6379"] # The name of the Redis list or channel the events are published to. The @@ -876,43 +1094,57 @@ heartbeat.scheduler: # occurs on the proxy server. #proxy_use_local_resolver: false - # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. 
+
+# -------------------------------- Redis Output --------------------------------
 #output.redis:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -812,6 +1028,8 @@ heartbeat.scheduler:
   # The list of Redis servers to connect to. If load-balancing is enabled, the
   # events are distributed to the servers in the list. If one server becomes
   # unreachable, the events are distributed to the reachable servers only.
+  # The hosts setting supports redis and rediss urls with custom password like
+  # redis://:password@localhost:6379.
   #hosts: ["localhost:6379"]

   # The name of the Redis list or channel the events are published to. The
@@ -876,43 +1094,57 @@ heartbeat.scheduler:
   # occurs on the proxy server.
   #proxy_use_local_resolver: false

-  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+  # Use SSL settings for HTTPS.
   #ssl.enabled: true

-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
   #ssl.verification_mode: full

-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

-  # Optional SSL configuration options. SSL is off by default.
   # List of root certificates for HTTPS server verifications
   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

   # Certificate for SSL client authentication
   #ssl.certificate: "/etc/pki/client/cert.pem"

-  # Client Certificate Key
+  # Client certificate key
   #ssl.key: "/etc/pki/client/cert.key"

-  # Optional passphrase for decrypting the Certificate Key.
+  # Optional passphrase for decrypting the certificate key.
   #ssl.key_passphrase: ''

   # Configure cipher suites to be used for SSL connections
   #ssl.cipher_suites: []

-  # Configure curve types for ECDHE based cipher suites
+  # Configure curve types for ECDHE-based cipher suites
   #ssl.curve_types: []

   # Configure what types of renegotiation are supported. Valid options are
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never

-#------------------------------- File output -----------------------------------
+  # Configure a pin that can be used to do extra validation of the verified certificate chain,
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+
+# -------------------------------- File Output ---------------------------------
 #output.file:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -934,7 +1166,7 @@ heartbeat.scheduler:
   #filename: heartbeat

   # Maximum size in kilobytes of each file. When this size is reached, and on
-  # every heartbeat restart, the files are rotated. The default value is 10240
+  # every Heartbeat restart, the files are rotated. The default value is 10240
   # kB.
   #rotate_every_kb: 10000

@@ -946,8 +1178,7 @@ heartbeat.scheduler:
   # Permissions to use for file creation. The default is 0600.
   #permissions: 0600

-
-#----------------------------- Console output ---------------------------------
+# ------------------------------- Console Output -------------------------------
 #output.console:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -960,77 +1191,99 @@ heartbeat.scheduler:
   # Configure escaping HTML symbols in strings.
   #escape_html: false

-#================================= Paths ======================================
+# =================================== Paths ====================================

-# The home path for the heartbeat installation. This is the default base path
+# The home path for the Heartbeat installation. This is the default base path
 # for all other path settings and for miscellaneous files that come with the
 # distribution (for example, the sample dashboards).
 # If not set by a CLI flag or in the configuration file, the default for the
 # home path is the location of the binary.
 #path.home:

-# The configuration path for the heartbeat installation. This is the default
+# The configuration path for the Heartbeat installation. This is the default
 # base path for configuration files, including the main YAML configuration file
 # and the Elasticsearch template file. If not set by a CLI flag or in the
 # configuration file, the default for the configuration path is the home path.
 #path.config: ${path.home}

-# The data path for the heartbeat installation. This is the default base path
-# for all the files in which heartbeat needs to store its data. If not set by a
+# The data path for the Heartbeat installation. This is the default base path
+# for all the files in which Heartbeat needs to store its data. If not set by a
 # CLI flag or in the configuration file, the default for the data path is a data
 # subdirectory inside the home path.
 #path.data: ${path.home}/data

-# The logs path for a heartbeat installation. This is the default location for
+# The logs path for a Heartbeat installation. This is the default location for
 # the Beat's log files. If not set by a CLI flag or in the configuration file,
 # the default for the logs path is a logs subdirectory inside the home path.
 #path.logs: ${path.home}/logs

-#================================ Keystore ==========================================
+# ================================== Keystore ==================================
+
 # Location of the Keystore containing the keys and their sensitive values.
 #keystore.path: "${path.config}/beats.keystore"

-#============================== Dashboards =====================================
+# ================================= Dashboards =================================
+
 {{ elk_macros.setup_dashboards('heartbeat') }}

-#============================== Template =====================================
+# ================================== Template ==================================
+
 {{ elk_macros.setup_template('heartbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }}

-#============================== Setup ILM =====================================
+# ====================== Index Lifecycle Management (ILM) ======================

-# Configure Index Lifecycle Management Index Lifecycle Management creates a
-# write alias and adds additional settings to the template.
-# The elasticsearch.output.index setting will be replaced with the write alias
-# if ILM is enabled.
+# Configure index lifecycle management (ILM). These settings create a write
+# alias and add additional settings to the index template. When ILM is enabled,
+# output.elasticsearch.index is ignored, and the write alias is used to set the
+# index name.

-# Enabled ILM support. Valid values are true, false, and auto. The beat will
-# detect availabilty of Index Lifecycle Management in Elasticsearch and enable
-# or disable ILM support.
+# Enable ILM support. Valid values are true, false, and auto. When set to auto
+# (the default), the Beat uses index lifecycle management when it connects to a
+# cluster that supports ILM; otherwise, it creates daily indices.
 #setup.ilm.enabled: auto

-# Configure the ILM write alias name.
-#setup.ilm.rollover_alias: "heartbeat"
+# Set the prefix used in the index lifecycle write alias name. The default alias
+# name is 'heartbeat-%{[agent.version]}'.
+#setup.ilm.rollover_alias: 'heartbeat'

-# Configure rollover index pattern.
+# Set the rollover index pattern. The default is "%{now/d}-000001".
 #setup.ilm.pattern: "{now/d}-000001"
+
 {% if ilm_policy_name is defined %}
+# Set the lifecycle policy name. The default policy name is
+# 'beatname'.
 setup.ilm.policy_name: "{{ ilm_policy_name }}"
+
 {% endif %}
 {% if ilm_policy_file_location is defined %}
+# The path to a JSON file that contains a lifecycle policy configuration. Used
+# to load your own lifecycle policy.
 setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}"
+
 {% endif %}

-#============================== Kibana =====================================
+# Disable the check for an existing lifecycle policy. The default is true. If
+# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
+# can be installed.
+#setup.ilm.check_exists: true
+
+# Overwrite the lifecycle policy at startup. The default is false.
+#setup.ilm.overwrite: false
+
+# =================================== Kibana ===================================
+
 {% if (groups['kibana'] | length) > 0 %}
 {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
 {% endif %}

-#================================ Logging ======================================
+# ================================== Logging ===================================
+
 {{ elk_macros.beat_logging('heartbeat', heartbeat_log_level) }}

-#============================== Xpack Monitoring =====================================
-{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+# ============================= X-Pack Monitoring ==============================
+{{ elk_macros.xpack_monitoring_elasticsearch('heartbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+
+# =============================== HTTP Endpoint ================================

-#================================ HTTP Endpoint ======================================
 # Each beat can expose internal metrics through a HTTP endpoint. For security
 # reasons the endpoint is disabled by default. This feature is currently experimental.
 # Stats can be access through http://localhost:5066/stats . For pretty JSON output
@@ -1039,18 +1292,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}

 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false

-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost

 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066

-#============================= Process Security ================================
+# Define which user should be owning the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe, using the
+# Security Descriptor Definition Language (SDDL). This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
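+
+# As a hedged example (not from the upstream reference), enabling the endpoint
+# on its default port and querying it might look like:
+#
+#   http.enabled: true
+#   http.host: localhost
+#   http.port: 5066
+#
+#   $ curl http://localhost:5066/stats?pretty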
+
+# ============================== Process Security ==============================

 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
 #seccomp.enabled: true

-#================================= Migration ==================================
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for Heartbeat.
+#instrumentation:
+    # Set to true to enable instrumentation of Heartbeat.
+    #enabled: false
+
+    # Environment in which Heartbeat is running (e.g. staging, production, etc.)
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+        #cpu:
+            # Set to true to enable CPU profiling.
+            #enabled: false
+            #interval: 60s
+            #duration: 10s
+        #heap:
+            # Set to true to enable heap profiling.
+            #enabled: false
+            #interval: 60s
+
+# ================================= Migration ==================================

 # This allows to enable 6.7 migration aliases
 #migration.6_to_7.enabled: false

diff --git a/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2 b/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2
index 17b6c73c..f54a15de 100644
--- a/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2
+++ b/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2
@@ -11,13 +11,21 @@
 # For more available modules and options, please see the journalbeat.reference.yml sample
 # configuration file.

-#=========================== Journalbeat inputs =============================
+# ============================= Journalbeat inputs =============================

 journalbeat.inputs:
   # Paths that should be crawled and fetched. Possible values files and directories.
   # When setting a directory, all journals under it are merged.
   # When empty starts to read from local journal.
-- paths: {{ journal_paths | to_json }}
+- paths:
+{% for jp in journal_paths %}
+  - {{ jp }}
+{% endfor %}
+
+  # An optional unique identifier for the input. By providing a unique `id` you
+  # can operate multiple inputs on the same journal. This allows each input's
+  # cursor to be persisted independently in the registry file.
+  #id: ""

   # The number of seconds to wait before trying to read again from journals.
   backoff: 10s
@@ -33,6 +41,11 @@ journalbeat.inputs:
   # Matching for nginx entries: "systemd.unit=nginx"
   #include_matches: []

+  # Set the option to preserve the remote hostname in entries from a remote journal.
+  # It is only needed when used with add_host_metadata, so the original host name
+  # does not get overwritten by the processor.
+  #save_remote_hostname: false
+
   # Optional fields that you can specify to add additional information to the
   # output. Fields can be scalar values, arrays, dictionaries, or any nested
   # combination of these.
   #fields:
   #  env: staging
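+
+# A hedged sketch (not part of the role's template): a second, commented-out
+# input that reads only nginx entries via the `id` and `include_matches`
+# options documented above; the id value is a placeholder.
+#- paths: []
+#  id: "nginx-journal"
+#  include_matches:
+#    - "systemd.unit=nginx"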
-#========================= Journalbeat global options ============================
+# ========================= Journalbeat global options =========================
 journalbeat:
   # Name of the registry file. If a relative path is used, it is considered relative to the
   # data path.
   registry_file: registry

-#==================== Elasticsearch template setting ==========================
+# ======================= Elasticsearch template setting =======================
 setup.template.settings:
   index.number_of_shards: 1
   #index.codec: best_compression
   #_source.enabled: false

-#================================ General ======================================
+# ================================== General ===================================

 # The name of the shipper that publishes the network data. It can be used to group
 # all the transactions sent by a single shipper in the web interface.
@@ -94,9 +107,44 @@ queue:
     flush.min_events: {{ journalbeat_queue_flush_min_events }}

     # Maximum duration after which events are available to the outputs,
-    # if the number of events stored in the queue is < min_flush_events.
+    # if the number of events stored in the queue is < `flush.min_events`.
     flush.timeout: {{ journalbeat_queue_flush_timeout }}

+  # The disk queue stores incoming events on disk until the output is
+  # ready for them. This allows a higher event limit than the memory-only
+  # queue and lets pending events persist through a restart.
+  #disk:
+    # The directory path to store the queue's data.
+    #path: "${path.data}/diskqueue"
+
+    # The maximum space the queue should occupy on disk. Depending on
+    # input settings, events that exceed this limit are delayed or discarded.
+    #max_size: 10GB
+
+    # The maximum size of a single queue data file. Data in the queue is
+    # stored in smaller segments that are deleted after all their events
+    # have been processed.
+    #segment_size: 1GB
+
+    # The number of events to read from disk to memory while waiting for
+    # the output to request them.
+    #read_ahead: 512
+
+    # The number of events to accept from inputs while waiting for them
+    # to be written to disk. If event data arrives faster than it
+    # can be written to disk, this setting prevents it from overflowing
+    # main memory.
+    #write_ahead: 2048
+
+    # The duration to wait before retrying when the queue encounters a disk
+    # write error.
+    #retry_interval: 1s
+
+    # The maximum length of time to wait before retrying on a disk write
+    # error. If the queue encounters repeated errors, it will double the
+    # length of its retry interval each time, up to this maximum.
+    #max_retry_interval: 30s
+
   # The spool queue will store events in a local spool file, before
   # forwarding the events to the outputs.
   #
@@ -161,7 +209,7 @@ queue:
   # default is the number of logical CPUs available in the system.
#max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -179,103 +227,155 @@ queue: # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. 
# #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true -#============================= Elastic Cloud ================================== +# =============================== Elastic Cloud ================================ -# These settings simplify using journalbeat with the Elastic Cloud (https://cloud.elastic.co/). +# These settings simplify using Journalbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. @@ -286,11 +386,11 @@ queue: # `output.elasticsearch.password` settings. The format is `:`. #cloud.auth: -#================================ Outputs ====================================== +# ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------- +# ---------------------------- Elasticsearch Output ---------------------------- #output.elasticsearch: # Boolean flag to enable or disable the output module. #enabled: true @@ -307,8 +407,11 @@ queue: # Configure escaping HTML symbols in strings. #escape_html: false - # Optional protocol and basic auth credentials. + # Protocol - either `http` (default) or `https`. #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" #username: "elastic" #password: "changeme" @@ -338,6 +441,11 @@ queue: # Proxy server URL #proxy_url: http://proxy:3128 + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + # The number of times a particular Elasticsearch index operation is attempted. 
   # If the indexing operation doesn't succeed after this many retries, the events are
   # dropped. The default is 3.
@@ -364,15 +472,23 @@ queue:

   # Use SSL settings for HTTPS.
   #ssl.enabled: true

-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL-based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
   #ssl.verification_mode: full

-  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

   # List of root certificates for HTTPS server verifications
   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
@@ -396,11 +512,37 @@ queue:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never

+  # Configure a pin that can be used to do extra validation of the verified certificate chain,
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""

-#----------------------------- Logstash output ---------------------------------
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
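+
+  # A hedged sketch (not part of the upstream reference): an HTTPS connection
+  # authenticated with an API key, using the options documented above; host and
+  # key values are placeholders.
+  #hosts: ["es1.example.com:9200"]
+  #protocol: "https"
+  #api_key: "myKeyId:myKeyValue"
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]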
+
+# ------------------------------ Logstash Output -------------------------------
{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count, 'journalbeat') }}

-#------------------------------- Kafka output ----------------------------------
+# -------------------------------- Kafka Output --------------------------------
 #output.kafka:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -435,7 +577,11 @@ queue:
   #username: ''
   #password: ''

-  # Kafka version journalbeat is assumed to run against. Defaults to the "1.0.0".
+  # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
+  # Defaults to PLAIN when `username` and `password` are configured.
+  #sasl.mechanism: ''
+
+  # Kafka version Journalbeat is assumed to run against. Defaults to "1.0.0".
   #version: '1.0.0'

   # Configure JSON encoding
@@ -459,8 +605,8 @@ queue:
   # Refresh metadata interval. Defaults to every 10 minutes.
   #refresh_frequency: 10m

-  # Strategy for fetching the topics metadata from the broker. Default is true.
-  #full: true
+  # Strategy for fetching the topics metadata from the broker. Default is false.
+  #full: false

   # The number of concurrent load-balanced Kafka output workers.
   #worker: 1
@@ -472,10 +618,25 @@ queue:
   # until all events are published. The default is 3.
   #max_retries: 3

+  # The number of seconds to wait before trying to republish to Kafka
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to republish. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful publish, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to republish to
+  # Kafka after a network error. The default is 60s.
+  #backoff.max: 60s
+
   # The maximum number of events to bulk in a single Kafka request. The default
   # is 2048.
   #bulk_max_size: 2048

+  # Duration to wait before sending bulk Kafka request. 0 is no delay. The default
+  # is 0.
+  #bulk_flush_frequency: 0s
+
   # The number of seconds to wait for responses from the Kafka brokers before
   # timing out. The default is 30s.
   #timeout: 30s
@@ -514,30 +675,37 @@ queue:
   # purposes. The default is "beats".
   #client_id: beats

-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  # Use SSL settings for HTTPS.
   #ssl.enabled: true

-  # Optional SSL configuration options. SSL is off by default.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
   #ssl.verification_mode: full

-  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
-  # 1.2 are enabled.
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

   # Certificate for SSL client authentication
   #ssl.certificate: "/etc/pki/client/cert.pem"

-  # Client Certificate Key
+  # Client certificate key
   #ssl.key: "/etc/pki/client/cert.key"

-  # Optional passphrase for decrypting the Certificate Key.
+  # Optional passphrase for decrypting the certificate key.
   #ssl.key_passphrase: ''

   # Configure cipher suites to be used for SSL connections
@@ -550,7 +718,38 @@ queue:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never

-#------------------------------- Redis output ----------------------------------
+  # Configure a pin that can be used to do extra validation of the verified certificate chain,
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/security/keytabs/kafka.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # The service name. Service principal name is constructed from
+  # service_name/hostname@realm.
+  #kerberos.service_name: kafka
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
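+
+  # A hedged sketch (not part of the upstream reference) combining the SASL and
+  # TLS options above for a SCRAM-authenticated cluster; hosts, topic, and
+  # credentials are placeholders.
+  #hosts: ["kafka1:9092"]
+  #topic: "journalbeat"
+  #username: "journalbeat"
+  #password: "changeme"
+  #sasl.mechanism: "SCRAM-SHA-512"
+  #ssl.enabled: true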
+
+# -------------------------------- Redis Output --------------------------------
 #output.redis:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -566,6 +765,8 @@ queue:
   # The list of Redis servers to connect to. If load-balancing is enabled, the
   # events are distributed to the servers in the list. If one server becomes
   # unreachable, the events are distributed to the reachable servers only.
+  # The hosts setting supports redis and rediss urls with custom password like
+  # redis://:password@localhost:6379.
   #hosts: ["localhost:6379"]

   # The name of the Redis list or channel the events are published to. The
@@ -630,43 +831,57 @@ queue:
   # occurs on the proxy server.
   #proxy_use_local_resolver: false

-  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+  # Use SSL settings for HTTPS.
   #ssl.enabled: true

-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
   #ssl.verification_mode: full

-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

-  # Optional SSL configuration options. SSL is off by default.
   # List of root certificates for HTTPS server verifications
   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

   # Certificate for SSL client authentication
   #ssl.certificate: "/etc/pki/client/cert.pem"

-  # Client Certificate Key
+  # Client certificate key
   #ssl.key: "/etc/pki/client/cert.key"

-  # Optional passphrase for decrypting the Certificate Key.
+  # Optional passphrase for decrypting the certificate key.
   #ssl.key_passphrase: ''

   # Configure cipher suites to be used for SSL connections
   #ssl.cipher_suites: []

-  # Configure curve types for ECDHE based cipher suites
+  # Configure curve types for ECDHE-based cipher suites
   #ssl.curve_types: []

   # Configure what types of renegotiation are supported. Valid options are
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never

-#------------------------------- File output -----------------------------------
+  # Configure a pin that can be used to do extra validation of the verified certificate chain,
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+
+# -------------------------------- File Output ---------------------------------
 #output.file:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -688,7 +903,7 @@ queue:
   #filename: journalbeat

   # Maximum size in kilobytes of each file. When this size is reached, and on
-  # every journalbeat restart, the files are rotated. The default value is 10240
+  # every Journalbeat restart, the files are rotated. The default value is 10240
   # kB.
   #rotate_every_kb: 10000

@@ -700,8 +915,7 @@ queue:
   # Permissions to use for file creation. The default is 0600.
   #permissions: 0600

-
-#----------------------------- Console output ---------------------------------
+# ------------------------------- Console Output -------------------------------
 #output.console:
   # Boolean flag to enable or disable the output module.
   #enabled: true
@@ -714,78 +928,99 @@ queue:
   # Configure escaping HTML symbols in strings.
   #escape_html: false

-#================================= Paths ======================================
+# =================================== Paths ====================================

-# The home path for the journalbeat installation. This is the default base path
+# The home path for the Journalbeat installation. This is the default base path
 # for all other path settings and for miscellaneous files that come with the
 # distribution (for example, the sample dashboards).
 # If not set by a CLI flag or in the configuration file, the default for the
 # home path is the location of the binary.
 #path.home:

-# The configuration path for the journalbeat installation.
This is the default +# The configuration path for the Journalbeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} -# The data path for the journalbeat installation. This is the default base path -# for all the files in which journalbeat needs to store its data. If not set by a +# The data path for the Journalbeat installation. This is the default base path +# for all the files in which Journalbeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data -# The logs path for a journalbeat installation. This is the default location for +# The logs path for a Journalbeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs -#================================ Keystore ========================================== +# ================================== Keystore ================================== + # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" -#============================== Dashboards ===================================== +# ================================= Dashboards ================================= + {{ elk_macros.setup_dashboards('journalbeat') }} -#============================== Template ===================================== +# ================================== Template ================================== + {{ elk_macros.setup_template('journalbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} -#============================== Setup ILM ===================================== +# ====================== Index Lifecycle Management (ILM) ====================== -# Configure Index Lifecycle Management Index Lifecycle Management creates a -# write alias and adds additional settings to the template. -# The elasticsearch.output.index setting will be replaced with the write alias -# if ILM is enabled. +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. -# Enabled ILM support. Valid values are true, false, and auto. The beat will -# detect availabilty of Index Lifecycle Management in Elasticsearch and enable -# or disable ILM support. +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. #setup.ilm.enabled: auto -# Configure the ILM write alias name. -#setup.ilm.rollover_alias: "journalbeat" +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'journalbeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'journalbeat' -# Configure rollover index pattern. +# Set the rollover index pattern. The default is "%{now/d}-000001". #setup.ilm.pattern: "{now/d}-000001" + {% if ilm_policy_name is defined %} +# Set the lifecycle policy name. 
+# The default policy name is 'beatname'.
 setup.ilm.policy_name: "{{ ilm_policy_name }}"
+
 {% endif %}
 {% if ilm_policy_file_location is defined %}
+# The path to a JSON file that contains a lifecycle policy configuration. Used
+# to load your own lifecycle policy.
 setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}"
-{% endif %}

-#============================== Kibana =====================================
+{% endif %}
+# Disable the check for an existing lifecycle policy. The default is true. If
+# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
+# can be installed.
+#setup.ilm.check_exists: true
+
+# Overwrite the lifecycle policy at startup. The default is false.
+#setup.ilm.overwrite: false
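+
+# A hedged illustration (not part of the upstream reference) of overriding the
+# ILM defaults with a custom alias and weekly rollover; the alias name is a
+# placeholder.
+#
+#   setup.ilm.enabled: true
+#   setup.ilm.rollover_alias: 'journalbeat-custom'
+#   setup.ilm.pattern: "{now/w}-000001"
+#   setup.ilm.overwrite: true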
+
+# =================================== Kibana ===================================
+
 {% if (groups['kibana'] | length) > 0 %}
 {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
 {% endif %}

-#================================ Logging ======================================
+# ================================== Logging ===================================
+
 {{ elk_macros.beat_logging('journalbeat', journalbeat_log_level) }}

-#============================== Xpack Monitoring =====================================
-{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+# ============================= X-Pack Monitoring ==============================
+{{ elk_macros.xpack_monitoring_elasticsearch('journalbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+
+# =============================== HTTP Endpoint ================================

-#================================ HTTP Endpoint ======================================
 # Each beat can expose internal metrics through a HTTP endpoint. For security
 # reasons the endpoint is disabled by default. This feature is currently experimental.
 # Stats can be access through http://localhost:5066/stats . For pretty JSON output
@@ -794,18 +1029,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}

 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false

-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost

 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066

-#============================= Process Security ================================
+# Define which user should be owning the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe, using the
+# Security Descriptor Definition Language (SDDL). This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
+# ============================== Process Security ==============================

 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
 #seccomp.enabled: true

-#================================= Migration ==================================
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for Journalbeat.
+#instrumentation:
+    # Set to true to enable instrumentation of Journalbeat.
+    #enabled: false
+
+    # Environment in which Journalbeat is running (e.g. staging, production, etc.)
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+        #cpu:
+            # Set to true to enable CPU profiling.
+            #enabled: false
+            #interval: 60s
+            #duration: 10s
+        #heap:
+            # Set to true to enable heap profiling.
+            #enabled: false
+            #interval: 60s
+
+# ================================= Migration ==================================

 # This allows to enable 6.7 migration aliases
 #migration.6_to_7.enabled: false

diff --git a/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2 b/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2
index 437c94f9..1a299dd1 100644
--- a/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2
+++ b/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2
@@ -15,7 +15,7 @@ metricbeat.config.modules:

   # Glob pattern for configuration reloading
-  path: ${path.config}/conf.d/*.yml
+  path: ${path.config}/modules.d/*.yml

   # Period on which files under path should be checked for changes
   reload.period: 30s
@@ -44,7 +44,18 @@ metricbeat.max_start_delay: 10s
 #  metricsets: ["leader", "self", "store"]
 #  period: 30s
 #  hosts: ["${host}:2379"]
-#========================== Modules configuration ============================
+
+#=========================== Timeseries instance ===============================
+
+# Enabling this will add a `timeseries.instance` keyword field to all metric
+# events. For a given metricset, this field will be unique for every single item
+# being monitored.
+# This setting is experimental.
+ +#timeseries.enabled: false + + +#========================== Modules configuration ============================= {% set metric_sets = ['network', 'process', 'process_summary', 'uptime', 'service'] %} {% if physical_host is defined and physical_host != inventory_hostname %} {% set host_mount_devices = (hostvars[physical_host]['ansible_mounts'] | map(attribute='device') | list) %} @@ -60,7 +71,7 @@ metricbeat.max_start_delay: 10s {% endif %} metricbeat.modules: -#------------------------------- System Module ------------------------------- +#-------------------------------- System Module -------------------------------- - module: system metricsets: {{ metric_sets }} #- cpu # CPU usage @@ -77,6 +88,7 @@ metricbeat.modules: #- fsstat # File system summary metrics #- raid # Raid #- socket # Sockets and connection info (linux only) + #- service # systemd service information enabled: true period: 60s processes: ['.*'] @@ -135,16 +147,22 @@ metricbeat.modules: # Diskio configurations #diskio.include_devices: [] -#------------------------------ Aerospike Module ----------------------------- + # Filter systemd services by status or sub-status + #service.state_filter: ["active"] + + # Filter systemd services based on a name pattern + #service.pattern_filter: ["ssh*", "nfs*"] + +#------------------------------- ActiveMQ Module ------------------------------- +#------------------------------ Aerospike Module ------------------------------ #- module: aerospike # metricsets: ["namespace"] # enabled: true # period: 10s # hosts: ["localhost:3000"] -# -#------------------------------- Apache Module ------------------------------- -{% if apache_enabled | default(false) | bool %} +#-------------------------------- Apache Module -------------------------------- +{% if apache_enabled | default(false) | bool %} - module: apache metricsets: ["status"] period: 30s @@ -161,31 +179,55 @@ metricbeat.modules: # Password of hosts. 
Empty by default #password: password + {% endif %} -#-------------------------------- Ceph Module -------------------------------- +#------------------------------ App Search Module ------------------------------ +#--------------------------------- AWS Module --------------------------------- +#----------------------------- AWS Fargate Module ----------------------------- +#-------------------------------- Azure Module -------------------------------- +#--------------------------------- Beat Module --------------------------------- +#--------------------------------- Ceph Module --------------------------------- {% if ceph_restapi_enabled | default(false) | bool %} +# Metricsets depending on the Ceph REST API (default port: 5000) - module: ceph metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] period: 30s hosts: {{ ceph_stats_hosts | to_json }} enabled: true +# Metricsets depending on the Ceph Manager Daemon (default port: 8003) +#- module: ceph + #metricsets: + #- mgr_cluster_disk + #- mgr_osd_perf + #- mgr_pool_disk + #- mgr_osd_pool_stats + #- mgr_osd_tree + #period: 1m + #hosts: [ "https://localhost:8003" ] + #username: "user" + #password: "secret" + {% endif %} -#------------------------------ Couchbase Module ----------------------------- +#----------------------------- Cloudfoundry Module ----------------------------- +#----------------------------- CockroachDB Module ----------------------------- +#-------------------------------- Consul Module -------------------------------- +#------------------------------- Coredns Module ------------------------------- +#------------------------------ Couchbase Module ------------------------------ #- module: couchbase # metricsets: ["bucket", "cluster", "node"] # period: 10s # hosts: ["localhost:8091"] # enabled: true # -#------------------------------- couchdb Module ------------------------------ +#------------------------------- CouchDB Module ------------------------------- {% if couchdb_enabled | bool %} - module: couchdb metricsets: ["server"] period: 10s hosts: ["localhost:5984"] {% endif %} -#------------------------------- Docker Module ------------------------------- +#-------------------------------- Docker Module -------------------------------- {% if docker_enabled | default(false) | bool %} - module: docker metricsets: @@ -213,8 +255,9 @@ metricbeat.modules: #certificate_authority: "/etc/pki/root/ca.pem" #certificate: "/etc/pki/client/cert.pem" #key: "/etc/pki/client/cert.key" + {% endif %} -#----------------------------- Dropwizard Module ----------------------------- +#------------------------------ Dropwizard Module ------------------------------ #- module: dropwizard # metricsets: ["collector"] # period: 10s @@ -222,8 +265,8 @@ metricbeat.modules: # metrics_path: /metrics/metrics # namespace: example # enabled: true -# -#---------------------------- Elasticsearch Module --------------------------- + +#---------------------------- Elasticsearch Module ---------------------------- {% if inventory_hostname in (groups['elastic'] | union(groups['kibana'])) %} - module: elasticsearch metricsets: @@ -234,38 +277,35 @@ metricbeat.modules: - index_summary - shard - ml_job - enabled: true period: 30s hosts: ["localhost:{{ elastic_port }}"] #username: "elastic" #password: "changeme" #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - # Set to false to fetch all entries #index_recovery.active_only: true - - # Set to true to send data collected by module to X-Pack - # Monitoring instead of 
metricbeat-* indices. #xpack.enabled: false + #scope: node + {% endif %} -#----------------------------- envoyproxy Module ----------------------------- +#------------------------------ Envoyproxy Module ------------------------------ {% if envoyproxy_enabled | bool %} - module: envoyproxy metricsets: ["server"] period: 10s hosts: ["localhost:9901"] -{% endif %} -#-------------------------------- Etcd Module -------------------------------- +{% endif %} +#--------------------------------- Etcd Module --------------------------------- {% if etcd_enabled | default(false) | bool %} - module: etcd metricsets: ["leader", "self", "store"] - enabled: true period: 30s hosts: ["localhost:2379"] -{% endif %} -#------------------------------- Golang Module ------------------------------- +{% endif %} +#------------------------ Google Cloud Platform Module ------------------------ +#-------------------------------- Golang Module -------------------------------- #- module: golang #metricsets: # - expvar @@ -276,8 +316,8 @@ metricbeat.modules: # expvar: # namespace: "example" # path: "/debug/vars" -# -#------------------------------ Graphite Module ------------------------------ + +#------------------------------- Graphite Module ------------------------------- #- module: graphite # metricsets: ["server"] # enabled: true @@ -301,16 +341,24 @@ metricbeat.modules: # delimiter: "_" -#------------------------------- HAProxy Module ------------------------------ +#------------------------------- HAProxy Module ------------------------------- {% if haproxy_enabled | default(false) | bool %} - module: haproxy metricsets: ["info", "stat"] period: 30s + # TCP socket, UNIX socket, or HTTP address where HAProxy stats are reported + # TCP socket + #hosts: ["tcp://127.0.0.1:14567"] + # UNIX socket hosts: [ {{ elastic_metricbeat_haproxy_monitoring_hosts }} ] + # Stats page + #hosts: ["http://127.0.0.1:14567"] + #username : "admin" + #password : "admin" enabled: true -{% endif %} -#-------------------------------- HTTP Module -------------------------------- +{% endif %} +#--------------------------------- HTTP Module --------------------------------- #- module: http #metricsets: # - json @@ -339,7 +387,10 @@ metricbeat.modules: # fields: # added to the the response in root. 
overwrites existing fields # key: "value" -#------------------------------- Jolokia Module ------------------------------ +#-------------------------------- IBM MQ Module -------------------------------- +#--------------------------------- IIS Module --------------------------------- +#-------------------------------- Istio Module -------------------------------- +#------------------------------- Jolokia Module ------------------------------- #- module: jolokia #metricsets: ["jmx"] #period: 10s @@ -370,12 +421,14 @@ metricbeat.modules: #jmx.application: #jmx.instance: -#-------------------------------- Kafka Module ------------------------------- +#-------------------------------- Kafka Module -------------------------------- +# Kafka metrics collected using the Kafka protocol #- module: kafka - #metricsets: ["consumergroup", "partition"] + #metricsets: + # - partition + # - consumergroup #period: 10s #hosts: ["localhost:9092"] - #enabled: true #client_id: metricbeat #retries: 3 @@ -394,11 +447,35 @@ metricbeat.modules: # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" + # Client Certificate Passphrase (in case your Client Certificate Key is encrypted) + #ssl.key_passphrase: "yourKeyPassphrase" + # SASL authentication #username: "" #password: "" -#------------------------------- Kibana Module ------------------------------- +# Metrics collected from a Kafka broker using Jolokia +#- module: kafka +# metricsets: +# - broker +# period: 10s +# hosts: ["localhost:8779"] + +# Metrics collected from a Java Kafka consumer using Jolokia +#- module: kafka +# metricsets: +# - consumer +# period: 10s +# hosts: ["localhost:8774"] + +# Metrics collected from a Java Kafka producer using Jolokia +#- module: kafka +# metricsets: +# - producer +# period: 10s +# hosts: ["localhost:8775"] + +#-------------------------------- Kibana Module -------------------------------- {% if inventory_hostname in groups['kibana'] | default([]) %} - module: kibana metricsets: ["status"] @@ -410,8 +487,9 @@ metricbeat.modules: # Set to true to send data collected by module to X-Pack # Monitoring instead of metricbeat-* indices. 
#xpack.enabled: false + {% endif %} -#----------------------------- Kubernetes Module ----------------------------- +#------------------------------ Kubernetes Module ------------------------------ # Node metrics, from kubelet: #- module: kubernetes # metricsets: @@ -421,9 +499,10 @@ metricbeat.modules: # - system # - volume # period: 10s -# hosts: ["localhost:10255"] # enabled: true +# hosts: ["https://${NODE_NAME}:10250"] # #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token +# ssl.verification_mode: "none" # #ssl.certificate_authorities: # # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt # #ssl.certificate: "/etc/pki/client/cert.pem" @@ -431,48 +510,84 @@ metricbeat.modules: # # # Enriching parameters: # add_metadata: true -# in_cluster: true # # When used outside the cluster: # #host: node_name + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster # #kube_config: ~/.kube/config -# -## State metrics from kube-state-metrics service: + +# State metrics from kube-state-metrics service: #- module: kubernetes # enabled: true # metricsets: # - state_node +# - state_daemonset # - state_deployment # - state_replicaset # - state_statefulset # - state_pod # - state_container -# period: 10s +# - state_cronjob +# - state_resourcequota +# - state_service +# - state_persistentvolume +# - state_persistentvolumeclaim +# - state_storageclass +# Uncomment this to get k8s events: +# - event period: 10s # hosts: ["kube-state-metrics:8080"] -# -# # Enriching parameters: + + # Enriching parameters: # add_metadata: true -# in_cluster: true -# # When used outside the cluster: + # When used outside the cluster: # #host: node_name + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster # #kube_config: ~/.kube/config -# -# Kubernetes events -#- module: kubernetes -# enabled: true -# metricsets: -# - event -# + # Kubernetes API server +# (when running metricbeat as a deployment) #- module: kubernetes # enabled: true # metricsets: # - apiserver # hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"] +# bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token +# ssl.certificate_authorities: +# - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt +# period: 30s -#--------------------------------- kvm Module -------------------------------- +# Kubernetes proxy server +# (when running metricbeat locally at hosts or as a daemonset + host network) +#- module: kubernetes +# enabled: true +# metricsets: +# - proxy +# hosts: ["localhost:10249"] +# period: 10s + +# Kubernetes controller manager +# (URL and deployment method should be adapted to match the controller manager deployment / service / endpoint) +#- module: kubernetes +# enabled: true +# metricsets: +# - controllermanager +# hosts: ["http://localhost:10252"] +# period: 10s + +# Kubernetes scheduler +# (URL and deployment method should be adapted to match scheduler deployment / service / endpoint) +#- module: kubernetes +# enabled: true +# metricsets: +# - scheduler +# hosts: ["localhost:10251"] +# period: 10s + +#--------------------------------- KVM Module --------------------------------- {% if kvm_enabled | default(false) | bool %} - module: kvm - metricsets: ["dommemstat"] + metricsets: ["dommemstat", "status"] enabled: true period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] @@ -482,61 +597,63 @@ metricbeat.modules: # Timeout to 
connect to Libvirt server #timeout: 1s -{% endif %} -#------------------------------ Logstash Module ------------------------------ +{% endif %} +#-------------------------------- Linux Module -------------------------------- +#------------------------------- Logstash Module ------------------------------- {% if inventory_hostname in groups['logstash'] | default([]) %} - module: logstash metricsets: ["node", "node_stats"] enabled: true period: 10s hosts: ["localhost:9600"] -{% endif %} -#------------------------------ Memcached Module ----------------------------- +{% endif %} +#------------------------------ Memcached Module ------------------------------ {% if memcached_enabled | default(false) | bool %} - module: memcached metricsets: ["stats"] - enabled: true period: 30s hosts: ["{{ ansible_hostname }}:11211"] -{% endif %} + enabled: true -#------------------------------- MongoDB Module ------------------------------ +{% endif %} +#------------------------------- MongoDB Module ------------------------------- #- module: mongodb # metricsets: ["dbstats", "status", "collstats", "metrics", "replstatus"] # period: 10s # enabled: true -# # The hosts must be passed as MongoDB URLs in the format: -# # [mongodb://][user:pass@]host[:port]. -# # The username and password can also be set using the respective configuration -# # options. The credentials in the URL take precedence over the username and -# # password configuration options. + # The hosts must be passed as MongoDB URLs in the format: + # [mongodb://][user:pass@]host[:port]. + # The username and password can also be set using the respective configuration + # options. The credentials in the URL take precedence over the username and + # password configuration options. # hosts: ["localhost:27017"] -# -# # Optional SSL. By default is off. + + # Optional SSL. By default is off. # #ssl.enabled: true -# -# # Mode of verification of server certificate ('none' or 'full') + + # Mode of verification of server certificate ('none' or 'full') # #ssl.verification_mode: 'full' -# -# # List of root certificates for TLS server verifications + + # List of root certificates for TLS server verifications # #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] -# -# # Certificate for SSL client authentication + + # Certificate for SSL client authentication # #ssl.certificate: "/etc/pki/client/cert.pem" -# -# # Client Certificate Key + + # Client Certificate Key # #ssl.key: "/etc/pki/client/cert.key" -# -# # Username to use when connecting to MongoDB. Empty by default. + + # Username to use when connecting to MongoDB. Empty by default. # #username: user -# -# # Password to use when connecting to MongoDB. Empty by default. + + # Password to use when connecting to MongoDB. Empty by default. # #password: pass -# -#-------------------------------- Munin Module ------------------------------- + +#-------------------------------- MSSQL Module -------------------------------- +#-------------------------------- Munin Module -------------------------------- #- module: munin # metricsets: ["node"] # enabled: true @@ -552,16 +669,19 @@ metricbeat.modules: # are replaced by underscores). 
#munin.sanitize: false -#-------------------------------- MySQL Module ------------------------------- +#-------------------------------- MySQL Module -------------------------------- {% if (mysql_enabled | default(false) | bool) and galera_root_user is defined and galera_root_password is defined %} - module: mysql metricsets: - - "status" - # - "galera_status" - enabled: true + - status + # - galera_status + # - performance + # - query period: 30s # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" + # or "unix(/var/lib/mysql/mysql.sock)/", + # or another DSN format supported by the Go MySQL driver (see https://godoc.org/github.com/go-sql-driver/mysql#Config). # The username and password can either be set in the DSN or using the username # and password config options. Those specified in the DSN take precedence. hosts: ["{{ galera_root_user }}:{{ galera_root_password }}@tcp({{ ansible_hostname }}:3306)/"] @@ -574,19 +694,29 @@ metricbeat.modules: # By setting raw to true, all raw fields from the status metricset will be added to the event. #raw: false + {% endif %} -#-------------------------------- Nats Module -------------------------------- +#--------------------------------- NATS Module --------------------------------- {% if nats_enabled | bool %} - module: nats - metricsets: ["connections", "routes", "stats", "subscriptions"] + metricsets: + - "connections" + - "routes" + - "stats" + - "subscriptions" + #- "connection" + #- "route" period: 10s hosts: ["localhost:8222"] #stats.metrics_path: "/varz" #connections.metrics_path: "/connz" #routes.metrics_path: "/routez" #subscriptions.metrics_path: "/subsz" + #connection.metrics_path: "/connz" + #route.metrics_path: "/routez" + {% endif %} -#-------------------------------- Nginx Module ------------------------------- +#-------------------------------- Nginx Module -------------------------------- {% if nginx_enabled | default(false) | bool %} - module: nginx metricsets: ["stubstatus"] @@ -596,10 +726,13 @@ metricbeat.modules: # Nginx hosts hosts: ["http://127.0.1.1:18182"] - # Path to server status. Default server-status - server_status_path: "server-status" + # Path to server status. Default nginx_status + server_status_path: "nginx_status" + {% endif %} -#------------------------------- PHP_FPM Module ------------------------------ +#----------------------------- Openmetrics Module ----------------------------- +#-------------------------------- Oracle Module -------------------------------- +#------------------------------- PHP_FPM Module ------------------------------- #- module: php_fpm # metricsets: # - pool @@ -608,104 +741,145 @@ metricbeat.modules: # period: 10s # status_path: "/status" # hosts: ["localhost:8080"] - -#----------------------------- PostgreSQL Module ----------------------------- +#------------------------------ PostgreSQL Module ------------------------------ #- module: postgresql # enabled: true # metricsets: -# # Stats about every PostgreSQL database + # Stats about every PostgreSQL database # - database -# -# # Stats about the background writer process's activity + + # Stats about the background writer process's activity # - bgwriter -# -# # Stats about every PostgreSQL process + + # Stats about every PostgreSQL process # - activity -# + # period: 30s -# -# # The host must be passed as PostgreSQL URL.
Example: + # postgres://localhost:5432?sslmode=disable + # The available parameters are documented here: + # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters # hosts: ["postgres://localhost:5432"] -# -# # Username to use when connecting to PostgreSQL. Empty by default. + + # Username to use when connecting to PostgreSQL. Empty by default. # #username: user -# -# # Password to use when connecting to PostgreSQL. Empty by default. + + # Password to use when connecting to PostgreSQL. Empty by default. # #password: pass -#----------------------------- Prometheus Module ----------------------------- +#----------------------- Prometheus Typed Metrics Module ----------------------- +#------------------------------ Prometheus Module ------------------------------ {% if (prometheus_enabled | default(false) | bool) and (prometheus_config is defined) %} {% for prometheus in prometheus_config %} +# Metrics collected from a Prometheus endpoint - module: prometheus - metricsets: [{% for mset in prometheus.metricsets|default(["collector"]) %}"{{ mset }}"{% if not loop.last %},{% endif %}{% endfor %}] - enabled: {{ prometheus.enabled | default('true') }} period: {{ prometheus.period | default("10s") }} + metricsets: [{% for mset in prometheus.metricsets|default(["collector"]) %}"{{ mset }}"{% if not loop.last %},{% endif %}{% endfor %}] hosts: [{% for phost in prometheus.hosts %}"{{ phost }}"{% if not loop.last %},{% endif %}{% endfor %}] metrics_path: {{ prometheus.metrics_path | default("/metrics") }} + #metrics_filters: + # include: [] + # exclude: [] + #username: "user" + #password: "secret" namespace: {{ prometheus.namespace }} + enabled: {{ prometheus.enabled | default('true') }} # {% endfor %} + {% endif %} {% if (ceph_prometheus_enabled | default(false) | bool) %} - module: prometheus - metricsets: ["collector"] - enabled: 'true' period: "10s" + metricsets: ["collector"] hosts: [{% for phost in ceph_stats_hosts %}"{{ phost | regex_replace(':\\d+$', '') }}:9283"{% if not loop.last %},{% endif %}{% endfor %}] metrics_path: "/metrics" namespace: ceph -{% endif %} # This can be used for service account based authorization: - # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token #ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt -#------------------------------ RabbitMQ Module ------------------------------ +{% endif %} +# Metrics sent by a Prometheus server using remote_write option +#- module: prometheus +# metricsets: ["remote_write"] +# host: "localhost" +# port: "9201" + + # Secure settings for the server using TLS/SSL: + #ssl.certificate: "/etc/pki/server/cert.pem" + #ssl.key: "/etc/pki/server/cert.key" + +# Metrics that will be collected using a PromQL +#- module: prometheus +# metricsets: ["query"] +# hosts: ["localhost:9090"] +# period: 10s +# queries: +# - name: "instant_vector" +# path: "/api/v1/query" +# params: +# query: "sum(rate(prometheus_http_requests_total[1m]))" +# - name: "range_vector" +# path: "/api/v1/query_range" +# params: +# query: "up" +# start: "2019-12-20T00:00:00.000Z" +# end: "2019-12-21T00:00:00.000Z" +# step: 1h +# - name: "scalar" +# path: "/api/v1/query" +# params: +# query: "100" +# - name: "string" +# path: "/api/v1/query" +# params: +# query: "some_value" + +#------------------------------- RabbitMQ Module ------------------------------- {% if (rabbitmq_enabled | default(false) | bool) and 
(rabbitmq_monitoring_password is defined) %} - module: rabbitmq metricsets: ["node", "queue", "connection"] enabled: true period: 30s hosts: [ {{ elastic_metricbeat_rabbitmq_monitoring_hosts }} ] - username: {{ rabbitmq_monitoring_userid | default('monitoring') }} - password: {{ rabbitmq_monitoring_password }} -{% endif %} + # Management path prefix, if `management.path_prefix` is set in RabbitMQ # configuration, it has to be set to the same value. #management_path_prefix: "" - #username: guest - #password: guest + username: {{ rabbitmq_monitoring_userid | default('monitoring') }} + password: {{ rabbitmq_monitoring_password }} -#-------------------------------- Redis Module ------------------------------- +{% endif %} +#-------------------------------- Redis Module -------------------------------- #- module: redis # metricsets: ["info", "keyspace"] # enabled: true # period: 10s -# -# # Redis hosts -# hosts: ["127.0.0.1:6379"] -# -# # Timeout after which time a metricset should return an error -# # Timeout is by default defined as period, as a fetch of a metricset -# # should never take longer then period, as otherwise calls can pile up. -# #timeout: 1s -# -# # Optional fields to be added to each event -# #fields: -# # datacenter: west -# -# # Network type to be used for redis connection. Default: tcp -# #network: tcp -# -# # Max number of concurrent connections. Default: 10 -# #maxconn: 10 -# -# # Filters can be used to reduce the number of fields sent. + + # Redis hosts + #hosts: ["127.0.0.1:6379"] + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer than period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. #processors: # - include_fields: # fields: ["beat", "metricset", "redis.info.stats"] @@ -713,15 +887,21 @@ metricbeat.modules: # Redis AUTH password. Empty by default.
#password: foobared -#------------------------------- traefik Module ------------------------------ +#--------------------------- Redis Enterprise Module --------------------------- +#--------------------------------- SQL Module --------------------------------- +#--------------------------------- Stan Module --------------------------------- + +#-------------------------------- Statsd Module -------------------------------- +#-------------------------------- Tomcat Module -------------------------------- +#------------------------------- Traefik Module ------------------------------- {% if traefik_enabled | bool %} - module: traefik metricsets: ["health"] period: 10s hosts: ["localhost:8080"] -{% endif %} -#-------------------------------- uwsgi Module ------------------------------- +{% endif %} +#-------------------------------- UWSGI Module -------------------------------- {% if uwsgi_enabled | default(false) | bool %} - module: uwsgi metricsets: ["status"] @@ -732,49 +912,51 @@ metricbeat.modules: {% else %} hosts: ["tcp://127.0.0.1:9191"] {% endif %} + {% endif %} - -#------------------------------- vSphere Module ------------------------------ +#------------------------------- VSphere Module ------------------------------- #- module: vsphere -# enabled: true -# metricsets: ["datastore", "host", "virtualmachine"] -# period: 10s -# hosts: ["https://localhost/sdk"] -# -# username: "user" -# password: "password" -# # If insecure is true, don't verify the server's certificate chain -# insecure: false -# # Get custom fields when using virtualmachine metric set. Default false. -# # get_custom_fields: false + #enabled: true + #metricsets: ["datastore", "host", "virtualmachine"] + #period: 10s + #hosts: ["https://localhost/sdk"] -#------------------------------- Windows Module ------------------------------ + #username: "user" + #password: "password" + # If insecure is true, don't verify the server's certificate chain + #insecure: false + # Get custom fields when using virtualmachine metric set. Default false. + # get_custom_fields: false + +#------------------------------- Windows Module ------------------------------- #- module: windows # metricsets: ["perfmon"] # enabled: true # period: 10s -# perfmon.ignore_non_existent_counters: true -# perfmon.counters: -# # - instance_label: processor.name -# # instance_name: total -# # measurement_label: processor.time.total.pct -# # query: '\Processor Information(_Total)\% Processor Time' -# +# perfmon.ignore_non_existent_counters: false +# perfmon.group_measurements_by_instance: false +# perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: '% Processor Time' +# field: cpu_usage +# format: "float" +# - name: "Thread Count" + #- module: windows # metricsets: ["service"] # enabled: true # period: 60s -# -#------------------------------ ZooKeeper Module ----------------------------- + +#------------------------------ ZooKeeper Module ------------------------------ #- module: zookeeper # enabled: true # metricsets: ["mntr", "server"] # period: 10s # hosts: ["localhost:2181"] - - -#================================ General ====================================== +# ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. 
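The Windows module hunk above replaces the flat `perfmon.counters` list with grouped `perfmon.queries`. As a rough sketch, the counter from the removed example, `'\Processor Information(_Total)\% Processor Time'`, maps onto the new layout as follows (the `cpu_total_pct` field name is illustrative, not taken from the patch; the split follows the `\Object(Instance)\Counter` path convention):

#- module: windows
#  metricsets: ["perfmon"]
#  enabled: true
#  period: 10s
#  perfmon.queries:
#    - object: 'Processor Information'
#      instance: ["_Total"]
#      counters:
#        - name: '% Processor Time'
#          field: cpu_total_pct
#          format: "float"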
@@ -815,9 +997,44 @@ metricbeat.modules: #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < min_flush_events. + # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s + # The disk queue stores incoming events on disk until the output is + # ready for them. This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. + #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # @@ -882,7 +1099,7 @@ metricbeat.modules: # default is the number of logical CPUs available in the system. #max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -900,103 +1117,155 @@ metricbeat.modules: # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. 
# #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. # #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] 
+# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true -#============================= Elastic Cloud ================================== +# =============================== Elastic Cloud ================================ -# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/). +# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. @@ -1007,120 +1276,162 @@ metricbeat.modules: # `output.elasticsearch.password` settings. The format is `:`. #cloud.auth: -#================================ Outputs ====================================== +# ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------- +# ---------------------------- Elasticsearch Output ---------------------------- #output.elasticsearch: -# # Boolean flag to enable or disable the output module. -# #enabled: true -# -# # Array of hosts to connect to. -# # Scheme and port can be left out and will be set to the default (http and 9200) -# # In case you specify and additional path, the scheme is required: http://localhost:9200/path -# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 -# hosts: ["localhost:9200"] -# -# # Set gzip compression level. -# #compression_level: 0 -# -# # Configure escaping HTML symbols in strings. -# #escape_html: false -# -# # Optional protocol and basic auth credentials. -# #protocol: "https" -# #username: "elastic" -# #password: "changeme" -# -# # Dictionary of HTTP parameters to pass within the URL with index operations. -# #parameters: -# #param1: value1 -# #param2: value2 -# -# # Number of workers per Elasticsearch host. -# #worker: 1 -# -# # Optional index name. The default is "metricbeat" plus date -# # and generates [metricbeat-]YYYY.MM.DD keys. -# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. -# #index: "metricbeat-%{[agent.version]}-%{+yyyy.MM.dd}" -# -# # Optional ingest node pipeline. By default no pipeline will be used. 
-# #pipeline: "" -# -# # Optional HTTP path -# #path: "/elasticsearch" -# -# # Custom HTTP headers to add to each request -# #headers: -# # X-My-Header: Contents of the header -# -# # Proxy server URL -# #proxy_url: http://proxy:3128 -# -# # The number of times a particular Elasticsearch index operation is attempted. If -# # the indexing operation doesn't succeed after this many retries, the events are -# # dropped. The default is 3. -# #max_retries: 3 -# -# # The maximum number of events to bulk in a single Elasticsearch bulk API index request. -# # The default is 50. -# #bulk_max_size: 50 -# -# # The number of seconds to wait before trying to reconnect to Elasticsearch -# # after a network error. After waiting backoff.init seconds, the Beat -# # tries to reconnect. If the attempt fails, the backoff timer is increased -# # exponentially up to backoff.max. After a successful connection, the backoff -# # timer is reset. The default is 1s. -# #backoff.init: 1s -# -# # The maximum number of seconds to wait before attempting to connect to -# # Elasticsearch after a network error. The default is 60s. -# #backoff.max: 60s -# -# # Configure HTTP request timeout before failing a request to Elasticsearch. -# #timeout: 90 -# -# # Use SSL settings for HTTPS. -# #ssl.enabled: true -# -# # Configure SSL verification mode. If `none` is configured, all server hosts -# # and certificates will be accepted. In this mode, SSL-based connections are -# # susceptible to man-in-the-middle attacks. Use only for testing. Default is -# # `full`. -# #ssl.verification_mode: full -# -# # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to -# # 1.2 are enabled. -# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] -# -# # List of root certificates for HTTPS server verifications -# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] -# -# # Certificate for SSL client authentication -# #ssl.certificate: "/etc/pki/client/cert.pem" -# -# # Client certificate key -# #ssl.key: "/etc/pki/client/cert.key" -# -# # Optional passphrase for decrypting the certificate key. -# #ssl.key_passphrase: '' -# -# # Configure cipher suites to be used for SSL connections -# #ssl.cipher_suites: [] -# -# # Configure curve types for ECDHE-based cipher suites -# #ssl.curve_types: [] -# -# # Configure what types of renegotiation are supported. Valid options are -# # never, once, and freely. Default is never. -# #ssl.renegotiation: never + # Boolean flag to enable or disable the output module. + #enabled: true + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify an additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] -#----------------------------- Logstash output --------------------------------- + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name.
The default is "metricbeat" plus date + # and generates [metricbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "metricbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. 
+ #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allows you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} -#------------------------------- Kafka output ---------------------------------- +# -------------------------------- Kafka Output -------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true @@ -1155,7 +1466,11 @@ metricbeat.modules: #username: '' #password: '' - # Kafka version metricbeat is assumed to run against. Defaults to the "1.0.0". + # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: ''
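# As a sketch, a Kafka output authenticating with SCRAM would combine the
# settings above roughly as follows (broker address, topic name, and
# credentials are placeholders, not values from this patch):
#
#output.kafka:
#  hosts: ["kafka1:9092"]
#  topic: "metricbeat"
#  username: "beats"
#  password: "changeme"
#  sasl.mechanism: "SCRAM-SHA-256"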
+ + # Kafka version Metricbeat is assumed to run against. Defaults to "1.0.0". #version: '1.0.0' # Configure JSON encoding @@ -1179,8 +1494,8 @@ metricbeat.modules: # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m - # Strategy for fetching the topics metadata from the broker. Default is true. - #full: true + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false # The number of concurrent load-balanced Kafka output workers. #worker: 1 @@ -1192,10 +1507,25 @@ metricbeat.modules: # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s @@ -1234,30 +1564,37 @@ metricbeat.modules: # purposes. The default is "beats". #client_id: beats - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections @@ -1270,7 +1607,38 @@ metricbeat.modules: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allows you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is constructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# -------------------------------- Redis Output -------------------------------- #output.redis: # Boolean flag to enable or disable the output module.
#enabled: true @@ -1286,6 +1654,8 @@ metricbeat.modules: # The list of Redis servers to connect to. If load-balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss URLs with custom password like + # redis://:password@localhost:6379. #hosts: ["localhost:6379"] # The name of the Redis list or channel the events are published to. The @@ -1350,43 +1720,57 @@ metricbeat.modules: # occurs on the proxy server. #proxy_use_local_resolver: false - # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- File output ----------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allows you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# -------------------------------- File Output --------------------------------- #output.file: # Boolean flag to enable or disable the output module.
#enabled: true @@ -1408,7 +1792,7 @@ metricbeat.modules: #filename: metricbeat # Maximum size in kilobytes of each file. When this size is reached, and on - # every metricbeat restart, the files are rotated. The default value is 10240 + # every Metricbeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 @@ -1420,8 +1804,7 @@ metricbeat.modules: # Permissions to use for file creation. The default is 0600. #permissions: 0600 - -#----------------------------- Console output --------------------------------- +# ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true @@ -1434,82 +1817,101 @@ metricbeat.modules: # Configure escaping HTML symbols in strings. #escape_html: false -#================================= Paths ====================================== +# =================================== Paths ==================================== -# The home path for the metricbeat installation. This is the default base path +# The home path for the Metricbeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: -# The configuration path for the metricbeat installation. This is the default +# The configuration path for the Metricbeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} -# The data path for the metricbeat installation. This is the default base path -# for all the files in which metricbeat needs to store its data. If not set by a +# The data path for the Metricbeat installation. This is the default base path +# for all the files in which Metricbeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data -# The logs path for a metricbeat installation. This is the default location for +# The logs path for a Metricbeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs -#================================ Keystore ========================================== +# ================================== Keystore ================================== + # Location of the Keystore containing the keys and their sensitive values. 
#keystore.path: "${path.config}/beats.keystore" -#============================== Dashboards ===================================== +# ================================= Dashboards ================================= + {% if (groups['kibana'] | length) > 0 and (setup_kibana_dashboards | default(true)) %} {{ elk_macros.setup_dashboards('metricbeat') }} {% endif %} - -#============================== Template ===================================== +# ================================== Template ================================== {{ elk_macros.setup_template('metricbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} -#============================== Setup ILM ===================================== +# ====================== Index Lifecycle Management (ILM) ====================== -# Configure Index Lifecycle Management Index Lifecycle Management creates a -# write alias and adds additional settings to the template. -# The elasticsearch.output.index setting will be replaced with the write alias -# if ILM is enabled. +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. -# Enabled ILM support. Valid values are true, false, and auto. The beat will -# detect availabilty of Index Lifecycle Management in Elasticsearch and enable -# or disable ILM support. +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. #setup.ilm.enabled: auto -# Configure the ILM write alias name. -#setup.ilm.rollover_alias: "metricbeat" +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'metricbeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'metricbeat' -# Configure rollover index pattern. +# Set the rollover index pattern. The default is "%{now/d}-000001". #setup.ilm.pattern: "{now/d}-000001" + {% if ilm_policy_name is defined %} +# Set the lifecycle policy name. The default policy name is +# 'beatname'. setup.ilm.policy_name: "{{ ilm_policy_name }}" + {% endif %} {% if ilm_policy_file_location is defined %} +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}" -{% endif %} -#============================== Kibana ===================================== +{% endif %} +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy +# can be installed. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. 
+#setup.ilm.overwrite: false + +# =================================== Kibana =================================== + {% if (groups['kibana'] | length) > 0 and (setup_kibana_dashboards | default(true)) %} {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} {% endif %} -#================================ Logging ====================================== +# ================================== Logging =================================== + {{ elk_macros.beat_logging('metricbeat', metricbeat_log_level) }} -#============================== Xpack Monitoring ===================================== -{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} +# ============================= X-Pack Monitoring ============================== +{{ elk_macros.xpack_monitoring_elasticsearch('metricbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +# =============================== HTTP Endpoint ================================ -#================================ HTTP Endpoint ====================================== # Each beat can expose internal metrics through a HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. # Stats can be access through http://localhost:5066/stats . For pretty JSON output @@ -1518,18 +1920,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }} # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066
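# Taken together, a minimal sketch that serves the monitoring endpoint on the
# default local address would be:
#
#http.enabled: true
#http.host: localhost
#http.port: 5066
#
# after which stats are available at http://localhost:5066/stats.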
-#============================= Process Security ================================ +# Define which user should own the named pipe. +#http.named_pipe.user: + +# Define the permissions that should be applied to the named pipe, using the Security +# Descriptor Definition Language (SDDL). This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + +# ============================== Process Security ============================== # Enable or disable seccomp system call filtering on Linux. Default is enabled. #seccomp.enabled: true -#================================= Migration ================================== +# ============================== Instrumentation =============================== + +# Instrumentation support for metricbeat. +#instrumentation: # Set to true to enable instrumentation of metricbeat. #enabled: false # Environment in which metricbeat is running (e.g. staging, production, etc.) #environment: "" # APM Server hosts to report instrumentation results to. #hosts: # - http://localhost:8200 # API Key for the APM Server(s). # If api_key is set then secret_token will be ignored. #api_key: # Secret token for the APM Server(s). #secret_token: # Enable profiling of the server, recording profile samples as events. # # This feature is experimental. #profiling: #cpu: # Set to true to enable CPU profiling. #enabled: false #interval: 60s #duration: 10s #heap: # Set to true to enable heap profiling. + #enabled: false + #interval: 60s + +# ================================= Migration ================================== # This allows to enable 6.7 migration aliases #migration.6_to_7.enabled: false diff --git a/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 b/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 index 87654778..8c0d6841 100644 --- a/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 +++ b/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 @@ -8,12 +8,22 @@ # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/packetbeat/index.html -#============================== Network device ================================ +# =============================== Network device =============================== # Select the network interface to sniff the data. You can use the "any" # keyword to sniff on all connected interfaces. packetbeat.interfaces.device: any +# The network CIDR blocks that are considered "internal" networks for +# the purpose of network perimeter boundary classification. The valid +# values for internal_networks are the same as those that can be used +# with processor network conditions. +# +# For a list of available values see: +# https://www.elastic.co/guide/en/beats/packetbeat/current/defining-processors.html#condition-network +#packetbeat.interfaces.internal_networks: +# - private + # Packetbeat supports three sniffer types: # * pcap, which uses the libpcap library and works on most platforms, but it's # not the fastest option. @@ -41,7 +51,14 @@ packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter: -#================================== Flows ===================================== +# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup. +# This option does not work with `any` interface device. +# The default option is false and requires manual set-up of promiscuous mode. +# Warning: under some circumstances (e.g. beat crash) promiscuous mode +# can stay enabled even after beat is shut down. +#packetbeat.interfaces.auto_promisc_mode: true + +# =================================== Flows ==================================== packetbeat.flows: # Enable Network flows. Default: true @@ -54,13 +71,25 @@ packetbeat.flows: # Configure reporting period. If set to -1, only killed flows will be reported period: 30s -#========================== Transaction protocols ============================= + # Set to true to publish fields with null values in events. + #keep_null: false + + # Overrides where flow events are indexed. + #index: my-custom-flow-index + +# =========================== Transaction protocols ============================ packetbeat.protocols: - type: icmp # Enable ICMPv4 and ICMPv6 monitoring. Default: true enabled: true + # Set to true to publish fields with null values in events. + #keep_null: false + + # Overrides where this protocol's events are indexed. + #index: my-custom-icmp-index + - type: amqp # Enable AMQP monitoring. Default: true enabled: true @@ -94,12 +123,17 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately.
#transaction_timeout: 10s -- type: cassandra + # Overrides where this protocol's events are indexed. + #index: my-custom-amqp-index +- type: cassandra #Cassandra port for traffic monitoring. ports: [9042] @@ -119,6 +153,9 @@ packetbeat.protocols: # is included in published events. The default is true. enable `send_response` first before enable this option. #send_response_header: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` is can be configured. # By default no compressor is configured. #compressor: "snappy" @@ -126,10 +163,16 @@ packetbeat.protocols: # This option indicates which Operator/Operators will be ignored. #ignored_ops: ["SUPPORTED","OPTIONS"] + # Overrides where this protocol's events are indexed. + #index: my-custom-cassandra-index + - type: dhcpv4 # Configure the DHCP for IPv4 ports. ports: [67, 68] + # Set to true to publish fields with null values in events. + #keep_null: false + - type: dns # Enable DNS monitoring. Default: true #enabled: true @@ -156,10 +199,16 @@ packetbeat.protocols: # send_request: true # send_response: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-dhcpv4-index + - type: http # Enable HTTP monitoring. Default: true #enabled: true @@ -193,6 +242,11 @@ packetbeat.protocols: # all headers by setting this option to true. The default is false. send_all_headers: true + # A list of headers to redact if present in the HTTP request. This will keep + # the header field present, but will redact its value to show the header's + # presence. + #redact_headers: [] + # The list of content types for which Packetbeat includes the full HTTP # payload. If the request's or response's Content-Type matches any on this # list, the full body will be included under the request or response field. @@ -227,6 +281,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -235,6 +292,9 @@ packetbeat.protocols: # be trimmed to this size. Default is 10 MB. #max_message_size: 10485760 + # Overrides where this protocol's events are indexed. + #index: my-custom-http-index + - type: memcache # Enable memcache monitoring. Default: true #enabled: true @@ -280,10 +340,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-memcache-index + - type: mysql # Enable mysql monitoring. Default: true #enabled: true @@ -300,10 +366,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events.
+ #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mysql-index + - type: pgsql # Enable pgsql monitoring. Default: true enabled: false @@ -320,10 +392,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-pgsql-index + - type: redis # Enable redis monitoring. Default: true enabled: false @@ -340,10 +418,25 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Max size for per-session message queue. This places a limit on the memory + # that can be used to buffer requests and responses for correlation. + #queue_max_bytes: 1048576 + + # Max number of messages for per-session message queue. This limits the number + # of requests or responses that can be buffered for correlation. Set a value + # large enough to allow for pipelining. + #queue_max_messages: 20000 + + # Overrides where this protocol's events are indexed. + #index: my-custom-redis-index + - type: thrift # Enable thrift monitoring. Default: true enabled: false @@ -395,10 +488,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-thrift-index + - type: mongodb # Enable mongodb monitoring. Default: true enabled: false @@ -425,10 +524,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-mongodb-index + - type: nfs # Enable NFS monitoring. Default: true enabled: true @@ -445,10 +550,16 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s + # Overrides where this protocol's events are indexed. + #index: my-custom-nfs-index + - type: tls # Enable TLS monitoring. Default: true #enabled: true @@ -476,7 +587,31 @@ packetbeat.protocols: # in PEM format under the `raw` key. The default is false. 
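  # For example, you might enable this (`include_raw_certificates: true`) to
  # keep the full PEM chain for offline analysis, at the cost of noticeably
  # larger events.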
#include_raw_certificates: false -#=========================== Monitored processes ============================== + # Set to true to publish fields with null values in events. + #keep_null: false + + # Overrides where this protocol's events are indexed. + #index: my-custom-tls-index + +- type: sip + # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports. + enabled: false + + ports: [5060] + + # Parse the authorization headers + parse_authorization: true + + # Parse body contents (only when body is SDP) + parse_body: true + + # Preserve original contents in event.original + keep_original: true + + # Overrides where this protocol's events are indexed. + #index: my-custom-sip-index + +# ============================ Monitored processes ============================= # Packetbeat can enrich events with information about the process associated # the socket that sent or received the packet if Packetbeat is monitoring @@ -490,7 +625,7 @@ packetbeat.procs.enabled: false # false. packetbeat.ignore_outgoing: false -#================================ General ====================================== +# ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. @@ -531,9 +666,44 @@ packetbeat.ignore_outgoing: false #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < min_flush_events. + # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s + # The disk queue stores incoming events on disk until the output is + # ready for them. This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. + #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # @@ -598,7 +768,7 @@ packetbeat.ignore_outgoing: false # default is the number of logical CPUs available in the system. 
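# For example, `max_procs: 2` would cap the Beat at two cores even on a
# larger host.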
#max_procs: -#================================ Processors =================================== +# ================================= Processors ================================= {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to @@ -616,103 +786,155 @@ packetbeat.ignore_outgoing: false # values: # #processors: -#- include_fields: -# fields: ["cpu"] -#- drop_fields: -# fields: ["cpu.user", "cpu.system"] +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: -#- drop_event: -# when: -# equals: -# http.code: 200 +# - drop_event: +# when: +# equals: +# http.code: 200 # # The following example renames the field a to b: # #processors: -#- rename: -# fields: -# - from: "a" -# to: "b" +# - rename: +# fields: +# - from: "a" +# to: "b" # # The following example tokenizes the string into fields: # #processors: -#- dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: -#- add_cloud_metadata: ~ +# - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: -#- add_locale: -# format: offset +# - add_locale: +# format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: -#- add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.ppid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: -#- add_docker_metadata: ~ +# - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: -#- add_host_metadata: -# netinfo.enabled: false +# - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. 
# #processors: -#- add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: -#- decode_json_fields: -# fields: ["field1", "field2", ...] -# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true -#============================= Elastic Cloud ================================== +# =============================== Elastic Cloud ================================ -# These settings simplify using packetbeat with the Elastic Cloud (https://cloud.elastic.co/). +# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. @@ -723,120 +945,163 @@ packetbeat.ignore_outgoing: false # `output.elasticsearch.password` settings. The format is `:`. #cloud.auth: -#================================ Outputs ====================================== +# ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. -#-------------------------- Elasticsearch output ------------------------------- +# ---------------------------- Elasticsearch Output ---------------------------- #output.elasticsearch: -# # Boolean flag to enable or disable the output module. -# #enabled: true -# -# # Array of hosts to connect to. -# # Scheme and port can be left out and will be set to the default (http and 9200) -# # In case you specify and additional path, the scheme is required: http://localhost:9200/path -# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 -# hosts: ["localhost:9200"] -# -# # Set gzip compression level. -# #compression_level: 0 -# -# # Configure escaping HTML symbols in strings. -# #escape_html: false -# -# # Optional protocol and basic auth credentials. -# #protocol: "https" -# #username: "elastic" -# #password: "changeme" -# -# # Dictionary of HTTP parameters to pass within the URL with index operations. 
-# #parameters: -# #param1: value1 -# #param2: value2 -# -# # Number of workers per Elasticsearch host. -# #worker: 1 -# -# # Optional index name. The default is "packetbeat" plus date -# # and generates [packetbeat-]YYYY.MM.DD keys. -# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. -# #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" -# -# # Optional ingest node pipeline. By default no pipeline will be used. -# #pipeline: "" -# -# # Optional HTTP path -# #path: "/elasticsearch" -# -# # Custom HTTP headers to add to each request -# #headers: -# # X-My-Header: Contents of the header -# -# # Proxy server URL -# #proxy_url: http://proxy:3128 -# -# # The number of times a particular Elasticsearch index operation is attempted. If -# # the indexing operation doesn't succeed after this many retries, the events are -# # dropped. The default is 3. -# #max_retries: 3 -# -# # The maximum number of events to bulk in a single Elasticsearch bulk API index request. -# # The default is 50. -# #bulk_max_size: 50 -# -# # The number of seconds to wait before trying to reconnect to Elasticsearch -# # after a network error. After waiting backoff.init seconds, the Beat -# # tries to reconnect. If the attempt fails, the backoff timer is increased -# # exponentially up to backoff.max. After a successful connection, the backoff -# # timer is reset. The default is 1s. -# #backoff.init: 1s -# -# # The maximum number of seconds to wait before attempting to connect to -# # Elasticsearch after a network error. The default is 60s. -# #backoff.max: 60s -# -# # Configure HTTP request timeout before failing a request to Elasticsearch. -# #timeout: 90 -# -# # Use SSL settings for HTTPS. -# #ssl.enabled: true -# -# # Configure SSL verification mode. If `none` is configured, all server hosts -# # and certificates will be accepted. In this mode, SSL-based connections are -# # susceptible to man-in-the-middle attacks. Use only for testing. Default is -# # `full`. -# #ssl.verification_mode: full -# -# # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to -# # 1.2 are enabled. -# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] -# -# # List of root certificates for HTTPS server verifications -# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] -# -# # Certificate for SSL client authentication -# #ssl.certificate: "/etc/pki/client/cert.pem" -# -# # Client certificate key -# #ssl.key: "/etc/pki/client/cert.key" -# -# # Optional passphrase for decrypting the certificate key. -# #ssl.key_passphrase: '' -# -# # Configure cipher suites to be used for SSL connections -# #ssl.cipher_suites: [] -# -# # Configure curve types for ECDHE-based cipher suites -# #ssl.curve_types: [] -# -# # Configure what types of renegotiation are supported. Valid options are -# # never, once, and freely. Default is never. -# #ssl.renegotiation: never -# + # Boolean flag to enable or disable the output module. + #enabled: true -#----------------------------- Logstash output --------------------------------- + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping HTML symbols in strings. 
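+ # For example, you might set `escape_html: true` if events are ever rendered
+ # directly in HTML pages; the default below leaves strings untouched.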
+ #escape_html: false + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "packetbeat" plus date + # and generates [packetbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. 
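+ # For example, to require TLS 1.2 or newer you could narrow this to
+ # [TLSv1.2, TLSv1.3].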
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} -#------------------------------- Kafka output ---------------------------------- + +# -------------------------------- Kafka Output -------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true @@ -871,7 +1136,11 @@ packetbeat.ignore_outgoing: false #username: '' #password: '' - # Kafka version packetbeat is assumed to run against. Defaults to the "1.0.0". + # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: '' + + # Kafka version Packetbeat is assumed to run against. Defaults to the "1.0.0". #version: '1.0.0' # Configure JSON encoding @@ -895,8 +1164,8 @@ packetbeat.ignore_outgoing: false # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m - # Strategy for fetching the topics metadata from the broker. Default is true. - #full: true + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false # The number of concurrent load-balanced Kafka output workers. #worker: 1 @@ -908,10 +1177,25 @@ packetbeat.ignore_outgoing: false # until all events are published. The default is 3. #max_retries: 3 + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. 
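+ # For example, with the defaults below successive retries wait 1s, 2s, 4s,
+ # 8s, and so on, until the delay is capped at backoff.max (60s).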
+ #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s @@ -950,30 +1234,37 @@ packetbeat.ignore_outgoing: false # purposes. The default is "beats". #client_id: beats - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections @@ -986,7 +1277,38 @@ packetbeat.ignore_outgoing: false # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- Redis output ---------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. 
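+ # (For example, uncommenting only `kerberos.auth_type` below would already
+ # switch Kerberos on.)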
+ #kerberos.enabled: true
+
+ # Authentication type to use with Kerberos. Available options: keytab, password.
+ #kerberos.auth_type: password
+
+ # Path to the keytab file. It is used when auth_type is set to keytab.
+ #kerberos.keytab: /etc/security/keytabs/kafka.keytab
+
+ # Path to the Kerberos configuration.
+ #kerberos.config_path: /etc/krb5.conf
+
+ # The service name. Service principal name is constructed from
+ # service_name/hostname@realm.
+ #kerberos.service_name: kafka
+
+ # Name of the Kerberos user.
+ #kerberos.username: elastic
+
+ # Password of the Kerberos user. It is used when auth_type is set to password.
+ #kerberos.password: changeme
+
+ # Kerberos realm.
+ #kerberos.realm: ELASTIC
+
+# -------------------------------- Redis Output --------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true
@@ -1002,6 +1324,8 @@ packetbeat.ignore_outgoing: false
  # The list of Redis servers to connect to. If load-balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
+ # The hosts setting supports redis and rediss URLs with a custom password,
+ # like redis://:password@localhost:6379.
  #hosts: ["localhost:6379"]

  # The name of the Redis list or channel the events are published to. The
@@ -1066,43 +1390,57 @@ packetbeat.ignore_outgoing: false
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

- # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+ # Use SSL settings for HTTPS.
  #ssl.enabled: true

- # Configure SSL verification mode. If `none` is configured, all server hosts
- # and certificates will be accepted. In this mode, SSL based connections are
- # susceptible to man-in-the-middle attacks. Use only for testing. Default is
- # `full`.
+ # Controls the verification of certificates. Valid values are:
+ # * full, which verifies that the provided certificate is signed by a trusted
+ #   authority (CA) and also verifies that the server's hostname (or IP address)
+ #   matches the names identified within the certificate.
+ # * certificate, which verifies that the provided certificate is signed by a
+ #   trusted authority (CA), but does not perform any hostname verification.
+ # * none, which performs no verification of the server's certificate. This
+ #   mode disables many of the security benefits of SSL/TLS and should only be used
+ #   after very careful consideration. It is primarily intended as a temporary
+ #   diagnostic mechanism when attempting to resolve TLS errors; its use in
+ #   production environments is strongly discouraged.
+ # The default value is full.
  #ssl.verification_mode: full

- # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
- # 1.2 are enabled.
- #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+ # List of supported/valid TLS versions. By default all TLS versions from 1.1
+ # up to 1.3 are enabled.
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

- # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

- # Client Certificate Key
+ # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

- # Optional passphrase for decrypting the Certificate Key.
+ # Optional passphrase for decrypting the certificate key.
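+ # For example, rather than a literal value you might reference one from the
+ # Beats keystore, e.g. `ssl.key_passphrase: "${REDIS_KEY_PASSPHRASE}"`
+ # (the variable name here is illustrative).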
#ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never -#------------------------------- File output ----------------------------------- + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# -------------------------------- File Output --------------------------------- #output.file: # Boolean flag to enable or disable the output module. #enabled: true @@ -1124,7 +1462,7 @@ packetbeat.ignore_outgoing: false #filename: packetbeat # Maximum size in kilobytes of each file. When this size is reached, and on - # every packetbeat restart, the files are rotated. The default value is 10240 + # every Packetbeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 @@ -1136,8 +1474,7 @@ packetbeat.ignore_outgoing: false # Permissions to use for file creation. The default is 0600. #permissions: 0600 - -#----------------------------- Console output --------------------------------- +# ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true @@ -1150,78 +1487,99 @@ packetbeat.ignore_outgoing: false # Configure escaping HTML symbols in strings. #escape_html: false -#================================= Paths ====================================== +# =================================== Paths ==================================== -# The home path for the packetbeat installation. This is the default base path +# The home path for the Packetbeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: -# The configuration path for the packetbeat installation. This is the default +# The configuration path for the Packetbeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} -# The data path for the packetbeat installation. This is the default base path -# for all the files in which packetbeat needs to store its data. If not set by a +# The data path for the Packetbeat installation. This is the default base path +# for all the files in which Packetbeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data -# The logs path for a packetbeat installation. This is the default location for +# The logs path for a Packetbeat installation. This is the default location for # the Beat's log files. 
If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs -#================================ Keystore ========================================== +# ================================== Keystore ================================== + # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" -#============================== Dashboards ===================================== +# ================================= Dashboards ================================= + {{ elk_macros.setup_dashboards('packetbeat') }} -#============================== Template ===================================== +# ================================== Template ================================== + {{ elk_macros.setup_template('packetbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} -#============================== Setup ILM ===================================== +# ====================== Index Lifecycle Management (ILM) ====================== -# Configure Index Lifecycle Management Index Lifecycle Management creates a -# write alias and adds additional settings to the template. -# The elasticsearch.output.index setting will be replaced with the write alias -# if ILM is enabled. +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. -# Enabled ILM support. Valid values are true, false, and auto. The beat will -# detect availabilty of Index Lifecycle Management in Elasticsearch and enable -# or disable ILM support. +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. #setup.ilm.enabled: auto -# Configure the ILM write alias name. -#setup.ilm.rollover_alias: "packetbeat" +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'packetbeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'packetbeat' -# Configure rollover index pattern. +# Set the rollover index pattern. The default is "%{now/d}-000001". #setup.ilm.pattern: "{now/d}-000001" + {% if ilm_policy_name is defined %} +# Set the lifecycle policy name. The default policy name is +# 'beatname'. setup.ilm.policy_name: "{{ ilm_policy_name }}" + {% endif %} {% if ilm_policy_file_location is defined %} +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}" -{% endif %} -#============================== Kibana ===================================== +{% endif %} +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy +# can be installed. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. 
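+# For example, when shipping a custom policy file via setup.ilm.policy_file,
+# you would typically set `setup.ilm.check_exists: false` together with
+# `setup.ilm.overwrite: true`, as noted above.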
+#setup.ilm.overwrite: false
+
+# =================================== Kibana ===================================
+
{% if (groups['kibana'] | length) > 0 %}
{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }}
{% endif %}

-#================================ Logging ======================================
+# ================================== Logging ===================================
+
{{ elk_macros.beat_logging('packetbeat', packetbeat_log_level) }}

-#============================== Xpack Monitoring =====================================
-{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+# ============================= X-Pack Monitoring ==============================
+{{ elk_macros.xpack_monitoring_elasticsearch('packetbeat', inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
+
+# =============================== HTTP Endpoint ================================

-#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
@@ -1230,18 +1588,62 @@ setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}
# Defines if the HTTP endpoint is enabled.
#http.enabled: false

-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066

-#============================= Process Security ================================
+# Define which user should be owning the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe. Use the Security
+# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
+# ============================== Process Security ==============================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true

-#================================= Migration ==================================
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for packetbeat.
+#instrumentation:
+ # Set to true to enable instrumentation of packetbeat.
+ #enabled: false
+
+ # Environment in which packetbeat is running (e.g. staging, production, etc.)
+ #environment: ""
+
+ # APM Server hosts to report instrumentation results to.
+ #hosts:
+ # - http://localhost:8200
+
+ # API Key for the APM Server(s).
+ # If api_key is set then secret_token will be ignored.
+ #api_key:
+
+ # Secret token for the APM Server(s).
+ #secret_token:
+
+ # Enable profiling of the server, recording profile samples as events.
+ #
+ # This feature is experimental.
+ #profiling:
+ #cpu:
+ # Set to true to enable CPU profiling.
+ #enabled: false
+ #interval: 60s
+ #duration: 10s
+ #heap:
+ # Set to true to enable heap profiling.
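+ # (A heap profile is then presumably captured once per `interval`, e.g.
+ # every 60s with the value below.)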
+ #enabled: false + #interval: 60s + +# ================================= Migration ================================== # This allows to enable 6.7 migration aliases #migration.6_to_7.enabled: false diff --git a/elk_metrics_7x/templates/_macros.j2 b/elk_metrics_7x/templates/_macros.j2 index b6ba9940..e8b565d8 100644 --- a/elk_metrics_7x/templates/_macros.j2 +++ b/elk_metrics_7x/templates/_macros.j2 @@ -1,5 +1,4 @@ -{% macro output_elasticsearch(host, data_hosts) -%} -#-------------------------- Elasticsearch output ------------------------------- +{% macro output_elasticsearch(beat_name, host, data_hosts) -%} output.elasticsearch: # Boolean flag to enable or disable the output module. enabled: true @@ -13,12 +12,18 @@ output.elasticsearch: # Set gzip compression level. compression_level: 3 - # Optional protocol and basic auth credentials. + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Protocol - either `http` (default) or `https`. #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" #username: "elastic" #password: "changeme" - # Dictionary of HTTP parameters to pass within the url with index operations. + # Dictionary of HTTP parameters to pass within the URL with index operations. #parameters: #param1: value1 #param2: value2 @@ -26,24 +31,29 @@ output.elasticsearch: # Number of workers per Elasticsearch host. worker: 1 - # Optional index name. The default is "apm" plus date - # and generates [apm-]YYYY.MM.DD keys. + # Optional index name. The default is "{{ beat_name }}" plus date + # and generates [{{ beat_name }}-]YYYY.MM.DD keys. # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}" + #index: "{{ beat_name }}-%{[agent.version]}-%{+yyyy.MM.dd}" # Optional ingest node pipeline. By default no pipeline will be used. #pipeline: "" - # Optional HTTP Path + # Optional HTTP path #path: "/elasticsearch" # Custom HTTP headers to add to each request #headers: # X-My-Header: Contents of the header - # Proxy server url + # Proxy server URL #proxy_url: http://proxy:3128 + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + # The number of times a particular Elasticsearch index operation is attempted. If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. @@ -53,44 +63,89 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 - # Use SSL settings for HTTPS. Default is true. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. 
If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # SSL configuration. By default is off. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC {%- endmacro %} {% macro output_logstash(host, data_hosts, processors, named_index) -%} @@ -140,9 +195,9 @@ output.logstash: # Logstash after a network error. The default is 60s. #backoff.max: 60s - # Optional index name. The default index name is set to journalbeat - # in all lowercase. {% if named_index is defined %} + # Optional index name. 
The default index name is set to {{ named_index }} + # in all lowercase. index: '{{ named_index }}' {% endif %} @@ -152,20 +207,27 @@ output.logstash: # Resolve names locally when using a proxy server. Defaults to false. #proxy_use_local_resolver: false - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + # Use SSL settings for HTTPS. #ssl.enabled: true - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -175,7 +237,7 @@ output.logstash: # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections @@ -188,6 +250,12 @@ output.logstash: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting @@ -255,20 +323,31 @@ setup.dashboards.enabled: false # These settings can be adjusted to load your own template or overwrite existing ones. # Set to false to disable template loading. -setup.template.enabled: {{ host == data_nodes[0] | default(false) }} +setup.template.enabled: {{ (host == data_nodes[0]) | default(false) | lower }} -# Template name. By default the template name is "{{ beat_name }}-%{[beat.version]}" -# The template name and pattern has to be set in case the elasticsearch index pattern is modified. -setup.template.name: "{{ beat_name }}-%{[beat.version]}" +# Select the kind of index template. 
From Elasticsearch 7.8, it is possible to
+# use component templates. Available options: legacy, component, index.
+# By default {{ beat_name }} uses the legacy index templates.
+#setup.template.type: legacy

-# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings.
+# Template name. By default the template name is "{{ beat_name }}-%{[agent.version]}"
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+setup.template.name: "{{ beat_name }}-%{[agent.version]}"
+
+# Template pattern. By default the template pattern is "-%{[agent.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
-# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
-setup.template.pattern: "{{ beat_name }}-%{[beat.version]}-*"
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+setup.template.pattern: "{{ beat_name }}-%{[agent.version]}-*"

# Path to fields.yml file to generate the template
setup.template.fields: "${path.config}/fields.yml"

+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false
@@ -279,10 +358,11 @@ setup.template.fields: "${path.config}/fields.yml"
#setup.template.json.name: ""

# Overwrite existing template
-setup.template.overwrite: {{ host == data_nodes[0] | default(false)}}
+# Do not enable this option for more than one instance of {{ beat_name }} as it might
+# overload your Elasticsearch with too many update requests.
+setup.template.overwrite: {{ (host == data_nodes[0]) | default(false) | lower }}

{% set shards = elasticsearch_beat_settings.shard_count | int %}
-
# Elasticsearch template settings
setup.template.settings:
@@ -301,7 +381,6 @@ setup.template.settings:
{% if 'max_docvalue_fields_search' in elasticsearch_beat_settings %}
  max_docvalue_fields_search: {{ elasticsearch_beat_settings.max_docvalue_fields_search | int }}
{% endif %}
-
# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
_source:
@@ -324,40 +403,60 @@ setup.kibana:
  #username: "elastic"
  #password: "changeme"

- # Optional HTTP Path
+ # Optional HTTP path
  #path: ""

- # Use SSL settings for HTTPS. Default is true.
+ # Optional Kibana space ID.
+ #space.id: ""
+
+ # Use SSL settings for HTTPS.
  #ssl.enabled: true

- # Configure SSL verification mode. If `none` is configured, all server hosts
- # and certificates will be accepted. In this mode, SSL based connections are
- # susceptible to man-in-the-middle attacks. Use only for testing. Default is
- # `full`.
+ # Controls the verification of certificates. Valid values are:
+ # * full, which verifies that the provided certificate is signed by a trusted
+ #   authority (CA) and also verifies that the server's hostname (or IP address)
+ #   matches the names identified within the certificate.
+ # * certificate, which verifies that the provided certificate is signed by a
+ #   trusted authority (CA), but does not perform any hostname verification.
+ # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. #ssl.verification_mode: full - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - # SSL configuration. By default is off. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" - # Client Certificate Key + # Client certificate key #ssl.key: "/etc/pki/client/cert.key" - # Optional passphrase for decrypting the Certificate Key. + # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] - # Configure curve types for ECDHE based cipher suites + # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" {%- endmacro %} {% macro beat_logging(beat_name, log_level='info') -%} @@ -369,17 +468,20 @@ setup.kibana: logging.level: {{ log_level }} # Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are "beat", "publish", "service" +# Other available selectors are "beat", "publisher", "service" # Multiple selectors can be chained. #logging.selectors: [ ] +# Send all logging output to stderr. The default is false. +#logging.to_stderr: false + # Send all logging output to syslog. The default is false. #logging.to_syslog: false # Send all logging output to Windows Event Logs. The default is false. #logging.to_eventlog: false -# If enabled, packetbeat periodically logs its internal metrics that have changed +# If enabled, {{ (beat_name == 'apm-server') | ternary( beat_name, beat_name | capitalize) }} periodically logs its internal metrics that have changed # in the last period. For each metric that changed, the delta from the value at # the beginning of the period is logged. Also, the total values for # all non-zero internal metrics are logged on shutdown. The default is true. @@ -414,27 +516,43 @@ logging.files: # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the - # unix epoch. Defaults to disabled. + # Unix epoch. Defaults to disabled. #interval: 0 -# Set to true to log messages in json format. + # Rotate existing logs on startup rather than appending to the existing + # file. Defaults to true. 
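+ # (For example, set `rotateonstartup: false` to keep appending to the same
+ # file across restarts.)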
 {%- endmacro %}
 
 {% macro beat_logging(beat_name, log_level='info') -%}
@@ -369,17 +468,20 @@ setup.kibana:
 logging.level: {{ log_level }}
 
 # Enable debug output for selected components. To enable all selectors use ["*"]
-# Other available selectors are "beat", "publish", "service"
+# Other available selectors are "beat", "publisher", "service"
 # Multiple selectors can be chained.
 #logging.selectors: [ ]
 
+# Send all logging output to stderr. The default is false.
+#logging.to_stderr: false
+
 # Send all logging output to syslog. The default is false.
 #logging.to_syslog: false
 
 # Send all logging output to Windows Event Logs. The default is false.
 #logging.to_eventlog: false
 
-# If enabled, packetbeat periodically logs its internal metrics that have changed
+# If enabled, {{ (beat_name == 'apm-server') | ternary(beat_name, beat_name | capitalize) }} periodically logs its internal metrics that have changed
 # in the last period. For each metric that changed, the delta from the value at
 # the beginning of the period is logged. Also, the total values for
 # all non-zero internal metrics are logged on shutdown. The default is true.
@@ -414,27 +516,43 @@ logging.files:
   # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
   # are boundary-aligned with minutes, hours, days, weeks, months, and years as
   # reported by the local system clock. All other intervals are calculated from the
-  # unix epoch. Defaults to disabled.
+  # Unix epoch. Defaults to disabled.
   #interval: 0
 
-# Set to true to log messages in json format.
+  # Rotate existing logs on startup rather than appending to the existing
+  # file. Defaults to true.
+  # rotateonstartup: true
+
+# Set to true to log messages in JSON format.
 #logging.json: false
+
+# Set to true to log messages with minimal required Elastic Common Schema (ECS)
+# information. Recommended to use in combination with `logging.json=true`.
+# Defaults to false.
+#logging.ecs: false
 {%- endmacro %}
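+
+{# Illustrative call only: a beat template that imports this macros file as
+   `elk_macros` can emit its logging section with, for example:
+   {{ elk_macros.beat_logging('apm-server', 'info') }}
+#}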
 
-{% macro xpack_monitoring_elasticsearch(host, data_hosts, processors) -%}
-# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster.
-# This requires xpack monitoring to be enabled in Elasticsearch.
-# The reporting is disabled by default.
+{% macro xpack_monitoring_elasticsearch(beat_name, host, data_hosts, processors) -%}
+# {{ beat_name | capitalize }} can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
 
 # Set to true to enable the monitoring reporter.
-xpack.monitoring.enabled: true
+monitoring.enabled: true
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# {{ beat_name | capitalize }} instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
 
 # Uncomment to send the metrics to Elasticsearch. Most settings from the
-# Elasticsearch output are accepted here as well. Any setting that is not set is
-# automatically inherited from the Elasticsearch output configuration, so if you
-# have the Elasticsearch output configured, you can simply uncomment the
-# following line, and leave the rest commented out.
-xpack.monitoring.elasticsearch:
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+monitoring.elasticsearch:
 
   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
@@ -445,12 +563,15 @@ xpack.monitoring.elasticsearch:
   # Set gzip compression level.
   compression_level: 9
 
-  # Optional protocol and basic auth credentials.
+  # Protocol - either `http` (default) or `https`.
   #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
   #username: "beats_system"
   #password: "changeme"
 
-  # Dictionary of HTTP parameters to pass within the url with index operations.
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
   #parameters:
     #param1: value1
     #param2: value2
@@ -482,52 +603,92 @@ xpack.monitoring.elasticsearch:
   # Elasticsearch after a network error. The default is 60s.
   #backoff.max: 60s
 
-  # Configure http request timeout before failing an request to Elasticsearch.
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
   timeout: 120
 
   # Use SSL settings for HTTPS.
   #ssl.enabled: true
 
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
   #ssl.verification_mode: full
 
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
 
-  # SSL configuration. By default is off.
   # List of root certificates for HTTPS server verifications
   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
 
   # Certificate for SSL client authentication
   #ssl.certificate: "/etc/pki/client/cert.pem"
 
-  # Client Certificate Key
+  # Client certificate key
   #ssl.key: "/etc/pki/client/cert.key"
 
-  # Optional passphrase for decrypting the Certificate Key.
+  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''
 
   # Configure cipher suites to be used for SSL connections
   #ssl.cipher_suites: []
 
-  # Configure curve types for ECDHE based cipher suites
+  # Configure curve types for ECDHE-based cipher suites
   #ssl.curve_types: []
 
   # Configure what types of renegotiation are supported. Valid options are
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never
 
+  # Configure a pin that can be used to do extra validation of the verified certificate chain;
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
   #metrics.period: 10s
   #state.period: 1m
+
+# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
+# setting. You can find the value for this setting in the Elastic Cloud web UI.
+#monitoring.cloud.id:
+
+# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
+# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#monitoring.cloud.auth:
 {%- endmacro %}
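+
+{# Illustrative call only; `ansible_host`, `data_hosts` and `processors` stand
+   in for whatever the calling template actually passes:
+   {{ elk_macros.xpack_monitoring_elasticsearch('apm-server', ansible_host, data_hosts, processors) }}
+#}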
 
 {% macro beat_processors(processors) -%}
-# Processors are used to reduce the number of fields in the exported event or to
-# enhance the event with external metadata.
 processors:
 {% if processors is defined and processors is iterable and processors | length > 0 %}
 {{ processors | to_yaml }}
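+{# Illustrative only: a `processors` list supplied via Ansible vars, e.g.
+   [{'add_host_metadata': {}}], would be dumped verbatim above by `to_yaml` as:
+     - add_host_metadata: {}
+#}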