{% import 'templates/_macros.j2' as elk_macros %} ###################### Packetbeat Configuration Example ####################### # This file is a full configuration example documenting all non-deprecated # options in comments. For a shorter configuration example that contains only # the most common options, please see packetbeat.yml in the same directory. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/packetbeat/index.html #============================== Network device ================================ # Select the network interface to sniff the data. You can use the "any" # keyword to sniff on all connected interfaces. packetbeat.interfaces.device: any # Packetbeat supports two sniffer types: # * pcap, which uses the libpcap library and works on most platforms, but it's # not the fastest option. # * af_packet, which uses memory-mapped sniffing. This option is faster than # libpcap and doesn't require a kernel module, but it's Linux-specific. packetbeat.interfaces.type: af_packet # The maximum size of the packets to capture. The default is 65535, which is # large enough for almost all networks and interface types. If you sniff on a # physical network interface, the optimal setting is the MTU size. On virtual # interfaces, however, it's safer to accept the default value. packetbeat.interfaces.snaplen: 65535 # The maximum size of the shared memory buffer to use between the kernel and # user space. A bigger buffer usually results in lower CPU usage, but consumes # more memory. This setting is only available for the af_packet sniffer type. # The default is 30 MB. packetbeat.interfaces.buffer_size_mb: 30 # Packetbeat automatically generates a BPF filter for capturing only the traffic on # ports where it expects to find known protocols. Use this setting to tell # Packetbeat to generate a BPF filter that accepts VLAN tags. packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter:
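# For example, to restrict capture to a handful of well-known service ports,
# a manual filter along the following lines could be used (the ports shown
# are illustrative only, not a recommendation):
#packetbeat.interfaces.bpf_filter: "port 3306 or port 5672 or port 11211"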
#================================== Flows ===================================== packetbeat.flows: # Enable Network flows. Default: true enabled: true # Set network flow timeout. A flow is killed if no packet is received before it # times out. timeout: 90s # Configure reporting period. If set to -1, only killed flows will be reported period: 30s #========================== Transaction protocols ============================= packetbeat.protocols: - type: icmp # Enable ICMPv4 and ICMPv6 monitoring. Default: true enabled: true - type: amqp # Enable AMQP monitoring. Default: true {% set ns = namespace(enabled=(inventory_hostname in groups['rabbitmq_all'] | default([]))) %} {% if not ns.enabled | bool %} {% for _item in groups['rabbitmq_all'] | default([]) %} {% if not ns.enabled | bool and _item in groups[inventory_hostname + '-host_containers'] | default([]) %} {% set ns.enabled = true %} {% endif %} {% endfor %} {% endif %} enabled: {{ ns.enabled | bool }} # Configure the ports where to listen for AMQP traffic. You can disable # the AMQP protocol by commenting out the list of ports. ports: [5672] # Truncate messages that are published and avoid huge messages being # indexed. # Default: 1000 #max_body_length: 1000 # Hide the header fields in header frames. # Default: false parse_headers: true # Hide the additional arguments of method frames. # Default: false parse_arguments: true # Hide all methods relative to connection negotiation between server and # client. # Default: true hide_connection_information: false # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: cassandra # Enable cassandra monitoring. Default: false enabled: false # Cassandra port for traffic monitoring. ports: [9042] # If this option is enabled, the raw message of the request (`cassandra_request` field) # is included in published events. The default is true. #send_request: true # If this option is enabled, the raw message of the request (`cassandra_request.request_headers` field) # is included in published events. The default is true. Enable `send_request` first before enabling this option. #send_request_header: true # If this option is enabled, the raw message of the response (`cassandra_response` field) # is included in published events. The default is true. #send_response: true # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) # is included in published events. The default is true. Enable `send_response` first before enabling this option. #send_response_header: true # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` can be configured. # By default no compressor is configured. #compressor: "snappy" # This option indicates which operations will be ignored. #ignored_ops: ["SUPPORTED","OPTIONS"] - type: dns # Enable DNS monitoring. Default: true enabled: true # Configure the ports where to listen for DNS traffic. You can disable # the DNS protocol by commenting out the list of ports. ports: [53] # include_authorities controls whether or not the dns.authorities field # (authority resource records) is added to messages. # Default: false include_authorities: true # include_additionals controls whether or not the dns.additionals field # (additional resource records) is added to messages. # Default: false include_additionals: true # send_request and send_response control whether or not the stringified DNS # request and response message are added to the result. # Nearly all data about the request/response is available in the dns.* # fields, but this can be useful if you need visibility specifically # into the request or the response. # Default: false # send_request: true # send_response: true # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: http # Enable HTTP monitoring. Default: true {% set used_ports = [53, 443, 2049, 3306, 5432, 5672, 6379, 9042, 9090, 11211, 27017] %} {% set ports = [] %} {% for item in heartbeat_services %} {% for port in item.ports %} {% if (item.type == 'http') and (not port in used_ports) %} {% set _ = ports.extend([port]) %} {% endif %} {% endfor %} {% endfor %} enabled: true # Configure the ports where to listen for HTTP traffic. You can disable # the HTTP protocol by commenting out the list of ports. ports: {{ ports | unique }}
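# The port list above is rendered from `heartbeat_services`, skipping any port
# already claimed by another protocol analyzer. With a typical service map it
# might render to something like the following (values are illustrative only):
# ports: [80, 8080, 8181]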
# Uncomment the following to hide certain parameters in URL or forms attached # to HTTP requests. The names of the parameters are case insensitive. # The value of the parameters will be replaced with the 'xxxxx' string. # This is generally useful for avoiding storing user passwords or other # sensitive information. # Only query parameters and top level form parameters are replaced. # hide_keywords: ['pass', 'password', 'passwd'] # A list of header names to capture and send to Elasticsearch. These headers # are placed under the `headers` dictionary in the resulting JSON. send_headers: true # Instead of sending a white list of headers to Elasticsearch, you can send # all headers by setting this option to true. The default is false. send_all_headers: true # The list of content types for which Packetbeat includes the full HTTP # payload in the response field. #include_body_for: [] # If the Cookie or Set-Cookie headers are sent, this option controls whether # they are split into individual values. #split_cookie: false # The header field to extract the real IP from. This setting is useful when # you want to capture traffic behind a reverse proxy, but you want to get the # geo-location information. #real_ip_header: # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s # Maximum message size. If an HTTP message is larger than this, it will # be trimmed to this size. Default is 10 MB. #max_message_size: 10485760 - type: memcache # Enable memcache monitoring. Default: true {% set ns = namespace(enabled=(inventory_hostname in groups['memcached_all'] | default([]))) %} {% if not ns.enabled | bool %} {% for _item in groups['memcached_all'] | default([]) %} {% if not ns.enabled | bool and _item in groups[inventory_hostname + '-host_containers'] | default([]) %} {% set ns.enabled = true %} {% endif %} {% endfor %} {% endif %} enabled: {{ ns.enabled }} # Configure the ports where to listen for memcache traffic. You can disable # the Memcache protocol by commenting out the list of ports. ports: [11211] # Uncomment the parseunknown option to force the memcache text protocol parser # to accept unknown commands. # Note: All unknown commands MUST NOT contain any data parts! # Default: false # parseunknown: true # Update the maxvalues option to store the values - base64 encoded - in the # json output. # possible values: # maxvalues: -1 # store all values (text based protocol multi-get) # maxvalues: 0 # store no values at all # maxvalues: N # store up to N values # Default: 0 # maxvalues: -1 # Use maxbytespervalue to limit the number of bytes to be copied per value element. # Note: Values will be base64 encoded, so actual size in json document # will be 4 times maxbytespervalue. # Default: unlimited # maxbytespervalue: 100 # UDP transaction timeout in milliseconds. # Note: Quiet messages in UDP binary protocol will get response only in error case. # The memcached analyzer will wait for udptransactiontimeout milliseconds # before publishing quiet messages. Non quiet messages or quiet requests with # error response will not have to wait for the timeout. # Default: 200 # udptransactiontimeout: 1000 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false.
#send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: mysql # Enable mysql monitoring. Default: true {% set ns = namespace(enabled=(inventory_hostname in groups['galera_all'] | default([]))) %} {% if not ns.enabled | bool %} {% for _item in groups['galera_all'] | default([]) %} {% if not ns.enabled | bool and _item in groups[inventory_hostname + '-host_containers'] | default([]) %} {% set ns.enabled = true %} {% endif %} {% endfor %} {% endif %} enabled: {{ ns.enabled }} # Configure the ports where to listen for MySQL traffic. You can disable # the MySQL protocol by commenting out the list of ports. ports: [3306] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: pgsql # Enable pgsql monitoring. Default: true enabled: false # Configure the ports where to listen for Pgsql traffic. You can disable # the Pgsql protocol by commenting out the list of ports. ports: [5432] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: redis # Enable redis monitoring. Default: true enabled: false # Configure the ports where to listen for Redis traffic. You can disable # the Redis protocol by commenting out the list of ports. ports: [6379] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: thrift # Enable thrift monitoring. Default: true enabled: false # Configure the ports where to listen for Thrift-RPC traffic. You can disable # the Thrift-RPC protocol by commenting out the list of ports. ports: [9090] # The Thrift transport type. Currently this option accepts the values socket # for TSocket, which is the default Thrift transport, and framed for the # TFramed Thrift transport. The default is socket. #transport_type: socket # The Thrift protocol type. Currently the only accepted value is binary for # the TBinary protocol, which is the default Thrift protocol. #protocol_type: binary # The Thrift interface description language (IDL) files for the service that # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include # parameter and exception names. 
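# A hypothetical example, assuming the service's generated Thrift IDL files
# have been copied onto the host (path and filename are illustrative only):
# idl_files: ["/etc/packetbeat/idl/myservice.thrift"]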
#idl_files: [] # The maximum length for strings in parameters or return values. If a string # is longer than this value, the string is automatically truncated to this # length. #string_max_size: 200 # The maximum number of elements in a Thrift list, set, map, or structure. #collection_max_size: 15 # If this option is set to false, Packetbeat decodes the method name from the # reply and simply skips the rest of the response message. #capture_reply: true # If this option is set to true, Packetbeat replaces all strings found in # method parameters, return codes, or exception structures with the "*" # string. #obfuscate_strings: false # The maximum number of fields that a structure can have before Packetbeat # ignores the whole transaction. #drop_after_n_struct_fields: 500 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: mongodb # Enable mongodb monitoring. Default: true enabled: false # Configure the ports where to listen for MongoDB traffic. You can disable # the MongoDB protocol by commenting out the list of ports. ports: [27017] # The maximum number of documents from the response to index in the `response` # field. The default is 10. #max_docs: 10 # The maximum number of characters in a single document indexed in the # `response` field. The default is 5000. You can set this to 0 to index an # unlimited number of characters per document. #max_doc_length: 5000 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: nfs # Enable NFS monitoring. Default: true {% set ns = namespace(enabled=((inventory_hostname in groups['glance_all'] | default([])) or (inventory_hostname in groups['nova_compute'] | default([])) or 'nfs4' in (ansible_mounts | map(attribute='fstype') | list))) %} {% if not ns.enabled | bool %} {% for _item in groups['glance_all'] | default([]) + groups['nova_compute'] | default([]) %} {% if not ns.enabled | bool and _item in groups[inventory_hostname + '-host_containers'] | default([]) %} {% set ns.enabled = true %} {% endif %} {% endfor %} {% endif %} enabled: {{ ns.enabled }} # Configure the ports where to listen for NFS traffic. You can disable # the NFS protocol by commenting out the list of ports. ports: [2049] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: tls # Enable TLS monitoring. Default: true {% set ns = namespace(enabled=((inventory_hostname in groups['haproxy_all'] | default([])) or (inventory_hostname in groups['horizon_all'] | default([])))) %} {% if not ns.enabled | bool %} {% for _item in groups['haproxy_all'] | default([]) + groups['horizon_all'] | default([]) %} {% if not ns.enabled | bool and _item in groups[inventory_hostname + '-host_containers'] | default([]) %} {% set ns.enabled = true %} {% endif %} {% endfor %} {% endif %} enabled: {{ ns.enabled }} # Configure the ports where to listen for TLS traffic. You can disable # the TLS protocol by commenting out the list of ports. ports: [443]
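# If TLS is terminated on additional, non-standard ports in this environment
# (for example a frontend listening on 8443), they can be appended to the list
# above. The value shown is illustrative only:
# ports: [443, 8443]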
# If this option is enabled, the client and server certificates and # certificate chains are sent to Elasticsearch. The default is true. send_certificates: true # If this option is enabled, the raw certificates will be stored # in PEM format under the `raw` key. The default is false. #include_raw_certificates: false #=========================== Monitored processes ============================== # Configure the processes to be monitored and how to find them. If a process is # monitored then Packetbeat attempts to use its name to fill in the `proc` and # `client_proc` fields. # The processes can be found by searching their command line by a given string. # # Process matching is optional and can be enabled by uncommenting the following # lines. # #packetbeat.procs: # enabled: false # monitored: # - process: mysqld # cmdline_grep: mysqld # # - process: pgsql # cmdline_grep: postgres # # - process: nginx # cmdline_grep: nginx # # - process: app # cmdline_grep: gunicorn # Uncomment the following if you want to ignore transactions created # by the server on which the shipper is installed. This option is useful # to remove duplicates if shippers are installed on multiple servers. #packetbeat.ignore_outgoing: true #================================ General ====================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. # If this option is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the # output. Fields can be scalar values, arrays, dictionaries, or any nested # combination of these. #fields: # env: staging # If this option is set to true, the custom fields are stored as top-level # fields in the output document instead of being grouped under a fields # sub-dictionary. Default is false. #fields_under_root: false # Internal queue configuration for buffering events to be published. #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. #events: 4096 # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. # A value of 0 (the default) ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s
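# A minimal sketch of an explicit memory-queue configuration (the values are
# illustrative, not tuned recommendations):
#queue:
#  mem:
#    events: 8192
#    flush.min_events: 1024
#    flush.timeout: 2s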
# Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: #================================ Processors =================================== # Processors are used to reduce the number of fields in the exported event or to # enhance the event with external metadata. This section defines a list of # processors that are applied one by one and the first one receives the initial # event: # # event -> filter1 -> event1 -> filter2 -> event2 ... # # The supported processors are drop_fields, drop_event, include_fields, and # add_cloud_metadata. # # For example, you can use the following processors to keep the fields that # contain CPU load percentages, but remove the fields that contain CPU ticks # values: # #processors: #- include_fields: # fields: ["cpu"] #- drop_fields: # fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: #- drop_event: # when: # equals: # http.code: 200 # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: #- add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: #- add_locale: # format: offset # # The following example enriches each event with docker metadata; it matches # given fields to an existing container id and adds info from that container: # #processors: #- add_docker_metadata: # host: "unix:///var/run/docker.sock" # match_fields: ["system.process.cgroup.id"] # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: # # certificate_authority: "/etc/pki/root/ca.pem" # # certificate: "/etc/pki/client/cert.pem" # # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata; it matches # the container id from the log path available in the `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: #- add_docker_metadata: ~ processors: - add_host_metadata: ~ #============================= Elastic Cloud ================================== # These settings simplify using packetbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. # You can find the `cloud.id` in the Elastic Cloud web UI. #cloud.id: # The cloud.auth setting overwrites the `output.elasticsearch.username` and # `output.elasticsearch.password` settings. The format is `<user>:<pass>`. #cloud.auth: #================================ Outputs ====================================== # Configure what output to use when sending the data collected by the beat. #-------------------------- Elasticsearch output ------------------------------- #output.elasticsearch: # # Boolean flag to enable or disable the output module. # #enabled: true # # # Array of hosts to connect to. # # Scheme and port can be left out and will be set to the default (http and 9200) # # In case you specify an additional path, the scheme is required: http://localhost:9200/path # # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 # hosts: ["localhost:9200"] # # # Set gzip compression level.
# #compression_level: 0 # # # Optional protocol and basic auth credentials. # #protocol: "https" # #username: "elastic" # #password: "changeme" # # # Dictionary of HTTP parameters to pass within the url with index operations. # #parameters: # #param1: value1 # #param2: value2 # # # Number of workers per Elasticsearch host. # #worker: 1 # # # Optional index name. The default is "packetbeat" plus date # # and generates [packetbeat-]YYYY.MM.DD keys. # # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. # #index: "packetbeat-%{[beat.version]}-%{+yyyy.MM.dd}" # # # Optional ingest node pipeline. By default no pipeline will be used. # #pipeline: "" # # # Optional HTTP Path # #path: "/elasticsearch" # # # Custom HTTP headers to add to each request # #headers: # # X-My-Header: Contents of the header # # # Proxy server url # #proxy_url: http://proxy:3128 # # # The number of times a particular Elasticsearch index operation is attempted. If # # the indexing operation doesn't succeed after this many retries, the events are # # dropped. The default is 3. # #max_retries: 3 # # # The maximum number of events to bulk in a single Elasticsearch bulk API index request. # # The default is 50. # #bulk_max_size: 50 # # # Configure http request timeout before failing a request to Elasticsearch. # #timeout: 90 # # # Use SSL settings for HTTPS. # #ssl.enabled: true # # # Configure SSL verification mode. If `none` is configured, all server hosts # # and certificates will be accepted. In this mode, SSL based connections are # # susceptible to man-in-the-middle attacks. Use only for testing. Default is # # `full`. # #ssl.verification_mode: full # # # List of supported/valid TLS versions. By default all TLS versions 1.0 up to # # 1.2 are enabled. # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # # # SSL configuration. By default is off. # # List of root certificates for HTTPS server verifications # #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # # # Certificate for SSL client authentication # #ssl.certificate: "/etc/pki/client/cert.pem" # # # Client Certificate Key # #ssl.key: "/etc/pki/client/cert.key" # # # Optional passphrase for decrypting the Certificate Key. # #ssl.key_passphrase: '' # # # Configure cipher suites to be used for SSL connections # #ssl.cipher_suites: [] # # # Configure curve types for ECDHE based cipher suites # #ssl.curve_types: [] # # # Configure what types of renegotiation are supported. Valid options are # # never, once, and freely. Default is never. # #ssl.renegotiation: never #----------------------------- Logstash output --------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} #------------------------------- Kafka output ---------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true # The list of Kafka broker addresses from where to fetch the cluster metadata. # The cluster metadata contain the actual Kafka brokers events are published # to. #hosts: ["localhost:9092"] # The Kafka topic used for produced events. The setting can be a format string # using any event field. To set the topic from document type use `%{[type]}`. #topic: beats # The Kafka event key setting. Use format string to create unique event key. # By default no event key will be generated. #key: '' # The Kafka event partitioning strategy.
Default hashing strategy is `hash` # using the `output.kafka.key` setting or randomly distributes events if # `output.kafka.key` is not configured. #partition.hash: # If enabled, events will only be published to partitions with reachable # leaders. Default is false. #reachable_only: false # Configure alternative event field names used to compute the hash value. # If empty `output.kafka.key` setting will be used. # Default value is empty list. #hash: [] # Authentication details. Password is required if username is set. #username: '' #password: '' # Kafka version packetbeat is assumed to run against. Defaults to the oldest # supported stable version (currently version 0.8.2.0) #version: 0.8.2 # Metadata update configuration. Metadata do contain leader information # deciding which broker to use when publishing. #metadata: # Max metadata request retry attempts when cluster is in middle of leader # election. Defaults to 3 retries. #retry.max: 3 # Waiting time between retries during leader elections. Default is 250ms. #retry.backoff: 250ms # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m # The number of concurrent load-balanced Kafka output workers. #worker: 1 # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s # The maximum duration a broker will wait for number of required ACKs. The # default is 10s. #broker_timeout: 10s # The number of messages buffered for each Kafka broker. The default is 256. #channel_buffer_size: 256 # The keep-alive period for an active network connection. If 0s, keep-alives # are disabled. The default is 0 seconds. #keep_alive: 0 # Sets the output compression codec. Must be one of none, snappy and gzip. The # default is gzip. #compression: gzip # The maximum permitted size of JSON-encoded messages. Bigger messages will be # dropped. The default value is 1000000 (bytes). This value should be equal to # or less than the broker's message.max.bytes. #max_message_bytes: 1000000 # The ACK reliability level required from broker. 0=no response, 1=wait for # local commit, -1=wait for all replicas to commit. The default is 1. Note: # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently # on error. #required_acks: 1 # The configurable ClientID used for logging, debugging, and auditing # purposes. The default is "beats". #client_id: beats # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. #ssl.enabled: true # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Configure SSL verification mode. If `none` is configured, all server hosts # and certificates will be accepted. In this mode, SSL based connections are # susceptible to man-in-the-middle attacks. Use only for testing. Default is # `full`. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to # 1.2 are enabled. 
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never #------------------------------- Redis output ---------------------------------- #output.redis: # Boolean flag to enable or disable the output module. #enabled: true # The list of Redis servers to connect to. If load balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. #hosts: ["localhost:6379"] # The Redis port to use if hosts does not contain a port number. The default # is 6379. #port: 6379 # The name of the Redis list or channel the events are published to. The # default is packetbeat. #key: packetbeat # The password to authenticate with. The default is no authentication. #password: # The Redis database number where the events are published. The default is 0. #db: 0 # The Redis data type to use for publishing events. If the data type is list, # the Redis RPUSH command is used. If the data type is channel, the Redis # PUBLISH command is used. The default value is list. #datatype: list # The number of workers to use for each host configured to publish events to # Redis. Use this setting along with the loadbalance option. For example, if # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each # host). #worker: 1 # If set to true and multiple hosts or workers are configured, the output # plugin load balances published events onto all Redis hosts. If set to false, # the output plugin sends all events to only one host (determined at random) # and will switch to another host if the currently selected one becomes # unreachable. The default value is true. #loadbalance: true # The Redis connection timeout in seconds. The default is 5 seconds. #timeout: 5s # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Redis request or pipeline. # The default is 2048. #bulk_max_size: 2048 # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The # value must be a URL with a scheme of socks5://. #proxy_url: # This option determines whether Redis hostnames are resolved locally when # using a proxy. The default value is false, which means that name resolution # occurs on the proxy server. #proxy_use_local_resolver: false # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. #ssl.enabled: true # Configure SSL verification mode. If `none` is configured, all server hosts # and certificates will be accepted. In this mode, SSL based connections are # susceptible to man-in-the-middle attacks. Use only for testing. Default is # `full`. 
#ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to # 1.2 are enabled. #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never #------------------------------- File output ----------------------------------- #output.file: # Boolean flag to enable or disable the output module. #enabled: true # Path to the directory where to save the generated files. The option is # mandatory. #path: "/tmp/packetbeat" # Name of the generated files. The default is `packetbeat` and it generates # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. #filename: packetbeat # Maximum size in kilobytes of each file. When this size is reached, and on # every packetbeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 # Maximum number of files under path. When this number of files is reached, # the oldest file is deleted and the rest are shifted from last to first. The # default is 7 files. #number_of_files: 7 # Permissions to use for file creation. The default is 0600. #permissions: 0600 #----------------------------- Console output --------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true # Pretty print json event #pretty: false #================================= Paths ====================================== # The home path for the packetbeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: # The configuration path for the packetbeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} # The data path for the packetbeat installation. This is the default base path # for all the files in which packetbeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data # The logs path for a packetbeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. 
#path.logs: ${path.home}/logs #============================== Dashboards ===================================== {{ elk_macros.setup_dashboards('packetbeat') }} #=============================== Template ====================================== {{ elk_macros.setup_template('packetbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} #================================ Kibana ======================================= {% if (groups['kibana'] | length) > 0 %} {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} {% endif %} #================================ Logging ====================================== {{ elk_macros.beat_logging('packetbeat') }} #============================== Xpack Monitoring =============================== {{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} #================================ HTTP Endpoint ================================ # Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. # Stats can be accessed through http://localhost:5066/stats . For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false # The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066
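# For example, to expose the metrics endpoint locally while troubleshooting,
# the settings above could be uncommented together (illustrative only):
#http.enabled: true
#http.host: localhost
#http.port: 5066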