From ce0e62584f57e84b3d1c1abfa17b9b7d6c68ba8c Mon Sep 17 00:00:00 2001 From: "arseni.lipinski" Date: Tue, 3 Dec 2019 16:57:53 +0100 Subject: [PATCH] Upgrade Elkstack in new API The commit contains upgrade of Elk components, default index pattern creation in new API. Story: 2006376 Task: 38125 Depends-On: https://review.opendev.org/#/c/679781 Change-Id: Ib0b966c0d7db993802b5372156c41b3ebdf1a77f --- .../files/elasticsearch/elasticsearch.yml | 372 +++--------------- devstack/files/kibana/kibana.yml | 132 ++++--- devstack/files/monasca-agent/elastic.yaml | 6 +- devstack/files/monasca-agent/http_check.yaml | 10 + devstack/files/monasca-log-agent/agent.conf | 7 +- .../monasca-log-metrics/log-metrics.conf | 28 +- .../monasca-log-persister/persister.conf | 18 +- .../monasca-log-transformer/transformer.conf | 51 +-- devstack/lib/monasca-log.sh | 105 +++-- devstack/settings | 12 +- ...arch-cluster-upgrade-4b7bdc9c17e0169f.yaml | 5 + 11 files changed, 255 insertions(+), 491 deletions(-) create mode 100644 releasenotes/notes/elasticsearch-cluster-upgrade-4b7bdc9c17e0169f.yaml diff --git a/devstack/files/elasticsearch/elasticsearch.yml b/devstack/files/elasticsearch/elasticsearch.yml index 6d013bb8f..cb72492ec 100644 --- a/devstack/files/elasticsearch/elasticsearch.yml +++ b/devstack/files/elasticsearch/elasticsearch.yml @@ -1,360 +1,88 @@ -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at . +# ======================== Elasticsearch Configuration ========================= # -# The installation procedure is covered at -# . +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. # -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. # -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html # -#node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: # cluster.name: monasca_elastic - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. 
You can tie this node to a specific name: # -node.name: "devstack" - -# Allow this node to be eligible as a master node (enabled by default): -node.master: true - -# Allow this node to store data (enabled by default) -node.data: true - -# You can exploit these settings to design advanced cluster topologies. +# ------------------------------------ Node ------------------------------------ # -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. +# Use a descriptive name for the node: # -#node.master: false -#node.data: true +#node.name: node-1 # -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. +# Add custom attributes to the node: # -#node.master: true -#node.data: false +#node.attr.rack: r1 # -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) +# ----------------------------------- Paths ------------------------------------ # -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: +# Path to directory where to store the data (separate multiple locations by comma): # -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -#node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See and -# -# for more information. - -# Set the number of shards (splits) of an index (5 by default): -# -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -#index.number_of_shards: 1 -#index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. 
- -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory where to store index data allocated for this node. path.data: %ES_DATA_DIR% - +# # Path to log files: +# path.logs: %ES_LOG_DIR% - -# Path to where plugins are installed: -#path.plugins: /path/to/plugins - -# Path to temporary files -#path.work: /path/to/work - -# Path to directory containing configuration (this file and logging.yml): -#path.conf: /path/to/conf - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. # -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. +# ----------------------------------- Memory ----------------------------------- # -# Set this property to true to lock the memory: +# Lock the memory on startup: # -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. +#bootstrap.memory_lock: true # -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): -network.bind_host: %ES_SERVICE_BIND_HOST% - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. -network.publish_host: %ES_SERVICE_PUBLISH_HOST% - -# Set a custom port for the node to node communication (9300 by default): -transport.tcp.port: %ES_SERVICE_PUBLISH_PORT% - -# Enable compression for all communication between nodes (disabled by default): +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. # -#transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: +# Elasticsearch performs poorly when the system is swapping the memory. +# +# ---------------------------------- Network ----------------------------------- +# +# Set the bind address to a specific IP (IPv4 or IPv6): +# +network.host: %ES_SERVICE_BIND_HOST% +# +# Set a custom port for HTTP: # http.port: %ES_SERVICE_BIND_PORT% - -# Set a custom allowed content length: # -#http.max_content_length: 100mb - -# Disable HTTP completely: +# For more information, consult the network module documentation. # -#http.enabled: false - - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. 
- -# There are several types of gateway implementations. For more information, see -# . - -# The default gateway type is the "local" gateway (recommended): +# --------------------------------- Discovery ---------------------------------- # -#gateway.type: local - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: +# Pass an initial list of hosts to perform discovery when this node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] # -#gateway.recover_after_nodes: 1 -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): +#discovery.seed_hosts: ["host1", "host2"] # -#gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): +# Bootstrap the cluster using an initial set of master-eligible nodes: # -#gateway.expected_nodes: 2 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: +cluster.initial_master_nodes: ["%ES_SERVICE_BIND_HOST%"] # -# 1. During the initial recovery +# For more information, consult the discovery and cluster formation module documentation. # -#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# ---------------------------------- Gateway ----------------------------------- # -# 2. During adding/removing nodes, rebalancing, etc +# Block initial recovery after a full cluster restart until N nodes are started: # -#cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +#gateway.recover_after_nodes: 3 # -#indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: +# For more information, consult the gateway module documentation. # -#indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. +# ---------------------------------- Various ----------------------------------- # -discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: +# Require explicit names when deleting indices: # -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. -# -# 1. 
Disable multicast discovery (enabled by default): -# -discovery.zen.ping.multicast.enabled: false -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: -# -# discovery.zen.ping.unicast.hosts: [127.0.0.1] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. -# -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. - -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s - -################################## Security ################################ - -# Uncomment if you want to enable JSONP as a valid return transport on the -# http server. With this enabled, it may pose a security risk, so disabling -# it unless you need it is recommended (it is disabled by default). -# -#http.jsonp.enable: true +#action.destructive_requires_name: true \ No newline at end of file diff --git a/devstack/files/kibana/kibana.yml b/devstack/files/kibana/kibana.yml index 0f7b02b47..cf1544e29 100644 --- a/devstack/files/kibana/kibana.yml +++ b/devstack/files/kibana/kibana.yml @@ -1,78 +1,116 @@ -# Kibana is served by a back end server. This controls which port to use. +# Kibana is served by a back end server. This setting specifies the port to use. server.port: %KIBANA_SERVICE_PORT% -# The host to bind the server to. +# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values. +# The default is 'localhost', which usually means remote machines will not be able to connect. +# To allow connections from remote users, set this parameter to a non-loopback address. server.host: %KIBANA_SERVICE_HOST% -# If you are running kibana behind a proxy, and want to mount it at a path, -# specify that path here. The basePath can't end in a slash. -server.basePath: /dashboard/monitoring/logs_proxy +# Enables you to specify a path to mount Kibana at if you are running behind a proxy. 
+# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath +# from requests it receives, and to prevent a deprecation warning at startup. +# This setting cannot end in a slash. +server.basePath: %KIBANA_SERVER_BASE_PATH% -# The Elasticsearch instance to use for all your queries. -elasticsearch.url: http://%ES_SERVICE_BIND_HOST%:%ES_SERVICE_BIND_PORT% +# Specifies whether Kibana should rewrite requests that are prefixed with +# `server.basePath` or require that they are rewritten by your reverse proxy. +# This setting was effectively always `false` before Kibana 6.3 and will +# default to `true` starting in Kibana 7.0. +server.rewriteBasePath: false -# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false, -# then the host you use to connect to *this* Kibana instance will be sent. -elasticsearch.preserveHost: True +# The maximum payload size in bytes for incoming server requests. +#server.maxPayloadBytes: 1048576 -# Kibana uses an index in Elasticsearch to store saved searches, visualizations -# and dashboards. It will create a new index if it doesn't already exist. +# The Kibana server's name. This is used for display purposes. +#server.name: "your-hostname" + +# The URLs of the Elasticsearch instances to use for all your queries. +elasticsearch.hosts: ["http://%ES_SERVICE_BIND_HOST%:%ES_SERVICE_BIND_PORT%"] + +# When this setting's value is true Kibana uses the hostname specified in the server.host +# setting. When the value of this setting is false, Kibana uses the hostname of the host +# that connects to this Kibana instance. +#elasticsearch.preserveHost: true + +# Kibana uses an index in Elasticsearch to store saved searches, visualizations and +# dashboards. Kibana creates a new index if the index doesn't already exist. kibana.index: ".kibana" # The default application to load. kibana.defaultAppId: "discover" -# If your Elasticsearch is protected with basic auth, these are the user credentials -# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana -# users will still need to authenticate with Elasticsearch (which is proxied through -# the Kibana server) -# elasticsearch.username: "user" -# elasticsearch.password: "pass" +# If your Elasticsearch is protected with basic authentication, these settings provide +# the username and password that the Kibana server uses to perform maintenance on the Kibana +# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which +# is proxied through the Kibana server. +#elasticsearch.username: "kibana" +#elasticsearch.password: "pass" -# SSL for outgoing requests from the Kibana Server to the browser (PEM formatted) -# server.ssl.cert: /path/to/your/server.crt -# server.ssl.key: /path/to/your/server.key +# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively. +# These settings enable SSL for outgoing requests from the Kibana server to the browser. +#server.ssl.enabled: false +#server.ssl.certificate: /path/to/your/server.crt +#server.ssl.key: /path/to/your/server.key -# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted) -# elasticsearch.ssl.cert: /path/to/your/client.crt -# elasticsearch.ssl.key: /path/to/your/client.key +# Optional settings that provide the paths to the PEM-format SSL certificate and key files. +# These files validate that your Elasticsearch backend uses the same key files. 
+#elasticsearch.ssl.certificate: /path/to/your/client.crt +#elasticsearch.ssl.key: /path/to/your/client.key -# If you need to provide a CA certificate for your Elasticsearch instance, put -# the path of the pem file here. -# elasticsearch.ssl.ca: /path/to/your/CA.pem +# Optional setting that enables you to specify a path to the PEM file for the certificate +# authority for your Elasticsearch instance. +#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ] -# Set to false to have a complete disregard for the validity of the SSL -# certificate. -# elasticsearch.ssl.verify: true +# To disregard the validity of SSL certificates, change this setting's value to 'none'. +#elasticsearch.ssl.verificationMode: full -# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to -# request_timeout setting +# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of +# the elasticsearch.requestTimeout setting. elasticsearch.pingTimeout: 1500 -# Time in milliseconds to wait for responses from the back end or elasticsearch. -# This must be > 0 +# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value +# must be a positive integer. elasticsearch.requestTimeout: 300000 -# Time in milliseconds for Elasticsearch to wait for responses from shards. -# Set to 0 to disable. +# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side +# headers, set this value to [] (an empty list). +#elasticsearch.requestHeadersWhitelist: [ authorization ] + +# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten +# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration. +#elasticsearch.customHeaders: {} + +# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable. elasticsearch.shardTimeout: 0 -# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying +# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying. elasticsearch.startupTimeout: 5000 -# Set the path to where you would like the process id file to be created. -# pid.file: /var/run/kibana.pid +# Logs queries sent to Elasticsearch. Requires logging.verbose set to true. +#elasticsearch.logQueries: false -# Set this to true to suppress all logging output. +# Specifies the path where Kibana creates the process ID file. +#pid.file: /var/run/kibana.pid + +# Enables you specify a file where Kibana stores log output. +#logging.dest: stdout + +# Set the value of this setting to true to suppress all logging output. logging.silent: false -# Set this to true to suppress all logging output except for error messages. + +# Set the value of this setting to true to suppress all logging output other than error messages. logging.quiet: false -# Set this to true to log all events, including system usage information and all requests. + +# Set the value of this setting to true to log all events, including system usage information +# and all requests. logging.verbose: true -# monasca-kibana-plugin configuration -monasca-kibana-plugin.auth_uri: %KEYSTONE_AUTH_URI% -monasca-kibana-plugin.enabled: True -monasca-kibana-plugin.cookie.isSecure: False +# Set the interval in milliseconds to sample system and process performance +# metrics. Minimum is 100ms. Defaults to 5000. 
+#ops.interval: 5000 -optimize.useBundleCache: False +# Specifies locale to be used for all localizable strings, dates and number formats. +# Supported languages are the following: English - en , by default , Chinese - zh-CN . +#i18n.locale: "en" + +optimize.useBundleCache: False \ No newline at end of file diff --git a/devstack/files/monasca-agent/elastic.yaml b/devstack/files/monasca-agent/elastic.yaml index 8b0149310..154d20ab5 100644 --- a/devstack/files/monasca-agent/elastic.yaml +++ b/devstack/files/monasca-agent/elastic.yaml @@ -1,3 +1,7 @@ init_config: instances: - - url: http://{{IP}}:9200 +- url: http://{{IP}}:9200 + cluster_stats: true + pshard_stats: true + index_stats: true + pending_task_stats: true \ No newline at end of file diff --git a/devstack/files/monasca-agent/http_check.yaml b/devstack/files/monasca-agent/http_check.yaml index 8a31e06f7..c4da5c431 100644 --- a/devstack/files/monasca-agent/http_check.yaml +++ b/devstack/files/monasca-agent/http_check.yaml @@ -15,3 +15,13 @@ instances: service: influxdb timeout: 3 url: http://127.0.0.1:8086/ping + - name: elasticsearch + dimensions: + service: elasticsearch + timeout: 3 + url: http://{{IP}}:9200/_cat/health +- name: kibana + dimensions: + service: kibana + timeout: 3 + url: http://{{IP}}:5601/api/status diff --git a/devstack/files/monasca-log-agent/agent.conf b/devstack/files/monasca-log-agent/agent.conf index c5091b948..f70aedd29 100644 --- a/devstack/files/monasca-log-agent/agent.conf +++ b/devstack/files/monasca-log-agent/agent.conf @@ -19,12 +19,7 @@ input { add_field => { "dimensions" => { "service" => "system" }} path => "/var/log/syslog" tags => ["syslog"] - } -} - -filter { - if "syslog" in [tags] { - multiline { + codec => multiline { negate => "true" pattern => "^%{SYSLOGTIMESTAMP}" what => "previous" diff --git a/devstack/files/monasca-log-metrics/log-metrics.conf b/devstack/files/monasca-log-metrics/log-metrics.conf index bb8cd36df..cf7447345 100644 --- a/devstack/files/monasca-log-metrics/log-metrics.conf +++ b/devstack/files/monasca-log-metrics/log-metrics.conf @@ -15,13 +15,14 @@ input { - kafka { - zk_connect => "127.0.0.1:2181" - topic_id => "transformed-log" - group_id => "log-metric" - consumer_id => "monasca_log_metrics" - consumer_threads => "4" - } + kafka { + bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%" + topics => ["transformed-log"] + group_id => "log-metric" + client_id => "monasca_log_metrics" + consumer_threads => 4 + codec => json + } } @@ -33,8 +34,8 @@ filter { } else { ruby { code => " - log_level = event['log']['level'].downcase - event['log']['level'] = log_level + log_level = event.get('[log][level]').downcase + event.set('[log][level]', log_level) " } } @@ -46,7 +47,7 @@ filter { ruby { code => " - log_level = event['log']['level'].downcase + log_level = event.get('[log][level]').downcase log_ts = Time.now.to_f * 1000.0 # metric name @@ -57,10 +58,10 @@ filter { metric['name'] = metric_name metric['timestamp'] = log_ts metric['value'] = 1 - metric['dimensions'] = event['log']['dimensions'] + metric['dimensions'] = event.get('[log][dimensions]') metric['value_meta'] = {} - event['metric'] = metric.to_hash + event.set('[metric]',metric.to_hash) " } @@ -73,9 +74,10 @@ filter { output { kafka { - bootstrap_servers => "127.0.0.1:9092" + bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%" topic_id => "metrics" client_id => "monasca_log_metrics" compression_type => "none" + codec => json } } diff --git a/devstack/files/monasca-log-persister/persister.conf 
b/devstack/files/monasca-log-persister/persister.conf index c0f581e37..3ab01a5c0 100644 --- a/devstack/files/monasca-log-persister/persister.conf +++ b/devstack/files/monasca-log-persister/persister.conf @@ -16,8 +16,9 @@ # input { kafka { - zk_connect => "127.0.0.1:2181" - topic_id => "transformed-log" + bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%" + codec => json + topics => ["transformed-log"] group_id => "logstash-persister" } } @@ -42,9 +43,9 @@ filter { if "dimensions" in [log] { ruby { code => " - fieldHash = event['log']['dimensions'] + fieldHash = event.get('[log][dimensions]') fieldHash.each do |key, value| - event[key] = value + event.set(key,value) end " } @@ -52,10 +53,10 @@ filter { mutate { add_field => { - message => "%{[log][message]}" - log_level => "%{[log][level]}" - tenant => "%{[meta][tenantId]}" - region => "%{[meta][region]}" + "message" => "%{[log][message]}" + "log_level" => "%{[log][level]}" + "tenant" => "%{[meta][tenantId]}" + "region" => "%{[meta][region]}" } remove_field => ["@version", "host", "type", "tags" ,"_index_date", "meta", "log"] } @@ -66,6 +67,5 @@ output { index => "logs-%{tenant}-%{index_date}" document_type => "log" hosts => ["%ES_SERVICE_BIND_HOST%"] - flush_size => 500 } } diff --git a/devstack/files/monasca-log-transformer/transformer.conf b/devstack/files/monasca-log-transformer/transformer.conf index 0e2f7c7f2..c284e3ee0 100644 --- a/devstack/files/monasca-log-transformer/transformer.conf +++ b/devstack/files/monasca-log-transformer/transformer.conf @@ -16,19 +16,20 @@ # input { kafka { - zk_connect => "127.0.0.1:2181" - topic_id => "log" + bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%" + topics => ["log"] group_id => "transformer-logstash-consumer" + codec => json } } filter { ruby { - code => "event['message_tmp'] = event['log']['message'][0..49]" + code => 'event.set("message_tmp", event.get("[log][message]")[0..49])' } grok { match => { - "[message_tmp]" => "(?i)(?AUDIT|CRITICAL|DEBUG|INFO|TRACE|ERR(OR)?|WARN(ING)?)|\"level\":\s?(?\d{2})" + "message_tmp" => "(?i)(?AUDIT|CRITICAL|DEBUG|INFO|TRACE|ERR(OR)?|WARN(ING)?)|\"level\":\s?(?\d{2})" } } if ! 
[log_level] { @@ -39,49 +40,49 @@ filter { } } ruby { - init => " + init => ' LOG_LEVELS_MAP = { # SYSLOG - 'warn' => :Warning, - 'err' => :Error, + "warn" => "Warning", + "err" => "Error", # Bunyan errcodes - '10' => :Trace, - '20' => :Debug, - '30' => :Info, - '40' => :Warning, - '50' => :Error, - '60' => :Fatal + "10" => "Trace", + "20" => "Debug", + "30" => "Info", + "40" => "Warning", + "50" => "Error", + "60" => "Fatal" } - " - code => " - if event['log_level'] + ' + code => ' + if event.get("log_level") # keep original value - log_level = event['log_level'].downcase + log_level = event.get("log_level").downcase if LOG_LEVELS_MAP.has_key?(log_level) - event['log_level_original'] = event['log_level'] - event['log_level'] = LOG_LEVELS_MAP[log_level] + event.set("log_level_original",event.get("log_level")) + event.set("log_level",LOG_LEVELS_MAP[log_level]) else - event['log_level'] = log_level.capitalize + event.set("log_level",log_level.capitalize) end else - event['log_level'] = 'Unknown' + event.set("log_level","Unknown") end - " + ' } mutate { add_field => { "[log][level]" => "%{log_level}" } - # remove temporary fields - remove_field => ["log_level", "message_tmp"] + remove_field => ["message","log_level", "message_tmp"] } } output { kafka { + codec => json bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%" topic_id => "transformed-log" } -} +} \ No newline at end of file diff --git a/devstack/lib/monasca-log.sh b/devstack/lib/monasca-log.sh index 49eeddef8..cd000b014 100644 --- a/devstack/lib/monasca-log.sh +++ b/devstack/lib/monasca-log.sh @@ -38,7 +38,7 @@ KIBANA_DIR=$DEST/kibana KIBANA_CFG_DIR=$KIBANA_DIR/config LOGSTASH_DIR=$DEST/logstash - +LOGSTASH_DATA_DIR=$DEST/logstash-data ES_SERVICE_BIND_HOST=${ES_SERVICE_BIND_HOST:-${SERVICE_HOST}} ES_SERVICE_BIND_PORT=${ES_SERVICE_BIND_PORT:-9200} @@ -49,7 +49,14 @@ KIBANA_SERVICE_HOST=${KIBANA_SERVICE_HOST:-${SERVICE_HOST}} KIBANA_SERVICE_PORT=${KIBANA_SERVICE_PORT:-5601} KIBANA_SERVER_BASE_PATH=${KIBANA_SERVER_BASE_PATH:-"/dashboard/monitoring/logs_proxy"} - +# Settings needed for Elasticsearch +# Elasticsearch uses a lot of file descriptors or file handles. +# Increase the limit on the number of open files descriptors for the user running Elasticsearch to 65,536 or higher. +LIMIT_NOFILE=${LIMIT_NOFILE:-65535} +# Elasticsearch uses a mmapfs directory by default to store its indices. +# The default operating system limits on mmap counts is likely to be too low, +# which may result in out of memory exceptions, increase to at least 262144. 
+VM_MAX_MAP_COUNT=${VM_MAX_MAP_COUNT:-262144} MONASCA_LOG_API_BASE_URI=https://${MONASCA_API_BASE_URI}/logs @@ -79,7 +86,6 @@ function pre_install_logs_services { } function install_monasca_log { - build_kibana_plugin install_log_agent if $USE_OLD_LOG_API = true; then install_old_log_api @@ -107,7 +113,6 @@ function configure_monasca_log { configure_kafka configure_elasticsearch configure_kibana - install_kibana_plugin if $USE_OLD_LOG_API = true; then configure_old_monasca_log_api fi @@ -258,6 +263,7 @@ function start_monasca_log { start_monasca_log_api fi start_monasca_log_agent + create_default_index_pattern } function clean_monasca_log { @@ -287,8 +293,8 @@ function install_logstash { if is_logstash_required; then echo_summary "Installing Logstash ${LOGSTASH_VERSION}" - local logstash_tarball=logstash-${LOGSTASH_VERSION}.tar.gz - local logstash_url=http://download.elastic.co/logstash/logstash/${logstash_tarball} + local logstash_tarball=logstash-oss-${LOGSTASH_VERSION}.tar.gz + local logstash_url=https://artifacts.elastic.co/downloads/logstash/${logstash_tarball} local logstash_dest logstash_dest=`get_extra_file ${logstash_url}` @@ -297,6 +303,9 @@ function install_logstash { sudo chown -R $STACK_USER $DEST/logstash-${LOGSTASH_VERSION} ln -sf $DEST/logstash-${LOGSTASH_VERSION} $LOGSTASH_DIR + + sudo mkdir -p $LOGSTASH_DATA_DIR + sudo chown $STACK_USER:monasca $LOGSTASH_DATA_DIR fi } @@ -314,8 +323,8 @@ function install_elasticsearch { if is_service_enabled elasticsearch; then echo_summary "Installing ElasticSearch ${ELASTICSEARCH_VERSION}" - local es_tarball=elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz - local es_url=https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/${ELASTICSEARCH_VERSION}/${es_tarball} + local es_tarball=elasticsearch-oss-${ELASTICSEARCH_VERSION}-linux-x86_64.tar.gz + local es_url=https://artifacts.elastic.co/downloads/elasticsearch/${es_tarball} local es_dest es_dest=`get_extra_file ${es_url}` @@ -344,13 +353,17 @@ function configure_elasticsearch { sudo sed -e " s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g; s|%ES_SERVICE_BIND_PORT%|$ES_SERVICE_BIND_PORT|g; - s|%ES_SERVICE_PUBLISH_HOST%|$ES_SERVICE_PUBLISH_HOST|g; - s|%ES_SERVICE_PUBLISH_PORT%|$ES_SERVICE_PUBLISH_PORT|g; s|%ES_DATA_DIR%|$ELASTICSEARCH_DATA_DIR|g; s|%ES_LOG_DIR%|$ELASTICSEARCH_LOG_DIR|g; " -i $ELASTICSEARCH_CFG_DIR/elasticsearch.yml ln -sf $ELASTICSEARCH_CFG_DIR/elasticsearch.yml $GATE_CONFIGURATION_DIR/elasticsearch.yml + + echo "[Service]" | sudo tee --append /etc/systemd/system/devstack\@elasticsearch.service > /dev/null + echo "LimitNOFILE=$LIMIT_NOFILE" | sudo tee --append /etc/systemd/system/devstack\@elasticsearch.service > /dev/null + + echo "vm.max_map_count=$VM_MAX_MAP_COUNT" | sudo tee --append /etc/sysctl.conf > /dev/null + sudo sysctl -w vm.max_map_count=$VM_MAX_MAP_COUNT fi } @@ -380,9 +393,8 @@ function install_kibana { if is_service_enabled kibana; then echo_summary "Installing Kibana ${KIBANA_VERSION}" - local kibana_tarball=kibana-${KIBANA_VERSION}.tar.gz - local kibana_tarball_url=http://download.elastic.co/kibana/kibana/${kibana_tarball} - + local kibana_tarball=kibana-oss-${KIBANA_VERSION}.tar.gz + local kibana_tarball_url=https://artifacts.elastic.co/downloads/kibana/${kibana_tarball} local kibana_tarball_dest kibana_tarball_dest=`get_extra_file ${kibana_tarball_url}` @@ -406,9 +418,9 @@ function configure_kibana { sudo sed -e " s|%KIBANA_SERVICE_HOST%|$KIBANA_SERVICE_HOST|g; s|%KIBANA_SERVICE_PORT%|$KIBANA_SERVICE_PORT|g; - 
s|%KIBANA_SERVER_BASE_PATH%|$KIBANA_SERVER_BASE_PATH|g; s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g; s|%ES_SERVICE_BIND_PORT%|$ES_SERVICE_BIND_PORT|g; + s|%KIBANA_SERVER_BASE_PATH%|$KIBANA_SERVER_BASE_PATH|g; s|%KEYSTONE_AUTH_URI%|$KEYSTONE_AUTH_URI|g; " -i $KIBANA_CFG_DIR/kibana.yml @@ -416,20 +428,6 @@ function configure_kibana { fi } -function install_kibana_plugin { - if is_service_enabled kibana; then - echo_summary "Install Kibana plugin" - - # note(trebskit) that needs to happen after kibana received - # its configuration otherwise the plugin fails to be installed - - local pkg=file://$DEST/monasca-kibana-plugin.tar.gz - - $KIBANA_DIR/bin/kibana plugin -r monasca-kibana-plugin - $KIBANA_DIR/bin/kibana plugin -i monasca-kibana-plugin -u $pkg - fi -} - function clean_kibana { if is_service_enabled kibana; then echo_summary "Cleaning Kibana ${KIBANA_VERSION}" @@ -443,12 +441,22 @@ function clean_kibana { function start_kibana { if is_service_enabled kibana; then echo_summary "Starting Kibana ${KIBANA_VERSION}" - local kibanaSleepTime=${KIBANA_SLEEP_TIME:-90} # kibana takes some time to load up + local kibanaSleepTime=${KIBANA_SLEEP_TIME:-120} # kibana takes some time to load up local kibanaCFG="$KIBANA_CFG_DIR/kibana.yml" run_process_sleep "kibana" "$KIBANA_DIR/bin/kibana --config $kibanaCFG" $kibanaSleepTime fi } +function create_default_index_pattern { + local tenant_id + tenant_id=`get_or_create_project "mini-mon"` + local index_pattern="logs-$tenant_id*" + + curl -XPOST "$KIBANA_SERVICE_HOST:$KIBANA_SERVICE_PORT/api/saved_objects/index-pattern/$index_pattern" \ + -H 'kbn-xsrf: true' -H "Content-Type: application/json" -d '{"attributes":{"title":"'$index_pattern'", "timeFieldName": "@timestamp"}}' + curl -X GET "$KIBANA_SERVICE_HOST:$KIBANA_SERVICE_PORT/api/saved_objects/index-pattern/$index_pattern" -H 'kbn-xsrf: true' +} + function configure_monasca_log_persister { if is_service_enabled monasca-log-persister; then echo_summary "Configuring monasca-log-persister" @@ -461,6 +469,8 @@ function configure_monasca_log_persister { sudo sed -e " s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g; + s|%KAFKA_SERVICE_HOST%|$KAFKA_SERVICE_HOST|g; + s|%KAFKA_SERVICE_PORT%|$KAFKA_SERVICE_PORT|g; " -i $LOG_PERSISTER_DIR/persister.conf ln -sf $LOG_PERSISTER_DIR/persister.conf $GATE_CONFIGURATION_DIR/log-persister.conf @@ -478,7 +488,7 @@ function start_monasca_log_persister { if is_service_enabled monasca-log-persister; then echo_summary "Starting monasca-log-persister" local logstash="$LOGSTASH_DIR/bin/logstash" - run_process "monasca-log-persister" "$logstash -f $LOG_PERSISTER_DIR/persister.conf" + run_process "monasca-log-persister" "$logstash -f $LOG_PERSISTER_DIR/persister.conf --path.data $LOGSTASH_DATA_DIR/monasca-log-persister" fi } @@ -512,7 +522,7 @@ function start_monasca_log_transformer { if is_service_enabled monasca-log-transformer; then echo_summary "Starting monasca-log-transformer" local logstash="$LOGSTASH_DIR/bin/logstash" - run_process "monasca-log-transformer" "$logstash -f $LOG_TRANSFORMER_DIR/transformer.conf" + run_process "monasca-log-transformer" "$logstash -f $LOG_TRANSFORMER_DIR/transformer.conf --path.data $LOGSTASH_DATA_DIR/monasca-log-transformer" fi } @@ -546,15 +556,15 @@ function start_monasca_log_metrics { if is_service_enabled monasca-log-metrics; then echo_summary "Starting monasca-log-metrics" local logstash="$LOGSTASH_DIR/bin/logstash" - run_process "monasca-log-metrics" "$logstash -f $LOG_METRICS_DIR/log-metrics.conf" + run_process "monasca-log-metrics" 
"$logstash -f $LOG_METRICS_DIR/log-metrics.conf --path.data $LOGSTASH_DATA_DIR/monasca-log-metrics" fi } function install_log_agent { if is_service_enabled monasca-log-agent; then - echo_summary "Installing monasca-log-agent [monasca-output-plugin]" + echo_summary "Installing monasca-log-agent [logstash-output-monasca-plugin]" - $LOGSTASH_DIR/bin/plugin install --version \ + $LOGSTASH_DIR/bin/logstash-plugin install --version \ "${LOGSTASH_OUTPUT_MONASCA_VERSION}" logstash-output-monasca_log_api fi } @@ -633,7 +643,7 @@ function start_monasca_log_agent { if is_service_enabled monasca-log-agent; then echo_summary "Starting monasca-log-agent" local logstash="$LOGSTASH_DIR/bin/logstash" - run_process "monasca-log-agent" "$logstash -f $LOG_AGENT_DIR/agent.conf" "root" "root" + run_process "monasca-log-agent" "$logstash -f $LOG_AGENT_DIR/agent.conf --path.data $LOGSTASH_DATA_DIR/monasca-log-agent" "root" "root" fi } @@ -660,31 +670,6 @@ function clean_gate_config_holder { sudo rm -rf $GATE_CONFIGURATION_DIR || true } -function build_kibana_plugin { - if is_service_enabled kibana; then - echo "Building Kibana plugin" - - git_clone $MONASCA_KIBANA_PLUGIN_REPO $MONASCA_KIBANA_PLUGIN_DIR \ - $MONASCA_KIBANA_PLUGIN_BRANCH - - pushd $MONASCA_KIBANA_PLUGIN_DIR - - local monasca_kibana_plugin_version - monasca_kibana_plugin_version="$(python -c 'import json; \ - obj = json.load(open("package.json")); print obj["version"]')" - - npm install - npm run package - - local pkg=$MONASCA_KIBANA_PLUGIN_DIR/target/monasca-kibana-plugin-${monasca_kibana_plugin_version}.tar.gz - local easyPkg=$DEST/monasca-kibana-plugin.tar.gz - - ln -sf $pkg $easyPkg - - popd - fi -} - function configure_kafka { echo_summary "Configuring Kafka topics" /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \ diff --git a/devstack/settings b/devstack/settings index 0191a1141..9da60f084 100644 --- a/devstack/settings +++ b/devstack/settings @@ -122,10 +122,10 @@ STORM_VERSION=${STORM_VERSION:-1.2.2} GO_VERSION=${GO_VERSION:-"1.7.1"} NODE_JS_VERSION=${NODE_JS_VERSION:-"4.0.0"} NVM_VERSION=${NVM_VERSION:-"0.32.1"} -KIBANA_VERSION=${KIBANA_VERSION:-4.6.3-linux-x86_64} -LOGSTASH_VERSION=${LOGSTASH_VERSION:-2.4.1} -ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-2.4.6} -LOGSTASH_OUTPUT_MONASCA_VERSION=${LOGSTASH_OUTPUT_MONASCA_VERSION:-1.0.4} +KIBANA_VERSION=${KIBANA_VERSION:-7.3.0-linux-x86_64} +LOGSTASH_VERSION=${LOGSTASH_VERSION:-7.3.0} +ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-7.3.0} +LOGSTASH_OUTPUT_MONASCA_VERSION=${LOGSTASH_OUTPUT_MONASCA_VERSION:-2.0.0} # Path settings MONASCA_BASE=${DEST} @@ -218,10 +218,6 @@ MONASCA_API_LOG_DIR=${MONASCA_API_LOG_DIR:-/var/log/monasca/api} MONASCA_API_USE_MOD_WSGI=${MONASCA_API_USE_MOD_WSGI:-$ENABLE_HTTPD_MOD_WSGI_SERVICES} MONASCA_API_UWSGI_CONF=${MONASCA_API_UWSGI_CONF:-$MONASCA_API_CONF_DIR/api-uwsgi.ini} -MONASCA_KIBANA_PLUGIN_REPO=${MONASCA_KIBANA_PLUGIN_REPO:-${GIT_BASE}/openstack/monasca-kibana-plugin.git} -MONASCA_KIBANA_PLUGIN_BRANCH=${MONASCA_KIBANA_PLUGIN_BRANCH:-master} -MONASCA_KIBANA_PLUGIN_DIR=${DEST}/monasca-kibana-plugin - # OLD LOG-API CONFIGURATION MONASCA_LOG_API_SERVICE_HOST=${MONASCA_LOG_API_SERVICE_HOST:-${SERVICE_HOST}} MONASCA_LOG_API_SERVICE_PORT=${MONASCA_LOG_API_SERVICE_PORT:-5607} diff --git a/releasenotes/notes/elasticsearch-cluster-upgrade-4b7bdc9c17e0169f.yaml b/releasenotes/notes/elasticsearch-cluster-upgrade-4b7bdc9c17e0169f.yaml new file mode 100644 index 000000000..2bb11523c --- /dev/null +++ 
b/releasenotes/notes/elasticsearch-cluster-upgrade-4b7bdc9c17e0169f.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    A guide to upgrading to Elasticsearch 7.x can be found here:
+    https://www.elastic.co/guide/en/cloud/current/ec-upgrading-v7.html
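
Reviewer note (not part of the patch): as a quick post-upgrade sanity check, assuming the default DevStack ports configured above (Elasticsearch on 9200, Kibana on 5601) and that SERVICE_HOST points at the node running the stack, something like the sketch below can confirm that the 7.x services are up and that the default index pattern was created. It only reuses endpoints and helpers already introduced in this change (the _cat/health and /api/status URLs from http_check.yaml, and the saved_objects lookup from create_default_index_pattern in devstack/lib/monasca-log.sh); exact hosts depend on local settings.

    # Elasticsearch version and cluster health
    curl -s "http://${SERVICE_HOST}:9200" | grep '"number"'
    curl -s "http://${SERVICE_HOST}:9200/_cat/health"

    # Kibana status
    curl -s "http://${SERVICE_HOST}:5601/api/status" -H 'kbn-xsrf: true'

    # Default index pattern, looked up the same way as in create_default_index_pattern
    tenant_id=$(get_or_create_project "mini-mon")
    curl -s "http://${SERVICE_HOST}:5601/api/saved_objects/index-pattern/logs-${tenant_id}*" \
        -H 'kbn-xsrf: true'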