Update ElasticSearch and Kibana to 5.4.2

This commit attempts to update logging stack to newer versions. The
images used are official docker.io ones. Update of ES required a newer
version of fluentd as well.

Upgrading fluentd resulted in hundreds of MBs of logs being printed by
the fluentd container right after it was up and running. The root cause of this
was multiline parser plugin which created infinite loops (pay attention
to nested 'got incomplete line before first line' log):
https://pastebin.com/1zsyEDN8 .
I've removed the part that caused this error as parsing multiline
tracebacks from OpenStack services will be handled when the following
PS is merged:
https://review.openstack.org/#/c/490036/

Change-Id: I3e2c5c817f17dd344b56b717251afadc03e3f186
Co-Authored-By: Mateusz Blaszkowski <mateusz.blaszkowski@intel.com>
This commit is contained in:
Michał Dulko 2017-07-25 12:27:51 +02:00 committed by Mateusz Blaszkowski
parent 877ee5a961
commit 56e6beed47
16 changed files with 67 additions and 137 deletions

View File

@ -19,7 +19,8 @@ set -ex
COMMAND="${@:-start}"
function start () {
exec /usr/share/elasticsearch/bin/elasticsearch
ulimit -l unlimited
exec /docker-entrypoint.sh elasticsearch
}
function stop () {

View File

@ -19,7 +19,7 @@ limitations under the License.
set -ex
function create_index () {
index_result=$(curl -XPUT "$ELASTICSEARCH_ENDPOINT/test_index?pretty" -H 'Content-Type: application/json' -d'
index_result=$(curl -XPUT "${ELASTICSEARCH_ENDPOINT}test_index?pretty" -H 'Content-Type: application/json' -d'
{
"settings" : {
"index" : {
@ -39,7 +39,7 @@ function create_index () {
}
function insert_test_data () {
insert_result=$(curl -XPUT "$ELASTICSEARCH_ENDPOINT/sample_index/sample_type/123/_create?pretty" -H 'Content-Type: application/json' -d'
insert_result=$(curl -XPUT "${ELASTICSEARCH_ENDPOINT}sample_index/sample_type/123/_create?pretty" -H 'Content-Type: application/json' -d'
{
"name" : "Elasticsearch",
"message" : "Test data text entry"
@ -56,7 +56,7 @@ function insert_test_data () {
function check_hits () {
total_hits=$(curl -XGET "$ELASTICSEARCH_ENDPOINT/_search?pretty" -H 'Content-Type: application/json' -d'
total_hits=$(curl -XGET "${ELASTICSEARCH_ENDPOINT}_search?pretty" -H 'Content-Type: application/json' -d'
{
"query" : {
"bool": {

View File

@ -21,5 +21,5 @@ metadata:
data:
elasticsearch.yml: |+
{{- tuple .Values.conf.elasticsearch "etc/_elasticsearch.yml.tpl" . | include "helm-toolkit.utils.configmap_templater" }}
logging.yml: |+
{{- tuple .Values.conf.elasticsearch "etc/_logging.yml.tpl" . | include "helm-toolkit.utils.configmap_templater" }}
log4j2.properties: |+
{{- tuple .Values.conf.elasticsearch "etc/_log4j2.properties.tpl" . | include "helm-toolkit.utils.configmap_templater" }}

View File

@ -39,6 +39,7 @@ spec:
- name: memory-map-increase
securityContext:
privileged: true
runAsUser: 0
image: {{ .Values.images.memory_init }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
@ -100,10 +101,10 @@ spec:
subPath: elasticsearch.yml
readOnly: true
- name: elastic-etc
mountPath: /usr/share/elasticsearch/config/logging.yml
subPath: logging.yml
mountPath: /usr/share/elasticsearch/config/log4j2.properties
subPath: log4j2.properties
readOnly: true
- mountPath: /data
- mountPath: /var/lib/elasticsearch/data
name: storage
volumes:
- name: elastic-bin

View File

@ -39,6 +39,7 @@ spec:
- name: memory-map-increase
securityContext:
privileged: true
runAsUser: 0
image: {{ .Values.images.memory_init }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
@ -103,7 +104,11 @@ spec:
mountPath: /usr/share/elasticsearch/config/logging.yml
subPath: logging.yml
readOnly: true
- mountPath: /data
- name: elastic-etc
mountPath: /usr/share/elasticsearch/config/log4j2.properties
subPath: log4j2.properties
readOnly: true
- mountPath: /var/lib/elasticsearch/data
name: storage
volumes:
- name: elastic-bin

View File

@ -21,6 +21,7 @@ node:
master: ${NODE_MASTER}
data: ${NODE_DATA}
name: ${NODE_NAME}
max_local_storage_nodes: {{ .Values.pod.replicas.data }}
network.host: {{ .Values.conf.elasticsearch.network.host }}

View File

@ -0,0 +1,34 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console

View File

@ -1,95 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: DEBUG
# deprecation logging, turn to DEBUG to see them
deprecation: INFO, deprecation_log_file
org.apache.http: INFO
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
# peer shard recovery
#indices.recovery: DEBUG
# discovery
#discovery: TRACE
index.search.slowlog: TRACE, index_search_slow_log_file
index.indexing.slowlog: TRACE, index_indexing_slow_log_file
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
deprecation: false
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
# Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
# For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
#file:
#type: extrasRollingFile
#file: ${path.logs}/${cluster.name}.log
#rollingPolicy: timeBased
#rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
#layout:
#type: pattern
#conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
deprecation_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_deprecation.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_search_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_indexing_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

View File

@ -36,6 +36,7 @@ spec:
- name: memory-map-increase
securityContext:
privileged: true
runAsUser: 0
image: {{ .Values.images.memory_init }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.data | include "helm-toolkit.snippets.kubernetes_resources" | indent 8 }}
@ -95,8 +96,8 @@ spec:
subPath: elasticsearch.yml
readOnly: true
- name: elastic-etc
mountPath: /usr/share/elasticsearch/config/logging.yml
subPath: logging.yml
mountPath: /usr/share/elasticsearch/config/log4j2.properties
subPath: log4j2.properties
readOnly: true
- name: storage
mountPath: /var/lib/elasticsearch/data

View File

@ -35,7 +35,7 @@ conf:
min_masters: 2
env:
java_opts: "-Xms256m -Xmx256m"
logging:
log4j2:
override:
prefix:
append:
@ -60,9 +60,9 @@ endpoints:
default: 9200
images:
memory_init: docker.io/kolla/ubuntu-source-elasticsearch:4.0.0
elasticsearch: docker.io/kolla/ubuntu-source-elasticsearch:4.0.0
helm_tests: docker.io/kolla/ubuntu-source-elasticsearch:4.0.0
memory_init: docker.io/kolla/ubuntu-source-kolla-toolbox:4.0.0
elasticsearch: docker.io/elasticsearch:5.4.2
helm_tests: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
pull_policy: "IfNotPresent"
pod:

View File

@ -21,7 +21,7 @@ set -ex
# Tests whether fluentd has successfully indexed data into Elasticsearch under
# the logstash-* index via the fluent-elasticsearch plugin
function check_logstash_index () {
total_hits=$(curl -XGET "$ELASTICSEARCH_ENDPOINT/logstash-*/fluentd/_search?pretty" -H 'Content-Type: application/json' \
total_hits=$(curl -XGET "${ELASTICSEARCH_ENDPOINT}logstash-*/fluentd/_search?pretty" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "$total_hits" -gt 0 ]; then
echo "PASS: Successful hits on logstash-* index, provided by fluentd!"
@ -34,7 +34,7 @@ function check_logstash_index () {
# Tests whether fluentd has successfully tagged data with the kubernetes.var.*
# prefix via the fluent-kubernetes plugin
function check_kubernetes_tag () {
total_hits=$(curl -XGET "$ELASTICSEARCH_ENDPOINT/logstash-*/fluentd/_search?q=tag:kubernetes.var.*" -H 'Content-Type: application/json' \
total_hits=$(curl -XGET "${ELASTICSEARCH_ENDPOINT}logstash-*/fluentd/_search?q=tag:kubernetes.var.*" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "$total_hits" -gt 0 ]; then
echo "PASS: Successful hits on logstash-* index, provided by fluentd!"

View File

@ -101,6 +101,7 @@
# Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
<source>
type tail
path /var/lib/docker/containers/*/*-json.log
@ -111,25 +112,6 @@
read_from_head true
</source>
# Used to scan multiple lines for Python error stacktraces. Tag with stack.*
<source>
type tail
path /var/lib/docker/containers/*/*-json.log
pos_file stack.pos
time_format %Y-%m-%dT%H:%M:%S
tag stack.*
multiline_flush_interval 1s
format multiline
format_firstline /ERROR/
format1 /.*/ERROR/(?<log>.*)$/
key_name log
read_from_head true
</source>
<filter stack.**>
type kubernetes_metadata
</filter>
<filter kubernetes.**>
type kubernetes_metadata
</filter>

View File

@ -17,7 +17,7 @@
# Declare variables to be passed into your templates.
images:
fluentd: gcr.io/google_containers/fluentd-elasticsearch:1.11
fluentd: gcr.io/google_containers/fluentd-elasticsearch:1.23
helm_tests: docker.io/kolla/ubuntu-source-elasticsearch:3.0.3
pull_policy: IfNotPresent

View File

@ -19,7 +19,7 @@ set -ex
COMMAND="${@:-start}"
function start () {
exec /opt/kibana/bin/kibana
exec kibana
}
function stop () {

View File

@ -47,9 +47,9 @@ spec:
subPath: kibana.sh
readOnly: true
- name: etckibana
mountPath: /opt/kibana/config
mountPath: /etc/kibana
- name: kibana-etc
mountPath: /opt/kibana/config/kibana.yml
mountPath: /etc/kibana/kibana.yml
subPath: kibana.yml
readOnly: true
volumes:

View File

@ -17,7 +17,7 @@ labels:
node_selector_value:
images:
kibana: 'docker.io/kolla/ubuntu-source-kibana:3.0.3'
kibana: 'docker.io/kibana:5.4.2'
pull_policy: IfNotPresent
conf: