Upgrade Apache Kafka to version 2.0.1

Upgrade Apache Kafka to the current stable version 2.0.1.

Issues have been observed with the legacy Kafka producer used in the
notification engine together with newer Kafka broker versions.

The following changes are included:

* The deprecated configuration option `advertised.host.name` is replaced
  with `listeners` (a short sketch follows this list).
* The default `log4j.properties` shipped with the Kafka distribution is used.
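
For illustration, a minimal sketch of the `server.properties` change (the
host name is a placeholder):

    # before (deprecated):
    port=9092
    advertised.host.name=broker.example.com

    # after:
    listeners=PLAINTEXT://broker.example.com:9092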

Story: 2005624
Task: 30933
Change-Id: I898b511b2ab8f68e4850faab2098044cd3f94ee7
Witek Bedyk 2019-07-15 22:27:30 +02:00
parent ec6daef9dd
commit dfd9423526
6 changed files with 68 additions and 122 deletions


@@ -1,72 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log4j.rootLogger=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=50MB
log4j.appender.kafkaAppender.MaxBackupIndex=4
log4j.appender.kafkaAppender.File=/var/log/kafka/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.MaxFileSize=50MB
log4j.appender.stateChangeAppender.MaxBackupIndex=4
log4j.appender.stateChangeAppender.File=/var/log/kafka/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.MaxFileSize=50MB
log4j.appender.controllerAppender.MaxBackupIndex=4
log4j.appender.controllerAppender.File=/var/log/kafka/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Turn on all our debugging info
#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
#log4j.logger.kafka.perf=DEBUG, kafkaAppender
#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
log4j.logger.kafka=WARN, kafkaAppender
# Tracing requests results in large logs
#log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
#log4j.appender.requestAppender.MaxFileSize=50MB
#log4j.appender.requestAppender.MaxBackupIndex=4
#log4j.appender.requestAppender.File=/var/log/kafka/kafka-request.log
#log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
#log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
#
#log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
#log4j.additivity.kafka.network.RequestChannel$=false
#
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
#log4j.logger.kafka.request.logger=TRACE, requestAppender
#log4j.additivity.kafka.request.logger=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false
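
With the custom file above deleted, the broker falls back to the
log4j.properties bundled with the Kafka distribution. A hedged sketch,
assuming the stock Kafka start scripts: an operator who still needs custom
logging can point the broker at an alternative file (the path here is only
an example):

    # read by kafka-run-class.sh / kafka-server-start.sh at startup
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:/etc/kafka/log4j.properties"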


@@ -1,19 +1,22 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
@@ -22,32 +25,32 @@ broker.id=0
############################# Socket Server Settings #############################
# The port the socket server listens on
port=9092
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
#host.name=127.0.0.1
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads handling network requests
num.network.threads=2
# The number of threads doing disk I/O
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=2
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
@@ -59,9 +62,14 @@ socket.request.max.bytes=104857600
log.dirs=/var/kafka
auto.create.topics.enable=false
# The number of logical partitions per topic per server. More partitions allow greater parallelism
# for consumption, but also mean more files.
num.partitions=2
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
# Specify the message format version the broker will use to append messages to
# the logs. Once consumers are upgraded, one can change the message format and
@@ -70,6 +78,13 @@ num.partitions=2
# (TODO) Use new message format after updating consumers
log.message.format.version=0.9.0.0
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
@@ -77,7 +92,7 @@ log.message.format.version=0.9.0.0
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
@@ -94,11 +109,11 @@ log.flush.interval.ms=1000
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=24
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
log.retention.bytes=104857600
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
@@ -106,11 +121,7 @@ log.segment.bytes=104857600
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
@@ -122,7 +133,14 @@ log.cleaner.enable=false
zookeeper.connect=127.0.0.1:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
zookeeper.connection.timeout.ms=6000
#kafka delete will have no impact if this is not set true
delete.topic.enable=true
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
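
One way to verify that the broker picked up the intended listener after
these changes is to read its registration from ZooKeeper; a sketch using
the devstack defaults above (the install path and broker id are
assumptions, matching broker.id=0 and zookeeper.connect=127.0.0.1:2181
in this file):

    /opt/kafka/bin/zookeeper-shell.sh 127.0.0.1:2181 get /brokers/ids/0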


@@ -91,7 +91,7 @@ configure_monasca-notification() {
dbDriver="monasca_notification.common.repositories.orm.orm_repo:OrmRepo"
fi
iniset "${MONASCA_NOTIFICATION_CONF}" kafka url ${DATABASE_HOST}:9092
iniset "${MONASCA_NOTIFICATION_CONF}" kafka url ${SERVICE_HOST}:9092
iniset "${MONASCA_NOTIFICATION_CONF}" database repo_driver ${dbDriver}
iniset "${MONASCA_NOTIFICATION_CONF}" email_notifier grafana_url ${SERVICE_HOST}:3000
iniset "${MONASCA_NOTIFICATION_CONF}" keystone auth_url ${SERVICE_HOST}/identity/v3


@@ -363,20 +363,14 @@ function install_kafka {
sudo ln -sf /opt/kafka/config /etc/kafka
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/kafka/log4j.properties /etc/kafka/log4j.properties
sudo chown kafka:kafka /etc/kafka/log4j.properties
sudo chmod 644 /etc/kafka/log4j.properties
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/kafka/server.properties /etc/kafka/server.properties
sudo chown kafka:kafka /etc/kafka/server.properties
sudo chmod 644 /etc/kafka/server.properties
# set kafka advertised broker address.
sudo sed -i "s/^#advertised.host.name=<hostname routable by clients>/#advertised.host.name=<hostname routable by clients>\nadvertised.host.name=${SERVICE_HOST}/"\
# set kafka listeners address.
sudo sed -i "s/listeners = PLAINTEXT:\/\/your.host.name:9092/listeners = PLAINTEXT:\/\/your.host.name:9092\nlisteners=PLAINTEXT:\/\/${SERVICE_HOST}:9092/"\
/etc/kafka/server.properties
sudo systemctl enable kafka
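
After the new sed runs, the commented example line shipped with Kafka is
kept and a concrete listener is appended below it; with a hypothetical
SERVICE_HOST of 192.168.10.6, /etc/kafka/server.properties would contain
roughly:

    # listeners = PLAINTEXT://your.host.name:9092
    listeners=PLAINTEXT://192.168.10.6:9092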


@@ -103,8 +103,8 @@ INFLUXDB_PYTHON_VERSION=${INFLUXDB_PYTHON_VERSION:-1.7.6}
VERTICA_VERSION=${VERTICA_VERSION:-8.0.0-0}
CASSANDRA_VERSION=${CASSANDRA_VERSION:-311x}
# Kafka deb consists of the version of scala plus the version of kafka
BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-1.0.1}
SCALA_VERSION=${SCALA_VERSION:-2.11}
BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-2.0.1}
SCALA_VERSION=${SCALA_VERSION:-2.12}
KAFKA_VERSION=${KAFKA_VERSION:-${SCALA_VERSION}-${BASE_KAFKA_VERSION}}
STORM_VERSION=${STORM_VERSION:-1.2.2}
GO_VERSION=${GO_VERSION:-"1.7.1"}
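
Because the settings above use the ${VAR:-default} idiom, the pinned
versions can be overridden from the environment before running devstack;
a hedged example:

    export SCALA_VERSION=2.12
    export BASE_KAFKA_VERSION=2.0.1
    # KAFKA_VERSION then resolves to 2.12-2.0.1, matching the upstream
    # kafka_2.12-2.0.1 release artifact name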


@@ -0,0 +1,6 @@
---
upgrade:
  - |
    Upgrade Apache Kafka from version 1.0.1 to 2.0.1. Please consult the
    official `upgrade notes <https://kafka.apache.org/20/documentation.html#upgrade>`_
    for complete information on upgrading from previous versions.
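
A minimal sketch of the broker-side compatibility settings those notes
describe for a rolling upgrade from 1.0.1 (versions mirror this change;
adjust to your deployment):

    # in server.properties on each broker, before swapping in the 2.0.1 binaries
    inter.broker.protocol.version=1.0
    log.message.format.version=0.9.0.0
    # once every broker runs 2.0.1, raise the protocol version and restart
    # the brokers one at a time
    inter.broker.protocol.version=2.0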