Create devstack plugin for monasca-events

!Parts of code taken from monasca-api and monasca-log-api!

Also create a dummy implementation of the version endpoint
so that the deployment can be tested.
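As a quick smoke test once the plugin has stacked, the dummy version endpoint
can be queried directly (a sketch; it assumes the default gunicorn bind address
of 127.0.0.1:5670 from events-api-paste.ini in this change):

# expected response body: {"version": "v1.0"}
curl -s http://127.0.0.1:5670/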

Story: 2001112
Task: 4808

Change-Id: Ic35f6388c34c4d4c1198401a1c6972b802a54c67
Jakub Wachowski 2017-08-17 09:45:03 +02:00 committed by Adrian Czarnecki
parent 6149b0d436
commit c3a3b4b765
29 changed files with 1920 additions and 37 deletions

.zuul.yaml Normal file

@ -0,0 +1,19 @@
- job:
name: monascaevents-tempest-events-base
parent: legacy-dsvm-base
run: playbooks/legacy/monasca-tempest-events-base/run
post-run: playbooks/legacy/monasca-tempest-events-base/post
timeout: 7800
required-projects:
- openstack-infra/devstack-gate
- openstack/monasca-events-api
- openstack/monasca-common
- openstack/monasca-persister
- openstack/python-monascaclient
- openstack/tempest
voting: false
- project:
name: openstack/monasca-events-api
check:
jobs:
- monascaevents-tempest-events-base


@ -79,7 +79,9 @@ LOGDIR=$DEST/logs
LOG_COLOR=False
disable_all_services
enable_service zookeeper rabbit mysql key tempest horizon
enable_service rabbit mysql key tempest horizon
enable_plugin monasca-events-api https://git.openstack.org/openstack/monasca-events-api
' > local.conf
./stack.sh


@ -0,0 +1,3 @@
jq # dist:xenial
python-dev # dist:xenial
build-essential # dist:xenial


@ -0,0 +1,360 @@
##################### Elasticsearch Configuration Example #####################
# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
#
# Elasticsearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].
# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
#node.rack: ${RACK_ENV_VAR}
# For information on supported formats and syntax for the config file, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
################################### Cluster ###################################
# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
cluster.name: monasca_events_elastic
#################################### Node #####################################
# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#
node.name: "devstack"
# Allow this node to be eligible as a master node (enabled by default):
node.master: true
# Allow this node to store data (enabled by default)
node.data: true
# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
# This will be the "workhorse" of your cluster.
#
#node.master: false
#node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
# to have free resources. This will be the "coordinator" of your cluster.
#
#node.master: true
#node.data: false
#
# 3. You want this node to be neither master nor data node, but
# to act as a "search load balancer" (fetching data from nodes,
# aggregating results, etc.)
#
#node.master: false
#node.data: false
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_nodes] or GUI tools
# such as <http://www.elasticsearch.org/overview/marvel/>,
# <http://github.com/karmi/elasticsearch-paramedic>,
# <http://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key value pair, similar to node.key: value, here is an example:
#
#node.rack: rack314
# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
#node.max_local_storage_nodes: 1
#################################### Index ####################################
# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note, that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
# for more information.
# Set the number of shards (splits) of an index (5 by default):
#
#index.number_of_shards: 5
# Set the number of replicas (additional copies) of an index (1 by default):
#
#index.number_of_replicas: 1
# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
#index.number_of_shards: 1
#index.number_of_replicas: 0
# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
# _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
# cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# Elasticsearch takes care about load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.
#################################### Paths ####################################
# Path to directory where to store index data allocated for this node.
path.data: %ELASTICSEARCH_DATA_DIR%
# Path to log files:
path.logs: %ELASTICSEARCH_LOG_DIR%
# Path to where plugins are installed:
#path.plugins: /path/to/plugins
# Path to temporary files
#path.work: /path/to/work
# Path to directory containing configuration (this file and logging.yml):
#path.conf: /path/to/conf
#################################### Plugin ###################################
# If a plugin listed here is not installed for current node, the node will not start.
#
#plugin.mandatory: mapper-attachments,lang-groovy
################################### Memory ####################################
# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
#bootstrap.mlockall: true
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for Elasticsearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the Elasticsearch process is allowed to lock
# the memory, eg. by using `ulimit -l unlimited`.
############################## Network And HTTP ###############################
# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (the range means that if the port is busy, it will automatically
# try the next port).
# Set the bind address specifically (IPv4 or IPv6):
network.bind_host: %ELASTICSEARCH_BIND_HOST%
# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
network.publish_host: %ELASTICSEARCH_PUBLISH_HOST%
# Set a custom port for the node to node communication (9300 by default):
transport.tcp.port: %ELASTICSEARCH_PUBLISH_PORT%
# Enable compression for all communication between nodes (disabled by default):
#
#transport.tcp.compress: true
# Set a custom port to listen for HTTP traffic:
#
http.port: %ELASTICSEARCH_BIND_PORT%
# Set a custom allowed content length:
#
#http.max_content_length: 100mb
# Disable HTTP completely:
#
#http.enabled: false
################################### Gateway ###################################
# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.
# There are several types of gateway implementations. For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
# The default gateway type is the "local" gateway (recommended):
#
#gateway.type: local
# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using shared
# gateway).
# Allow recovery process after N nodes in a cluster are up:
#
#gateway.recover_after_nodes: 1
# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
#gateway.recover_after_time: 5m
# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
#gateway.expected_nodes: 2
############################# Recovery Throttling #############################
# These settings allow to control the process of shards allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.
# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc
#
#cluster.routing.allocation.node_concurrent_recoveries: 2
# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
#
#indices.recovery.max_bytes_per_sec: 20mb
# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
#indices.recovery.concurrent_streams: 5
################################## Discovery ##################################
# Discovery infrastructure ensures nodes can be found within a cluster
# and master node is elected. Multicast discovery is the default.
# Set to ensure a node sees N other master eligible nodes to be considered
# operational within the cluster. This should be set to a quorum/majority of
# the master-eligible nodes in the cluster.
#
discovery.zen.minimum_master_nodes: 1
# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
#discovery.zen.ping.timeout: 3s
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
discovery.zen.ping.multicast.enabled: false
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
# discovery.zen.ping.unicast.hosts: [127.0.0.1]
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
#
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
#
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
# for a step-by-step tutorial.
# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
#
# You have to install the cloud-gce plugin for enabling the GCE discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
# Azure discovery allows to use Azure API in order to perform discovery.
#
# You have to install the cloud-azure plugin for enabling the Azure discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
################################## Slow Log ##################################
# Shard level query and fetch threshold logging.
#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms
#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms
#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms
################################## GC Logging ################################
#monitor.jvm.gc.young.warn: 1000ms
#monitor.jvm.gc.young.info: 700ms
#monitor.jvm.gc.young.debug: 400ms
#monitor.jvm.gc.old.warn: 10s
#monitor.jvm.gc.old.info: 5s
#monitor.jvm.gc.old.debug: 2s
################################## Security ################################
# Uncomment if you want to enable JSONP as a valid return transport on the
# http server. With this enabled, it may pose a security risk, so disabling
# it unless you need it is recommended (it is disabled by default).
#
#http.jsonp.enable: true
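The %...% tokens in the template above are not valid Elasticsearch settings by
themselves; the configure_elasticsearch function added later in this change
renders them with sed at stack time. A minimal sketch of that substitution,
using the same variable names as the plugin:

# fill in the placeholders from the devstack plugin settings
sudo sed -e "
s|%ELASTICSEARCH_BIND_HOST%|$ELASTICSEARCH_BIND_HOST|g;
s|%ELASTICSEARCH_DATA_DIR%|$ELASTICSEARCH_DATA_DIR|g;
s|%ELASTICSEARCH_LOG_DIR%|$ELASTICSEARCH_LOG_DIR|g;
" -i $ELASTICSEARCH_CFG_DIR/elasticsearch.yml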


@ -0,0 +1,38 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] server.properties"
exit 1
fi
base_dir=$(dirname $0)
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
export KAFKA_HEAP_OPTS="-Xms256m -Xmx256m"
EXTRA_ARGS="-name kafkaServer -loggc"
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka $@


@ -0,0 +1,72 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log4j.rootLogger=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=50MB
log4j.appender.kafkaAppender.MaxBackupIndex=4
log4j.appender.kafkaAppender.File=/var/log/kafka/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.MaxFileSize=50MB
log4j.appender.stateChangeAppender.MaxBackupIndex=4
log4j.appender.stateChangeAppender.File=/var/log/kafka/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.MaxFileSize=50MB
log4j.appender.controllerAppender.MaxBackupIndex=4
log4j.appender.controllerAppender.File=/var/log/kafka/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Turn on all our debugging info
#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
#log4j.logger.kafka.perf=DEBUG, kafkaAppender
#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
log4j.logger.kafka=WARN, kafkaAppender
# Tracing requests results in large logs
#log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
#log4j.appender.requestAppender.MaxFileSize=50MB
#log4j.appender.requestAppender.MaxBackupIndex=4
#log4j.appender.requestAppender.File=/var/log/kafka/kafka-request.log
#log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
#log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
#
#log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
#log4j.additivity.kafka.network.RequestChannel$=false
#
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
#log4j.logger.kafka.request.logger=TRACE, requestAppender
#log4j.additivity.kafka.request.logger=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false


@ -0,0 +1,118 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The port the socket server listens on
port=9092
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
#host.name=127.0.0.1
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>
# The number of threads handling network requests
num.network.threads=2
# The number of threads doing disk I/O
num.io.threads=2
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/var/kafka
auto.create.topics.enable=false
# The number of logical partitions per topic per server. More partitions allow greater parallelism
# for consumption, but also mean more files.
num.partitions=2
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=24
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=104857600
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=104857600
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=127.0.0.1:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
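The flush settings above are broker-wide defaults; as the comments note, they
can also be overridden per topic. A hedged sketch with the Kafka 0.9 tooling
installed by this plugin (the exact invocation is an assumption, not part of
this change):

# per-topic override of the flush policy for the monevents topic
/opt/kafka/bin/kafka-topics.sh --zookeeper 127.0.0.1:2181 --alter --topic monevents --config flush.messages=10000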


@ -0,0 +1,36 @@
[loggers]
keys = root, kafka
[handlers]
keys = console, file
[formatters]
keys = generic
[logger_root]
level = DEBUG
formatter = default
handlers = console, file
[logger_kafka]
qualname = monasca_common.kafka_lib
level = INFO
formatter = default
handlers = console, file
propagate = 0
[handler_console]
class = logging.StreamHandler
args = (sys.stderr,)
level = DEBUG
formatter = generic
[handler_file]
class = logging.handlers.RotatingFileHandler
level = DEBUG
formatter = generic
# store up to 5*100MB of logs
args = ('%MONASCA_EVENTS_LOG_DIR%/events-persister.log', 'a', 104857600, 5)
[formatter_generic]
format = %(asctime)s %(levelname)s [%(name)s][%(threadName)s] %(message)s


@ -0,0 +1,36 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from http://packages.ubuntu.com/saucy/zookeeperd
NAME=zookeeper
ZOOCFGDIR=/etc/zookeeper/conf
# it seems that log4j requires the log4j.properties file to be in the classpath
CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
ZOOCFG="$ZOOCFGDIR/zoo.cfg"
ZOO_LOG_DIR=/var/log/zookeeper
USER=$NAME
GROUP=$NAME
PIDDIR=/var/run/$NAME
PIDFILE=$PIDDIR/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
JAVA=/usr/bin/java
ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
JMXLOCALONLY=false
JAVA_OPTS=""


@ -0,0 +1,69 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# From http://packages.ubuntu.com/saucy/zookeeperd
# ZooKeeper Logging Configuration
#
# Format is "<default threshold> (, <appender>)+
log4j.rootLogger=${zookeeper.root.logger}
# Example: console appender only
# log4j.rootLogger=INFO, CONSOLE
# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=INFO
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add ROLLINGFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=WARN
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add TRACEFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n


@ -0,0 +1 @@
0


@ -0,0 +1,74 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=/var/lib/zookeeper
# Place the dataLogDir to a separate physical disc for better performance
# dataLogDir=/disk2/zookeeper
# the port at which the clients will connect
clientPort=2181
# Maximum number of concurrent connections a single client may open
maxClientCnxns=60
# specify all zookeeper servers
# The first port is used by followers to connect to the leader
# The second one is used for leader election
server.0=127.0.0.1:2888:3888
# To avoid seeks ZooKeeper allocates space in the transaction log file in
# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
# for changing the size of the blocks is to reduce the block size if snapshots
# are taken more often. (Also, see snapCount).
#preAllocSize=65536
# Clients can submit requests faster than ZooKeeper can process them,
# especially if there are a lot of clients. To prevent ZooKeeper from running
# out of memory due to queued requests, ZooKeeper will throttle clients so that
# there is no more than globalOutstandingLimit outstanding requests in the
# system. The default limit is 1,000. ZooKeeper logs transactions to a
# transaction log. After snapCount transactions are written to a log file a
# snapshot is started and a new transaction log file is started. The default
# snapCount is 10,000.
#snapCount=1000
# If this option is defined, requests will be logged to a trace file named
# traceFile.year.month.day.
#traceFile=
# Leader accepts client connections. Default value is "yes". The leader machine
# coordinates updates. For higher update throughput at the slight expense of
# read throughput the leader can be configured to not accept clients and focus
# on coordination.
#leaderServes=yes
# Autopurge every hour to avoid using lots of disk in bursts
# Order of the next 2 properties matters.
# autopurge.snapRetainCount must be before autopurge.purgeInterval.
autopurge.snapRetainCount=3
autopurge.purgeInterval=1
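This devstack setup runs a single ZooKeeper member (server.0 above). For
reference, a multi-member ensemble would list one server.N entry per node, each
with its follower port and leader-election port (illustrative addresses only,
not used by this plugin):

#server.0=192.168.0.10:2888:3888
#server.1=192.168.0.11:2888:3888
#server.2=192.168.0.12:2888:3888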


@ -0,0 +1,98 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_ELASTICSEARCH=$(set +o | grep xtrace)
set +o xtrace
function is_elasticsearch_enabled {
is_service_enabled monasca-elasticsearch && return 0
return 1
}
function install_elasticsearch {
if is_elasticsearch_enabled; then
echo_summary "Installing ElasticSearch ${ELASTICSEARCH_VERSION}"
local es_tarball=elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz
local es_url=http://download.elasticsearch.org/elasticsearch/elasticsearch/${es_tarball}
local es_dest=${FILES}/${es_tarball}
download_file ${es_url} ${es_dest}
tar xzf ${es_dest} -C $DEST
sudo chown -R $STACK_USER $DEST/elasticsearch-${ELASTICSEARCH_VERSION}
ln -sf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} $ELASTICSEARCH_DIR
fi
}
function configure_elasticsearch {
if is_elasticsearch_enabled; then
echo_summary "Configuring ElasticSearch ${ELASTICSEARCH_VERSION}"
local templateDir=$ELASTICSEARCH_CFG_DIR/templates
for dir in $ELASTICSEARCH_LOG_DIR $templateDir $ELASTICSEARCH_DATA_DIR; do
sudo install -m 755 -d -o $STACK_USER $dir
done
sudo cp -f "${PLUGIN_FILES}"/elasticsearch/elasticsearch.yml $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
sudo chown -R $STACK_USER $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
sudo chmod 0644 $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
sudo sed -e "
s|%ELASTICSEARCH_BIND_HOST%|$ELASTICSEARCH_BIND_HOST|g;
s|%ELASTICSEARCH_BIND_PORT%|$ELASTICSEARCH_BIND_PORT|g;
s|%ELASTICSEARCH_PUBLISH_HOST%|$ELASTICSEARCH_PUBLISH_HOST|g;
s|%ELASTICSEARCH_PUBLISH_PORT%|$ELASTICSEARCH_PUBLISH_PORT|g;
s|%ELASTICSEARCH_DATA_DIR%|$ELASTICSEARCH_DATA_DIR|g;
s|%ELASTICSEARCH_LOG_DIR%|$ELASTICSEARCH_LOG_DIR|g;
" -i $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
fi
}
function start_elasticsearch {
if is_elasticsearch_enabled; then
echo_summary "Starting ElasticSearch ${ELASTICSEARCH_VERSION}"
# TODO(jwachowski) find some nicer solution for setting env variable
local service_file="/etc/systemd/system/devstack@elasticsearch.service"
local es_java_opts="ES_JAVA_OPTS=-Dmapper.allow_dots_in_name=true"
iniset -sudo "$service_file" "Service" "Environment" "$es_java_opts"
run_process "elasticsearch" "$ELASTICSEARCH_DIR/bin/elasticsearch"
fi
}
function stop_elasticsearch {
if is_elasticsearch_enabled; then
echo_summary "Stopping ElasticSearch ${ELASTICSEARCH_VERSION}"
stop_process "elasticsearch" || true
fi
}
function clean_elasticsearch {
if is_elasticsearch_enabled; then
echo_summary "Cleaning Elasticsearch ${ELASTICSEARCH_VERSION}"
sudo rm -rf $ELASTICSEARCH_DIR || true
sudo rm -rf $ELASTICSEARCH_CFG_DIR || true
sudo rm -rf $ELASTICSEARCH_LOG_DIR || true
sudo rm -rf $ELASTICSEARCH_DATA_DIR || true
sudo rm -rf $FILES/elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz || true
sudo rm -rf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} || true
fi
}
$_XTRACE_ELASTICSEARCH


@ -0,0 +1,61 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_EVENTS_AGENT=$(set +o | grep xtrace)
set +o xtrace
function is_events_agent_enabled {
is_service_enabled monasca-events-agent && return 0
return 1
}
function install_events_agent {
if is_events_agent_enabled; then
echo_summary "Installing Events Agent"
# TODO implement this
fi
}
function configure_events_agent {
if is_events_agent_enabled; then
echo_summary "Configuring Events Agent"
# TODO implement this
fi
}
function start_events_agent {
if is_events_agent_enabled; then
echo_summary "Starting Events Agent"
# TODO implement this
fi
}
function stop_events_agent {
if is_events_agent_enabled; then
echo_summary "Stopping Events Agent"
# TODO implement this
fi
}
function clean_events_agent {
if is_events_agent_enabled; then
echo_summary "Cleaning Events Agent"
# TODO implement this
fi
}
$_XTRACE_EVENTS_AGENT


@ -0,0 +1,97 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_EVENTS_API=$(set +o | grep xtrace)
set +o xtrace
function is_events_api_enabled {
is_service_enabled monasca-events-api && return 0
return 1
}
function install_events_api {
if is_events_api_enabled; then
echo_summary "Installing Events Api"
git_clone $MONASCA_EVENTS_API_REPO $MONASCA_EVENTS_API_DIR $MONASCA_EVENTS_API_BRANCH
setup_develop ${MONASCA_EVENTS_API_DIR}
install_keystonemiddleware
pip_install gunicorn
fi
}
function configure_events_api {
if is_events_api_enabled; then
echo_summary "Configuring Events Api"
# Put config files in ``$MONASCA_EVENTS_API_CONF_DIR`` for everyone to find
sudo install -d -o $STACK_USER $MONASCA_EVENTS_API_CONF_DIR
# ensure fresh installation of configuration files
rm -rf $MONASCA_EVENTS_API_CONF $MONASCA_EVENTS_API_PASTE $MONASCA_EVENTS_API_LOGGING_CONF
if [[ "$MONASCA_EVENTS_API_CONF_DIR" != "$MONASCA_EVENTS_API_DIR/etc/monasca" ]]; then
install -m 600 $MONASCA_EVENTS_API_DIR/etc/monasca/events-api-paste.ini $MONASCA_EVENTS_API_PASTE
install -m 600 $MONASCA_EVENTS_API_DIR/etc/monasca/events-api-logging.conf $MONASCA_EVENTS_API_LOGGING_CONF
fi
oslo-config-generator \
--config-file $MONASCA_EVENTS_API_DIR/config-generator/config.conf \
--output-file $MONASCA_EVENTS_API_CONF
iniset "$MONASCA_EVENTS_API_CONF" DEFAULT log_config_append $MONASCA_EVENTS_API_LOGGING_CONF
# configure keystone middleware
configure_auth_token_middleware "$MONASCA_EVENTS_API_CONF" "admin" $MONASCA_EVENTS_API_CACHE_DIR
iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken region_name $REGION_NAME
iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken project_name "admin"
iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken password $ADMIN_PASSWORD
# configure events-api-paste.ini
iniset "$MONASCA_EVENTS_API_PASTE" server:main bind $MONASCA_EVENTS_API_SERVICE_HOST:$MONASCA_EVENTS_API_SERVICE_PORT
iniset "$MONASCA_EVENTS_API_PASTE" server:main chdir $MONASCA_EVENTS_API_DIR
iniset "$MONASCA_EVENTS_API_PASTE" server:main workers $API_WORKERS
fi
}
function start_events_api {
if is_events_api_enabled; then
echo_summary "Starting Events Api"
run_process "monasca-events-api" "/usr/local/bin/gunicorn --paste $MONASCA_EVENTS_API_PASTE"
fi
}
function stop_events_api {
if is_events_api_enabled; then
echo_summary "Stopping Events Api"
stop_process "monasca-events-api"
fi
}
function clean_events_api {
if is_events_api_enabled; then
echo_summary "Cleaning Events Api"
sudo rm -f $MONASCA_EVENTS_API_CONF || true
sudo rm -f $MONASCA_EVENTS_API_PASTE || true
sudo rm -f $MONASCA_EVENTS_API_LOGGING_CONF || true
sudo rm -rf $MONASCA_EVENTS_API_CACHE_DIR || true
sudo rm -rf $MONASCA_EVENTS_API_DIR || true
fi
}
$_XTRACE_EVENTS_API


@ -0,0 +1,89 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_EVENTS_PERSISTER=$(set +o | grep xtrace)
set +o xtrace
function is_events_persister_enabled {
is_service_enabled monasca-events-persister && return 0
return 1
}
function install_events_persister {
if is_events_persister_enabled; then
echo_summary "Installing Events Persister"
git_clone $MONASCA_EVENTS_PERSISTER_REPO $MONASCA_EVENTS_PERSISTER_DIR $MONASCA_EVENTS_PERSISTER_BRANCH
setup_develop ${MONASCA_EVENTS_PERSISTER_DIR}
pip_install "elasticsearch>=2.0.0,<3.0.0"
fi
}
function configure_events_persister {
if is_events_persister_enabled; then
echo_summary "Configuring Events Persister"
# Put config files in ``$MONASCA_EVENTS_PERSISTER_CONF_DIR`` for everyone to find
sudo install -d -o $STACK_USER $MONASCA_EVENTS_PERSISTER_CONF_DIR
# ensure fresh installation of configuration files
rm -rf $MONASCA_EVENTS_PERSISTER_CONF $MONASCA_EVENTS_PERSISTER_LOGGING_CONF
oslo-config-generator \
--config-file $MONASCA_EVENTS_PERSISTER_DIR/config-generator/persister.conf \
--output-file $MONASCA_EVENTS_PERSISTER_CONF
iniset "$MONASCA_EVENTS_PERSISTER_CONF" DEFAULT log_config_append $MONASCA_EVENTS_PERSISTER_LOGGING_CONF
iniset "$MONASCA_EVENTS_PERSISTER_CONF" zookeeper uri 127.0.0.1:2181
iniset "$MONASCA_EVENTS_PERSISTER_CONF" zookeeper partition_interval_recheck_seconds 15
iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka num_processors 0
iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka_events num_processors 1
iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka_events uri 127.0.0.1:9092
iniset "$MONASCA_EVENTS_PERSISTER_CONF" elasticsearch hosts ${ELASTICSEARCH_BIND_HOST}:${ELASTICSEARCH_BIND_PORT}
sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/monasca-events-persister/events-persister-logging.conf \
"${MONASCA_EVENTS_PERSISTER_LOGGING_CONF}"
sudo sed -e "
s|%MONASCA_EVENTS_LOG_DIR%|$MONASCA_EVENTS_LOG_DIR|g;
" -i ${MONASCA_EVENTS_PERSISTER_LOGGING_CONF}
fi
}
function start_events_persister {
if is_events_persister_enabled; then
echo_summary "Starting Events Persister"
run_process "monasca-events-persister" "/usr/local/bin/monasca-persister --config-file $MONASCA_EVENTS_PERSISTER_CONF"
fi
}
function stop_events_persister {
if is_events_persister_enabled; then
echo_summary "Stopping Events Persister"
stop_process "monasca-events-persister" || true
fi
}
function clean_events_persister {
if is_events_persister_enabled; then
echo_summary "Cleaning Events Persister"
sudo rm -f $MONASCA_EVENTS_PERSISTER_CONF || true
sudo rm -f $MONASCA_EVENTS_PERSISTER_LOGGING_CONF || true
sudo rm -rf $MONASCA_EVENTS_PERSISTER_DIR || true
fi
}
$_XTRACE_EVENTS_PERSISTER
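For reference, the iniset calls in configure_events_persister above render an
events-persister.conf roughly like the following (a sketch; the base file is
produced by oslo-config-generator and the elasticsearch hosts value is expanded
from the plugin settings):

[DEFAULT]
log_config_append = /etc/monasca/events-persister-logging.conf
[zookeeper]
uri = 127.0.0.1:2181
partition_interval_recheck_seconds = 15
[kafka]
num_processors = 0
[kafka_events]
num_processors = 1
uri = 127.0.0.1:9092
[elasticsearch]
hosts = <ELASTICSEARCH_BIND_HOST>:9200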

devstack/lib/kafka.sh Normal file

@ -0,0 +1,98 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_KAFKA=$(set +o | grep xtrace)
set +o xtrace
function is_kafka_enabled {
is_service_enabled monasca-kafka && return 0
return 1
}
function install_kafka {
if is_kafka_enabled; then
echo_summary "Installing kafka"
local kafka_tarball=kafka_${KAFKA_VERSION}.tgz
local kafka_tarball_url=${APACHE_MIRROR}kafka/${BASE_KAFKA_VERSION}/${kafka_tarball}
local kafka_tarball_dest=${FILES}/${kafka_tarball}
download_file ${kafka_tarball_url} ${kafka_tarball_dest}
sudo groupadd --system kafka || true
sudo useradd --system -g kafka kafka || true
sudo tar -xzf ${kafka_tarball_dest} -C /opt
sudo ln -sf /opt/kafka_${KAFKA_VERSION} /opt/kafka
sudo cp -f "${MONASCA_EVENTS_API_DIR}"/devstack/files/kafka/kafka-server-start.sh /opt/kafka_${KAFKA_VERSION}/bin/kafka-server-start.sh
fi
}
function configure_kafka {
if is_kafka_enabled; then
echo_summary "Configuring kafka"
sudo mkdir -p /var/kafka || true
sudo chown kafka:kafka /var/kafka
sudo chmod 755 /var/kafka
sudo rm -rf /var/kafka/lost+found
sudo mkdir -p /var/log/kafka || true
sudo chown kafka:kafka /var/log/kafka
sudo chmod 755 /var/log/kafka
sudo ln -sf /opt/kafka/config /etc/kafka
sudo ln -sf /var/log/kafka /opt/kafka/logs
sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/kafka/log4j.properties /etc/kafka/log4j.properties
sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/kafka/server.properties /etc/kafka/server.properties
sudo chown kafka:kafka /etc/kafka/*
sudo chmod 644 /etc/kafka/*
fi
}
function start_kafka {
if is_kafka_enabled; then
echo_summary "Starting Monasca Kafka"
run_process "kafka" "/opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties" "kafka" "kafka"
fi
}
function stop_kafka {
if is_kafka_enabled; then
echo_summary "Stopping Monasca Kafka"
stop_process "kafka" || true
fi
}
function clean_kafka {
if is_kafka_enabled; then
echo_summary "Clean Monasca Kafka"
sudo rm -rf /var/kafka
sudo rm -rf /var/log/kafka
sudo rm -rf /etc/kafka
sudo rm -rf /opt/kafka
sudo userdel kafka || true
sudo groupdel kafka || true
sudo rm -rf /opt/kafka_${KAFKA_VERSION}
sudo rm -rf ${FILES}/kafka_${KAFKA_VERSION}.tgz
fi
}
function create_kafka_topic {
if is_kafka_enabled; then
/opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 4 --topic $1
fi
}
$_XTRACE_KAFKA

devstack/lib/utils.sh Normal file

@ -0,0 +1,75 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_UTILS=$(set +o | grep xtrace)
set +o xtrace
function find_nearest_apache_mirror {
if [ -z $APACHE_MIRROR ]; then
local mirror;
mirror=`curl -s 'https://www.apache.org/dyn/closer.cgi?as_json=1' | jq --raw-output '.preferred'`
APACHE_MIRROR=$mirror
fi
}
# download_file
# $1 - url to download
# $2 - location where to save url to
#
# Download the file only when it does not exist or a newer version is available.
#
# Uses global variables:
# - OFFLINE
# - DOWNLOAD_FILE_TIMEOUT
function download_file {
local url=$1
local file=$2
# If in OFFLINE mode check if file already exists
if [[ ${OFFLINE} == "True" ]] && [[ ! -f ${file} ]]; then
die $LINENO "You are running in OFFLINE mode but
the target file \"$file\" was not found"
fi
local curl_z_flag=""
if [[ -f "${file}" ]]; then
# If the file exists tell cURL to download only if newer version
# is available
curl_z_flag="-z $file"
fi
# yeah...downloading...devstack...hungry..om, om, om
local timeout=0
if [[ -n "${DOWNLOAD_FILE_TIMEOUT}" ]]; then
timeout=${DOWNLOAD_FILE_TIMEOUT}
fi
time_start "download_file"
_safe_permission_operation ${CURL_GET} -L $url --connect-timeout $timeout --retry 3 --retry-delay 5 -o $file $curl_z_flag
time_stop "download_file"
}
function configure_log_dir {
local logdir=$1
sudo mkdir -p $logdir
sudo chmod -R 0777 $logdir
}
$_XTRACE_UTILS
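The call sites elsewhere in this change follow the signature documented above,
for example in the elasticsearch and kafka install functions:

download_file ${es_url} ${es_dest}
download_file ${kafka_tarball_url} ${kafka_tarball_dest}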

devstack/lib/zookeeper.sh Normal file

@ -0,0 +1,67 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_XTRACE_ZOOKEEPER=$(set +o | grep xtrace)
set +o xtrace
# Set up default directories
ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper
ZOOKEEPER_CONF_DIR=/etc/zookeeper
function is_zookeeper_enabled {
is_service_enabled monasca-zookeeper && return 0
return 1
}
function install_zookeeper {
if is_zookeeper_enabled; then
if is_ubuntu; then
install_package zookeeperd
else
die $LINENO "Don't know how to install zookeeper on this platform"
fi
fi
}
function configure_zookeeper {
if is_zookeeper_enabled; then
sudo cp $MONASCA_EVENTS_API_DIR/devstack/files/zookeeper/* $ZOOKEEPER_CONF_DIR
sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg
sudo rm -rf $ZOOKEEPER_DATA_DIR || true
sudo mkdir -p $ZOOKEEPER_DATA_DIR || true
fi
}
function start_zookeeper {
if is_zookeeper_enabled; then
start_service zookeeper
fi
}
function stop_zookeeper {
if is_zookeeper_enabled; then
stop_service zookeeper
fi
}
function clean_zookeeper {
if is_zookeeper_enabled; then
sudo rm -rf $ZOOKEEPER_DATA_DIR
apt_get -y purge zookeeper
fi
}
$_XTRACE_ZOOKEEPER


@ -1,7 +1,7 @@
#!/bin/bash
#
# Copyright 2016 FUJITSU LIMITED
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -15,3 +15,127 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Save trace setting
_EVENTS_XTRACE=$(set +o | grep xtrace)
set -o xtrace
_EVENTS_ERREXIT=$(set +o | grep errexit)
set -o errexit
# source lib/*
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/utils.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/zookeeper.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/kafka.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/elasticsearch.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-persister.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-api.sh
source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-agent.sh
function pre_install_monasca_events {
echo_summary "Pre-Installing Monasca Events Dependency Components"
find_nearest_apache_mirror
install_zookeeper
install_kafka
install_elasticsearch
}
function install_monasca_events {
echo_summary "Installing Core Monasca Events Components"
install_events_persister
install_events_api
install_events_agent
}
function configure_monasca_events {
echo_summary "Configuring Monasca Events Dependency Components"
configure_zookeeper
configure_kafka
configure_elasticsearch
echo_summary "Configuring Monasca Events Core Components"
configure_log_dir ${MONASCA_EVENTS_LOG_DIR}
configure_events_persister
configure_events_api
configure_events_agent
}
function init_monasca_events {
echo_summary "Initializing Monasca Events Components"
start_zookeeper
start_kafka
start_elasticsearch
# wait for all services to start
sleep 10s
create_kafka_topic monevents
}
function start_monasca_events {
echo_summary "Starting Monasca Events Components"
start_events_persister
start_events_api
start_events_agent
}
function unstack_monasca_events {
echo_summary "Unstacking Monasca Events Components"
stop_events_agent
stop_events_api
stop_events_persister
stop_elasticsearch
stop_kafka
stop_zookeeper
}
function clean_monasca_events {
echo_summary "Cleaning Monasca Events Components"
clean_events_agent
clean_events_api
clean_events_persister
clean_elasticsearch
clean_kafka
clean_zookeeper
}
# check for service enabled
if is_service_enabled monasca-events; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
# Set up system services
echo_summary "Configuring Monasca Events system services"
pre_install_monasca_events
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing Monasca Events"
install_monasca_events
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring Monasca Events"
configure_monasca_events
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize and start the Monasca service
echo_summary "Initializing Monasca Events"
init_monasca_events
start_monasca_events
fi
if [[ "$1" == "unstack" ]]; then
# Shut down Monasca services
echo_summary "Unstacking Monasca Events"
unstack_monasca_events
fi
if [[ "$1" == "clean" ]]; then
# Remove state and transient data
# Remember clean.sh first calls unstack.sh
echo_summary "Cleaning Monasca Events"
clean_monasca_events
fi
fi
# Restore errexit & xtrace
${_EVENTS_ERREXIT}
${_EVENTS_XTRACE}
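After init_monasca_events has run, the monevents topic created via
create_kafka_topic can be checked against the local Zookeeper (a sketch using
the Kafka tooling installed by this plugin):

# list topics on the devstack Kafka instance; "monevents" should appear
/opt/kafka/bin/kafka-topics.sh --list --zookeeper localhost:2181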


@ -1,28 +0,0 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# (C) Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sleep some time until all services are starting
sleep 6
function load_devstack_utilities {
source $BASE/new/devstack/stackrc
source $BASE/new/devstack/functions
source $BASE/new/devstack/openrc admin admin
# print OS_ variables
env | grep OS_
}


@ -13,4 +13,75 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Monasca infrastructure services
enable_service monasca-zookeeper
enable_service monasca-kafka
enable_service monasca-elasticsearch
# Monasca Events services
enable_service monasca-events
enable_service monasca-events-api
enable_service monasca-events-persister
enable_service monasca-events-agent
# Dependent Software Versions
BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-0.9.0.1}
SCALA_VERSION=${SCALA_VERSION:-2.11}
KAFKA_VERSION=${KAFKA_VERSION:-${SCALA_VERSION}-${BASE_KAFKA_VERSION}}
ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-2.4.2}
# Path settings
MONASCA_BASE=${DEST}
# Repository settings
MONASCA_EVENTS_API_REPO=${MONASCA_EVENTS_API_REPO:-${GIT_BASE}/openstack/monasca-events-api.git}
MONASCA_EVENTS_API_BRANCH=${MONASCA_EVENTS_API_BRANCH:-master}
MONASCA_EVENTS_API_DIR=${MONASCA_BASE}/monasca-events-api
MONASCA_EVENTS_PERSISTER_REPO=${MONASCA_EVENTS_PERSISTER_REPO:-${GIT_BASE}/openstack/monasca-persister.git}
MONASCA_EVENTS_PERSISTER_BRANCH=${MONASCA_EVENTS_PERSISTER_BRANCH:-master}
MONASCA_EVENTS_PERSISTER_DIR=${MONASCA_BASE}/monasca-persister
MONASCA_EVENTS_AGENT_REPO=${MONASCA_EVENTS_AGENT_REPO:-${GIT_BASE}/openstack/monasca-events-agent.git}
MONASCA_EVENTS_AGENT_BRANCH=${MONASCA_EVENTS_AGENT_BRANCH:-master}
MONASCA_EVENTS_AGENT_DIR=${MONASCA_BASE}/monasca-events-agent
# Dependencies settings
ELASTICSEARCH_BIND_HOST=${ELASTICSEARCH_BIND_HOST:-${SERVICE_HOST}}
ELASTICSEARCH_BIND_PORT=${ELASTICSEARCH_BIND_PORT:-9200}
ELASTICSEARCH_PUBLISH_HOST=${ELASTICSEARCH_PUBLISH_HOST:-${SERVICE_HOST}}
ELASTICSEARCH_PUBLISH_PORT=${ELASTICSEARCH_PUBLISH_PORT:-9300}
ELASTICSEARCH_DIR=$DEST/elasticsearch
ELASTICSEARCH_CFG_DIR=$ELASTICSEARCH_DIR/config
ELASTICSEARCH_LOG_DIR=$LOGDIR/elasticsearch
ELASTICSEARCH_DATA_DIR=$DATA_DIR/elasticsearch
KAFKA_SERVICE_HOST=${KAFKA_SERVICE_HOST:-${SERVICE_HOST}}
KAFKA_SERVICE_PORT=${KAFKA_SERVICE_PORT:-9092}
# configuration
MONASCA_EVENTS_LOG_DIR=${MONASCA_EVENTS_LOG_DIR:-/var/log/monasca}
MONASCA_EVENTS_DEVSTACK_DIR=${MONASCA_EVENTS_DEVSTACK_DIR:-$MONASCA_EVENTS_API_DIR/devstack}
MONASCA_EVENTS_API_CONF_DIR=${MONASCA_EVENTS_API_CONF_DIR:-/etc/monasca}
MONASCA_EVENTS_API_CONF=${MONASCA_EVENTS_API_CONF:-$MONASCA_EVENTS_API_CONF_DIR/events-api.conf}
MONASCA_EVENTS_API_PASTE=${MONASCA_EVENTS_API_PASTE:-$MONASCA_EVENTS_API_CONF_DIR/events-api-paste.ini}
MONASCA_EVENTS_API_LOGGING_CONF=${MONASCA_EVENTS_API_LOGGING_CONF:-$MONASCA_EVENTS_API_CONF_DIR/events-api-logging.conf}
MONASCA_EVENTS_API_CACHE_DIR=${MONASCA_EVENTS_API_CACHE_DIR:-/var/cache/monasca-events-api}
MONASCA_EVENTS_API_SERVICE_HOST=${MONASCA_EVENTS_API_SERVICE_HOST:-${SERVICE_HOST}}
MONASCA_EVENTS_API_SERVICE_PORT=${MONASCA_EVENTS_API_SERVICE_PORT:-5670}
MONASCA_EVENTS_API_SERVICE_PROTOCOL=${MONASCA_EVENTS_API_SERVICE_PROTOCOL:-${SERVICE_PROTOCOL}}
MONASCA_EVENTS_PERSISTER_CONF_DIR=${MONASCA_EVENTS_PERSISTER_CONF_DIR:-/etc/monasca}
MONASCA_EVENTS_PERSISTER_CONF=${MONASCA_EVENTS_PERSISTER_CONF:-${MONASCA_EVENTS_PERSISTER_CONF_DIR}/events-persister.conf}
MONASCA_EVENTS_PERSISTER_LOGGING_CONF=${MONASCA_EVENTS_PERSISTER_LOGGING_CONF:-${MONASCA_EVENTS_PERSISTER_CONF_DIR}/events-persister-logging.conf}
# Other settings
PLUGIN_FILES=$MONASCA_EVENTS_API_DIR/devstack/files
DOWNLOAD_FILE_TIMEOUT=${DOWNLOAD_FILE_TIMEOUT:-300}
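# Example (illustrative only, not part of the defaults): any of the variables
# above can be overridden from local.conf before running stack.sh, e.g.:
#   MONASCA_EVENTS_API_SERVICE_PORT=5671
#   ELASTICSEARCH_VERSION=2.4.6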


@ -0,0 +1,34 @@
[loggers]
keys = root, kafka
[handlers]
keys = console, file
[formatters]
keys = context
[logger_root]
level = DEBUG
handlers = console, file
[logger_kafka]
qualname = kafka
level = DEBUG
handlers = console, file
propagate = 0
[handler_console]
class = logging.StreamHandler
args = (sys.stderr,)
level = DEBUG
formatter = context
[handler_file]
class = logging.handlers.RotatingFileHandler
level = DEBUG
formatter = context
# store up to 5*100MB of logs
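# args map to RotatingFileHandler(filename, mode, maxBytes, backupCount)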
args = ('monasca-events-api.log', 'a', 104857600, 5)
[formatter_context]
class = oslo_log.formatters.ContextFormatter


@ -17,15 +17,39 @@ name = main
[composite:main]
use = egg:Paste#urlmap
/: ea_version
/v1.0: ea_version_v1
/: events_version
[pipeline:ea_version_v1]
pipeline = request_id auth
[pipeline:events_version]
pipeline = error_trap versionapp
[app:versionapp]
paste.app_factory = monasca_events_api.app.api:create_version_app
[filter:auth]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:roles]
paste.filter_factory = monasca_events_api.middleware.role_middleware:RoleMiddleware.factory
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:auth]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# NOTE(trebskit) this is optional
# insert this into either pipeline to get some WSGI environment debug output
[filter:debug]
paste.filter_factory = oslo_middleware.debug:Debug.factory
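# For example (illustrative only, not enabled here), inserting it into the
# version pipeline would look like:
#   [pipeline:events_version]
#   pipeline = error_trap debug versionapp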
[filter:error_trap]
paste.filter_factory = oslo_middleware.catch_errors:CatchErrors.factory
[server:main]
use = egg:gunicorn#main
bind = 127.0.0.1:5670
workers = 9
worker-connections = 2000
worker-class = eventlet
timeout = 30
backlog = 2048
keepalive = 2
proc_name = monasca-events-api
loglevel = DEBUG
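# Illustrative note: gunicorn understands PasteDeploy files, so this config
# could be served directly with e.g. `gunicorn --paste /etc/monasca/events-api-paste.ini`;
# how the devstack plugin actually launches the API may differ.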


@ -0,0 +1,58 @@
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Module initializes various applications of monasca-events-api."""
import falcon
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
CONF = cfg.CONF
_CONF_LOADED = False
class Versions(object):
"""Versions API.
Versions returns information about API itself.
"""
def __init__(self):
"""Init the Version App."""
LOG.info('Initializing VersionsAPI!')
def on_get(self, req, res):
"""On get method."""
res.status = falcon.HTTP_200
res.body = '{"version": "v1.0"}'
def create_version_app(global_conf, **local_conf):
"""Create Version application."""
ctrl = Versions()
controllers = {
'/': ctrl, # redirect http://host:port/ down to Version app
# avoid conflicts with actual pipelines and 404 error
'/version': ctrl, # list all the versions
}
wsgi_app = falcon.API()
for route, ctrl in controllers.items():
wsgi_app.add_route(route, ctrl)
return wsgi_app
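
# A quick smoke test against the devstack defaults might look like this
# (illustrative; host and port come from the gunicorn `bind` in the paste config):
#   $ curl http://127.0.0.1:5670/
#   {"version": "v1.0"}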


@ -0,0 +1,100 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# (C) Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
sleep 6
function load_devstack_utilities {
    source $BASE/new/devstack/stackrc
    source $BASE/new/devstack/functions
    source $BASE/new/devstack/openrc admin admin
}
function setup_monasca_events_api {
    local constraints="-c ${REQUIREMENTS_DIR}/upper-constraints.txt"

    pushd $TEMPEST_DIR
    sudo -EH pip install $constraints -r requirements.txt -r test-requirements.txt
    popd

    pushd $MONASCA_EVENTS_API_DIR
    sudo -EH pip install $constraints -r requirements.txt -r test-requirements.txt
    sudo -EH python setup.py install
    popd
}
function set_tempest_conf {
    local conf_file=$TEMPEST_DIR/etc/tempest.conf

    pushd $TEMPEST_DIR
    oslo-config-generator \
        --config-file tempest/cmd/config-generator.tempest.conf \
        --output-file $conf_file
    popd

    cp -f $DEST/tempest/etc/logging.conf.sample $DEST/tempest/etc/logging.conf

    # set identity section
    iniset $conf_file identity admin_domain_scope True
    iniset $conf_file identity user_unique_last_password_count 2
    iniset $conf_file identity user_lockout_duration 5
    iniset $conf_file identity user_lockout_failure_attempts 2
    iniset $conf_file identity uri $OS_AUTH_URL/v2.0
    iniset $conf_file identity uri_v3 $OS_AUTH_URL/v3
    iniset $conf_file identity auth_version v$OS_IDENTITY_API_VERSION

    # set auth section
    iniset $conf_file auth use_dynamic_credentials True
    iniset $conf_file auth admin_username $OS_USERNAME
    iniset $conf_file auth admin_password $OS_PASSWORD
    iniset $conf_file auth admin_domain_name $OS_PROJECT_DOMAIN_ID
    iniset $conf_file auth admin_project_name $OS_PROJECT_NAME
}
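# For illustration only: with typical devstack admin credentials the generated
# tempest.conf ends up with sections roughly like
#   [identity]
#   uri = $OS_AUTH_URL/v2.0
#   uri_v3 = $OS_AUTH_URL/v3
#   auth_version = v3
#   [auth]
#   use_dynamic_credentials = True
#   admin_username = admin
# (actual values come from the OS_* variables loaded via openrc).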
function function_exists {
    declare -f -F $1 > /dev/null
}

if ! function_exists echo_summary; then
    function echo_summary {
        echo $@
    }
fi
XTRACE=$(set +o | grep xtrace)
set -o xtrace
echo_summary "monasca's events post_test_hook.sh was called..."
(set -o posix; set)
# save ref to monasca-events-api dir
export MONASCA_EVENTS_API_DIR="$BASE/new/monasca-events-api"
export TEMPEST_DIR="$BASE/new/tempest"
sudo chown -R $USER:stack $MONASCA_EVENTS_API_DIR
sudo chown -R $USER:stack $TEMPEST_DIR
load_devstack_utilities
setup_monasca_events_api
set_tempest_conf
(cd $TEMPEST_DIR; testr init)
(cd $TEMPEST_DIR; testr list-tests monasca_events_api/tests/functional > monasca_tempest_tests)
(cd $TEMPEST_DIR; cat monasca_tempest_tests)
(cd $TEMPEST_DIR; cat monasca_tempest_tests | grep gate > monasca_tempest_tests_gate)
(cd $TEMPEST_DIR; testr run --subunit --load-list=monasca_tempest_tests_gate | subunit-trace --fails)


@ -0,0 +1,15 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs


@ -0,0 +1,72 @@
- hosts: all
  name: Autoconverted job legacy-tempest-dsvm-monasca-python-mysql-full from old job gate-tempest-dsvm-monasca-python-mysql-full-ubuntu-xenial-nv
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack-infra/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              git://git.openstack.org \
              openstack-infra/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          cat << 'EOF' >>"/tmp/dg-local.conf"
          [[local|localrc]]
          enable_plugin monasca-events-api git://git.openstack.org/openstack/monasca-events-api
          EOF
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PYTHONUNBUFFERED=true
          export ENABLED_SERVICES=tempest
          export DEVSTACK_GATE_NEUTRON=1
          export DEVSTACK_GATE_EXERCISES=0
          export DEVSTACK_GATE_POSTGRES=0
          export PROJECTS="openstack/monasca-events-api $PROJECTS"
          export PROJECTS="openstack/monasca-persister $PROJECTS"
          export PROJECTS="openstack/monasca-common $PROJECTS"
          export PROJECTS="openstack/python-monascaclient $PROJECTS"
          function pre_test_hook {
              source $BASE/new/monasca-events-api/monasca_events_api/tests/functional/contrib/gate_hook.sh
          }
          export -f pre_test_hook
          function post_test_hook {
              # Configure and run tempest on monasca-events-api installation
              source $BASE/new/monasca-events-api/monasca_events_api/tests/functional/contrib/post_test_hook.sh
          }
          export -f post_test_hook
          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'