Add devstack plugin

This patch provides a devstack plugin.

Co-Authored-By: Daisuke Fujita <fuzita.daisuke@jp.fujitsu.com>
Change-Id: Id0de403b05f174176e92cc7d6287d38fcd043d02
Commit 30c75d34ba (parent abdabfe54b)
Author: Hisashi Osanai
Date: 2016-06-10 11:28:42 +00:00

18 changed files with 1167 additions and 1 deletion

devstack/README.md (new file)
@@ -0,0 +1,63 @@
# Monasca Analytics DevStack Plugin

The Monasca Analytics DevStack plugin currently only works on Ubuntu 14.04 (Trusty).
More Linux distributions will be supported in the future.

Running the Monasca Analytics DevStack plugin requires a machine with 8GB of RAM.

Directions for installing and running DevStack can be found here:
http://docs.openstack.org/developer/devstack/

To run Monasca Analytics in DevStack, follow these three steps.

1. Clone the DevStack repo.

   ```
   git clone https://git.openstack.org/openstack-dev/devstack
   ```

2. Add the following to the DevStack local.conf file in the root of the devstack
   directory. You may need to create the local.conf file if it does not already exist.

   ```
   # BEGIN DEVSTACK LOCAL.CONF CONTENTS
   [[local|localrc]]
   MYSQL_PASSWORD=secretmysql
   DATABASE_PASSWORD=secretdatabase
   RABBIT_PASSWORD=secretrabbit
   ADMIN_PASSWORD=secretadmin
   SERVICE_PASSWORD=secretservice
   SERVICE_TOKEN=111222333444
   LOGFILE=$DEST/logs/stack.sh.log
   LOGDIR=$DEST/logs
   LOG_COLOR=False
   # This line will enable all of Monasca Analytics.
   enable_plugin monasca-analytics git://git.openstack.org/openstack/monasca-analytics
   # END DEVSTACK LOCAL.CONF CONTENTS
   ```

3. Run './stack.sh' from the root of the devstack directory.

If you want to run Monasca Analytics with the bare minimum of OpenStack components,
you can add the following two lines to the local.conf file.

   ```
   disable_all_services
   enable_service rabbit mysql key
   ```
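For reference, a complete minimal local.conf assembled from the fragments above might look like this (a sketch; the passwords are the sample values from step 2):

```
[[local|localrc]]
MYSQL_PASSWORD=secretmysql
DATABASE_PASSWORD=secretdatabase
RABBIT_PASSWORD=secretrabbit
ADMIN_PASSWORD=secretadmin
SERVICE_PASSWORD=secretservice
SERVICE_TOKEN=111222333444
LOGFILE=$DEST/logs/stack.sh.log
LOGDIR=$DEST/logs
LOG_COLOR=False

# Run only the bare minimum of OpenStack services.
disable_all_services
enable_service rabbit mysql key

# Enable all of Monasca Analytics.
enable_plugin monasca-analytics git://git.openstack.org/openstack/monasca-analytics
```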
```
# (C) Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```

devstack/files/kafka/kafka-server-start.sh (new file)
@@ -0,0 +1,40 @@
#!/bin/bash
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
if [ $# -lt 1 ]; then
    echo "USAGE: $0 [-daemon] server.properties"
    exit 1
fi

base_dir=$(dirname $0)

export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
export KAFKA_HEAP_OPTS="-Xms256m -Xmx256m"

EXTRA_ARGS="-name kafkaServer -loggc"

COMMAND=$1
case $COMMAND in
    -daemon)
        EXTRA_ARGS="-daemon "$EXTRA_ARGS
        shift
        ;;
    *)
        ;;
esac

exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
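For example, run by hand (the upstart job installed next does essentially this), the wrapper might be invoked as follows (a sketch, assuming the /opt/kafka layout created by devstack/plugin.sh):

```
# Start the broker with the bundled config, daemonized, as the kafka user
sudo -u kafka KAFKA_HEAP_OPTS="-Xmx128m" \
    /opt/kafka/bin/kafka-server-start.sh -daemon /etc/kafka/server.properties
```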

devstack/files/kafka/kafka.conf (new file)
@@ -0,0 +1,35 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
description "Kafka"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
limit nofile 32768 32768
# If zookeeper is running on this box, also give it time to start up properly
pre-start script
    if [ -e /etc/init.d/zookeeper ]; then
        /etc/init.d/zookeeper start || true
    fi
end script
# Rather than using setuid/setgid, sudo is used because the pre-start task must run as root
exec sudo -Hu kafka -g kafka KAFKA_HEAP_OPTS="-Xmx128m" /opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties
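Once plugin.sh installs this file as /etc/init/kafka.conf, the broker can be managed with the usual upstart commands (plugin.sh itself uses `sudo start kafka || sudo restart kafka`):

```
sudo start kafka      # start the broker
sudo status kafka     # check that it is running
sudo stop kafka       # stop it again
```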

devstack/files/kafka/log4j.properties (new file)
@@ -0,0 +1,75 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
log4j.rootLogger=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=50MB
log4j.appender.kafkaAppender.MaxBackupIndex=4
log4j.appender.kafkaAppender.File=/var/log/kafka/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.MaxFileSize=50MB
log4j.appender.stateChangeAppender.MaxBackupIndex=4
log4j.appender.stateChangeAppender.File=/var/log/kafka/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.MaxFileSize=50MB
log4j.appender.controllerAppender.MaxBackupIndex=4
log4j.appender.controllerAppender.File=/var/log/kafka/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Turn on all our debugging info
#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
#log4j.logger.kafka.perf=DEBUG, kafkaAppender
#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
log4j.logger.kafka=WARN, kafkaAppender
# Tracing requests results in large logs
#log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
#log4j.appender.requestAppender.MaxFileSize=50MB
#log4j.appender.requestAppender.MaxBackupIndex=4
#log4j.appender.requestAppender.File=/var/log/kafka/kafka-request.log
#log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
#log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
#
#log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
#log4j.additivity.kafka.network.RequestChannel$=false
#
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
#log4j.logger.kafka.request.logger=TRACE, requestAppender
#log4j.additivity.kafka.request.logger=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false

devstack/files/kafka/server.properties (new file)
@@ -0,0 +1,119 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The port the socket server listens on
port=9092
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=127.0.0.1
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>
# The number of threads handling network requests
num.network.threads=2
# The number of threads doing disk I/O
num.io.threads=2
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/var/kafka
auto.create.topics.enable=false
# The number of logical partitions per topic per server. More partitions allow greater parallelism
# for consumption, but also mean more files.
num.partitions=2
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=24
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=104857600
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=104857600
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=127.0.0.1:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
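Note that auto.create.topics.enable=false above, so topics have to be created explicitly before anything can produce to them. For example, the transformed_alerts topic used by the KafkaSink in markov_source_config.json could be created with the standard Kafka 0.9 CLI (a sketch, assuming the /opt/kafka layout from plugin.sh):

```
/opt/kafka/bin/kafka-topics.sh --create \
    --zookeeper 127.0.0.1:2181 \
    --replication-factor 1 \
    --partitions 2 \
    --topic transformed_alerts
```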

devstack/files/monasca-analytics/logging.json (new file)
@@ -0,0 +1,24 @@
{
  "version": 1,
  "disable_existing_loggers": false,
  "formatters": {
    "standard": {
      "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
    }
  },
  "handlers": {
    "default": {
      "level": "DEBUG",
      "class": "logging.FileHandler",
      "filename": "/var/log/monasca/analytics/monasca_analytics.log",
      "formatter": "standard"
    }
  },
  "loggers": {
    "": {
      "handlers": ["default"],
      "level": "DEBUG",
      "propagate": true
    }
  }
}
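A stray or missing comma in this file will keep the analytics daemon from starting, so it is worth validating after editing; Python's bundled json.tool is enough for a quick syntax check:

```
python -m json.tool /etc/monasca/analytics/logging.json > /dev/null \
    && echo "logging.json is valid JSON"
```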

devstack/files/monasca-analytics/markov_source_config.json (new file)
@@ -0,0 +1,114 @@
{
  "spark_config": {
    "appName": "testApp",
    "streaming": {
      "batch_interval": 1
    }
  },
  "server": {
    "port": 3000,
    "debug": false
  },
  "sources": {
    "src1": {
      "module": "CloudMarkovChainSource",
      "params": {
        "server_sleep_in_seconds": 0.01
      },
      "transitions": {
        "web_service": {
          "run=>slow": {
            "0": 0.001,
            "8": 0.02,
            "12": 0.07,
            "14": 0.07,
            "22": 0.03,
            "24": 0.001
          },
          "slow=>run": {
            "0": 0.99,
            "8": 0.7,
            "12": 0.1,
            "14": 0.1,
            "22": 0.8,
            "24": 0.99
          },
          "stop=>run": 0.7
        },
        "host": {
          "on=>off": 0.005,
          "off=>on": 0.5
        },
        "switch": {
          "on=>off": 0.01,
          "off=>on": 0.7
        }
      },
      "triggers": {
        "support": {
          "get_called": {
            "0": 0.1,
            "8": 0.2,
            "12": 0.8,
            "14": 0.8,
            "22": 0.5,
            "24": 0.0
          }
        }
      },
      "graph": {
        "h1:host": ["s1"],
        "h2:host": ["s1"],
        "s1:switch": [],
        "w1:web_service": ["h1"],
        "w2:web_service": ["h2"]
      }
    }
  },
  "ingestors": {
    "ing1": {
      "module": "CloudIngestor"
    }
  },
  "smls": {
    "sml1": {
      "module": "LiNGAM",
      "params": {
        "threshold": 0.5
      }
    }
  },
  "voters": {
    "vot1": {
      "module": "PickIndexVoter",
      "params": {
        "index": 0
      }
    }
  },
  "sinks": {
    "snk1": {
      "module": "KafkaSink",
      "params": {
        "host": "localhost",
        "port": 9092,
        "topic": "transformed_alerts"
      }
    }
  },
  "ldps": {
    "ldp1": {
      "module": "CloudCausalityLDP"
    }
  },
  "connections": {
    "src1": ["ing1", "ldp1"],
    "sml1": ["vot1"],
    "ing1": [],
    "vot1": ["ldp1"],
    "ldp1": ["snk1"],
    "snk1": []
  },
  "feedback": {}
}
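With the stack running, the alerts that ldp1 pushes through snk1 can be watched with the console consumer that ships with Kafka (a sketch, assuming the broker layout installed by plugin.sh):

```
/opt/kafka/bin/kafka-console-consumer.sh \
    --zookeeper 127.0.0.1:2181 \
    --topic transformed_alerts
```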

@@ -0,0 +1,27 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
[Unit]
Description=Monasca Analytics Daemon
After=zookeeper.service
[Service]
Type=simple
LimitNOFILE=32768
ExecStart=/etc/monasca/analytics/init/start-monasca-analytics.sh
[Install]
WantedBy=multi-user.target
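On a systemd-based distribution this unit would be managed in the usual way; the unit file name below is hypothetical, since the plugin currently installs only the upstart job on Ubuntu 14.04:

```
# Assumes the unit was installed as monasca-analytics.service
sudo systemctl enable monasca-analytics.service
sudo systemctl start monasca-analytics.service
systemctl status monasca-analytics.service
```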

devstack/files/monasca-analytics/monasca_analytics_init.conf (new file)
@@ -0,0 +1,29 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
description "Monasca Analytics"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
limit nofile 32768 32768
expect daemon
exec /etc/monasca/analytics/init/start-monasca-analytics.sh

devstack/files/monasca-analytics/settings.xml (new file)
@@ -0,0 +1,14 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.1.0 http://maven.apache.org/xsd/settings-1.1.0.xsd">
  <proxies>
    <proxy>
      <active>true</active>
      <protocol>http</protocol>
      <username></username>
      <password></password>
      <host></host>
      <port>8080</port>
      <nonProxyHosts>localhost</nonProxyHosts>
    </proxy>
  </proxies>
</settings>

devstack/files/monasca-analytics/start-monasca-analytics.sh (new file)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
start-stop-daemon -c monasca-analytics:monasca-analytics -m \
    --pidfile /var/run/monasca/analytics/analytics.pid \
    --start --exec python /opt/stack/monasca-analytics/run.py -p /opt/spark/spark-1.6.1/ -c /etc/monasca/analytics/markov_source_config.json -l /etc/monasca/analytics/logging.json
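The --pidfile above and the log destination configured in logging.json give a quick way to confirm the daemon came up (a sketch):

```
# PID written by start-stop-daemon
cat /var/run/monasca/analytics/analytics.pid
# Follow the analytics log configured in logging.json
tail -f /var/log/monasca/analytics/monasca_analytics.log
```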

devstack/files/zookeeper/environment (new file)
@@ -0,0 +1,36 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from http://packages.ubuntu.com/saucy/zookeeperd
NAME=zookeeper
ZOOCFGDIR=/etc/zookeeper/conf
# seems, that log4j requires the log4j.properties file to be in the classpath
CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
ZOOCFG="$ZOOCFGDIR/zoo.cfg"
ZOO_LOG_DIR=/var/log/zookeeper
USER=$NAME
GROUP=$NAME
PIDDIR=/var/run/$NAME
PIDFILE=$PIDDIR/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
JAVA=/usr/bin/java
ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
JMXLOCALONLY=false
JAVA_OPTS=""

devstack/files/zookeeper/log4j.properties (new file)
@@ -0,0 +1,69 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# From http://packages.ubuntu.com/saucy/zookeeperd
# ZooKeeper Logging Configuration
#
# Format is "<default threshold> (, <appender>)+"
log4j.rootLogger=${zookeeper.root.logger}
# Example: console appender only
# log4j.rootLogger=INFO, CONSOLE
# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=INFO
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add ROLLINGFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=WARN
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add TRACEFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

devstack/files/zookeeper/myid (new file)
@@ -0,0 +1 @@
0

devstack/files/zookeeper/zoo.cfg (new file)
@@ -0,0 +1,74 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=/var/lib/zookeeper
# Place the dataLogDir to a separate physical disc for better performance
# dataLogDir=/disk2/zookeeper
# the port at which the clients will connect
clientPort=2181
# Maximum number of client connections allowed from a single client (IP address)
maxClientCnxns=60
# specify all zookeeper servers
# The first port is used by followers to connect to the leader
# The second one is used for leader election
server.0=127.0.0.1:2888:3888
# To avoid seeks ZooKeeper allocates space in the transaction log file in
# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
# for changing the size of the blocks is to reduce the block size if snapshots
# are taken more often. (Also, see snapCount).
#preAllocSize=65536
# Clients can submit requests faster than ZooKeeper can process them,
# especially if there are a lot of clients. To prevent ZooKeeper from running
# out of memory due to queued requests, ZooKeeper will throttle clients so that
# there is no more than globalOutstandingLimit outstanding requests in the
# system. The default limit is 1,000. ZooKeeper logs transactions to a
# transaction log. After snapCount transactions are written to a log file a
# snapshot is started and a new transaction log file is started. The default
# snapCount is 10,000.
#snapCount=1000
# If this option is defined, requests will be logged to a trace file named
# traceFile.year.month.day.
#traceFile=
# Leader accepts client connections. Default value is "yes". The leader machine
# coordinates updates. For higher update throughput at the slight expense of
# read throughput the leader can be configured to not accept clients and focus
# on coordination.
#leaderServes=yes
# Autopurge every hour to avoid using lots of disk in bursts
# Order of the next 2 properties matters.
# autopurge.snapRetainCount must be before autopurge.purgeInterval.
autopurge.snapRetainCount=3
autopurge.purgeInterval=1
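With ZooKeeper listening on clientPort 2181, the standard four-letter-word probe is a quick health check; a healthy server answers "imok":

```
echo ruok | nc 127.0.0.1 2181
```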

devstack/plugin.sh (new executable file)
@@ -0,0 +1,367 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Monasca-analytics DevStack plugin
#
# Install and start Monasca-analytics service in devstack
#
# To enable Monasca-analytics in devstack add an entry to local.conf that
# looks like
#
# [[local|localrc]]
# enable_plugin monasca-analytics https://git.openstack.org/openstack/monasca-analytics
#
# By default all Monasca services are started (see
# devstack/settings). To disable a specific service use the
# disable_service function. For example to turn off notification:
#
# disable_service monasca-notification
#
# Several variables set in the localrc section adjust common behaviors
# of Monasca (see within for additional settings):
#
# EXAMPLE VARS HERE
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
ERREXIT=$(set +o | grep errexit)
set -o errexit
# Determine if we are running in devstack-gate or devstack.
if [[ $DEST ]]; then
    # We are running in devstack-gate.
    export MONASCA_BASE=${MONASCA_BASE:-"${DEST}"}
else
    # We are running in devstack.
    export MONASCA_BASE=${MONASCA_BASE:-"/opt/stack"}
fi
export MONASCA_ANALYTICS_BASE=${MONASCA_ANALYTICS_BASE:-"${MONASCA_BASE}/monasca-analytics"}
###
function pre_install_spark {
    :
}
###
function pre_install_monasca_analytics {
    :
}
###
function unstack_monasca_analytics {
    echo_summary "Unstack Monasca-analytics"
    sudo service monasca-analytics stop || true
    delete_monasca_analytics_files
    sudo userdel monasca-analytics || true
    sudo groupdel monasca-analytics || true
    unstack_spark
}
###
function delete_monasca_analytics_files {
    sudo rm -rf /opt/monasca/analytics || true
    sudo rm -rf /etc/monasca/analytics || true
    sudo rm /etc/init/monasca-analytics.conf || true
    MONASCA_ANALYTICS_DIRECTORIES=("/var/log/monasca/analytics" "/var/run/monasca/analytics" "/etc/monasca/analytics/init")
    for MONASCA_ANALYTICS_DIRECTORY in "${MONASCA_ANALYTICS_DIRECTORIES[@]}"
    do
        sudo rm -rf ${MONASCA_ANALYTICS_DIRECTORY} || true
    done
}
###
function unstack_spark {
    echo_summary "Unstack Spark"
    delete_spark_directories
    sudo rm -rf /opt/spark/download || true
}
###
function clean_monasca_analytics {
    set +o errexit
    unstack_monasca_analytics
    set -o errexit
}
###
function delete_spark_directories {
    for SPARK_DIRECTORY in "${SPARK_DIRECTORIES[@]}"
    do
        sudo rm -rf ${SPARK_DIRECTORY} || true
    done
    sudo rm -rf /var/log/spark-events || true
}
###
function install_monasca_analytics {
    echo_summary "Install Monasca-analytics"
    sudo groupadd --system monasca-analytics || true
    sudo useradd --system -g monasca-analytics monasca-analytics || true
    create_monasca_analytics_directories
    copy_monasca_analytics_files
}
###
function create_monasca_analytics_directories {
    MONASCA_ANALYTICS_DIRECTORIES=("/var/log/monasca/analytics" "/etc/monasca/analytics/init")
    for MONASCA_ANALYTICS_DIRECTORY in "${MONASCA_ANALYTICS_DIRECTORIES[@]}"
    do
        sudo mkdir -p ${MONASCA_ANALYTICS_DIRECTORY}
        sudo chown monasca-analytics:monasca-analytics ${MONASCA_ANALYTICS_DIRECTORY}
        sudo chmod 755 ${MONASCA_ANALYTICS_DIRECTORY}
    done
}
###
function copy_monasca_analytics_files {
    sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/monasca-analytics/markov_source_config.json /etc/monasca/analytics
    sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/monasca-analytics/logging.json /etc/monasca/analytics
    sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/monasca-analytics/monasca_analytics_init.conf /etc/init/monasca-analytics.conf
    sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/monasca-analytics/start-monasca-analytics.sh /etc/monasca/analytics/init/.
    sudo chmod +x /etc/monasca/analytics/init/start-monasca-analytics.sh
}
###
function install_pkg {
    ## JDK
    sudo -E apt-get -y install $JDK_PKG

    ## SCALA
    sudo -E curl $SCALA_URL -o $SPARK_DOWNLOAD/$SCALA
    sudo -E dpkg -i $SPARK_DOWNLOAD/$SCALA
    echo "deb https://dl.bintray.com/sbt/debian /" | sudo -E tee -a /etc/apt/sources.list.d/sbt.list
    sudo -E apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 642AC823
    sudo -E apt-get update
    sudo -E apt-get -y install sbt

    ## other pkg
    sudo -E apt-get -y install python-setuptools
    sudo -E apt-get -y install python-numpy python-scipy ipython
}
###
function build_spark {
    ## install maven
    sudo -E curl $MAVEN_URL -o $SPARK_DOWNLOAD/$MAVEN_TARBAL
    sudo chown stack:stack $SPARK_DOWNLOAD/$MAVEN_TARBAL
    sudo -u stack tar -xzf $SPARK_DOWNLOAD/$MAVEN_TARBAL -C $SPARK_DIR

    if [ ${http_proxy} ]; then
        read HTTP_PROXY_USER_NAME HTTP_PROXY_PASSWORD HTTP_PROXY_HOST<< END
`echo ${http_proxy:7} | awk -F: '{sub("@", ":");print $1, $2, $3}'`
END
        if [ -z $HTTP_PROXY_HOST ]; then
            LENGTH_FOR_HOST=`expr match "$http_proxy" 'http://[\.A-Za-z\-]*'`-7
            sed -e '7,8d' \
                -e "s/<host><\/host>/<host>${http_proxy:7:$LENGTH_FOR_HOST}<\/host>/g" \
                ${MONASCA_ANALYTICS_BASE}/devstack/files/monasca-analytics/settings.xml > ~/.m2/settings.xml
        else
            sed -e "s/<username><\/username>/<username>${HTTP_PROXY_USER_NAME}<\/username>/g" \
                -e "s/<password><\/password>/<password>${HTTP_PROXY_PASSWORD}<\/password>/g" \
                -e "s/<host><\/host>/<host>${HTTP_PROXY_HOST}<\/host>/g" \
                ${MONASCA_ANALYTICS_BASE}/devstack/files/monasca-analytics/settings.xml > ~/.m2/settings.xml
        fi
    fi

    ## Build Spark
    sudo -E curl $SPARK_URL -o $SPARK_DOWNLOAD/${SPARK_TARBALL_NAME}
    sudo chown stack:stack $SPARK_DOWNLOAD/${SPARK_TARBALL_NAME}
    sudo -u stack tar -xzf $SPARK_DOWNLOAD/${SPARK_TARBALL_NAME} -C $SPARK_DIR
    DEVSTACK_DIR=`pwd`
    cd $SPARK_DIR/spark-${SPARK_VERSION}
    $SPARK_DIR/$MAVEN/bin/mvn -DskipTests clean package
    sudo cp -pf ./conf/log4j.properties.template ./conf/log4j.properties
    sudo sed -i 's/log4j.rootCategory=INFO/log4j.rootCategory=ERROR/g' ./conf/log4j.properties
    cd $DEVSTACK_DIR
}
###
function install_zookeeper {
    if [ ! -e /etc/init.d/zookeeper ]; then
        echo_summary "Install Monasca Zookeeper"
        sudo apt-get -y install zookeeperd
        sudo cp "${MONASCA_ANALYTICS_BASE}"/devstack/files/zookeeper/myid /etc/zookeeper/conf/myid
        sudo cp "${MONASCA_ANALYTICS_BASE}"/devstack/files/zookeeper/environment /etc/zookeeper/conf/environment
        sudo cp "${MONASCA_ANALYTICS_BASE}"/devstack/files/zookeeper/log4j.properties /etc/zookeeper/conf/log4j.properties
        sudo cp "${MONASCA_ANALYTICS_BASE}"/devstack/files/zookeeper/zoo.cfg /etc/zookeeper/conf/zoo.cfg
        if [[ ${SERVICE_HOST} ]]; then
            sudo sed -i "s/server\.0=127\.0\.0\.1/server.0=${SERVICE_HOST}/g" /etc/zookeeper/conf/zoo.cfg
        fi
        sudo mkdir -p /var/log/zookeeper || true
        sudo chmod 755 /var/log/zookeeper
        sudo start zookeeper || sudo restart zookeeper
    else
        echo_summary "SKIP:Install Monasca Zookeeper"
    fi
}
###
function install_kafka {
    if [ ! -e /etc/init/kafka.conf ]; then
        echo_summary "Install Monasca Kafka"
        sudo groupadd --system kafka || true
        sudo useradd --system -g kafka kafka || true
        sudo -E curl $KAFKA_URL -o $SPARK_DOWNLOAD/$KAFKA_TARBALL
        sudo tar -xzf $SPARK_DOWNLOAD/$KAFKA_TARBALL -C /opt
        sudo ln -sf /opt/kafka_${KAFKA_VERSION} /opt/kafka
        sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/kafka/kafka-server-start.sh /opt/kafka_${KAFKA_VERSION}/bin/kafka-server-start.sh
        sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/kafka/kafka.conf /etc/init/kafka.conf
        sudo chown root:root /etc/init/kafka.conf
        sudo chmod 644 /etc/init/kafka.conf
        sudo mkdir -p /var/kafka || true
        sudo chown kafka:kafka /var/kafka
        sudo chmod 755 /var/kafka
        sudo rm -rf /var/kafka/lost+found
        sudo mkdir -p /var/log/kafka || true
        sudo chown kafka:kafka /var/log/kafka
        sudo chmod 755 /var/log/kafka
        sudo ln -sf /opt/kafka/config /etc/kafka
        sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/kafka/log4j.properties /etc/kafka/log4j.properties
        sudo chown kafka:kafka /etc/kafka/log4j.properties
        sudo chmod 644 /etc/kafka/log4j.properties
        sudo cp -f "${MONASCA_ANALYTICS_BASE}"/devstack/files/kafka/server.properties /etc/kafka/server.properties
        sudo chown kafka:kafka /etc/kafka/server.properties
        sudo chmod 644 /etc/kafka/server.properties
        if [[ ${SERVICE_HOST} ]]; then
            sudo sed -i "s/host\.name=127\.0\.0\.1/host.name=${SERVICE_HOST}/g" /etc/kafka/server.properties
            sudo sed -i "s/zookeeper\.connect=127\.0\.0\.1:2181/zookeeper.connect=${SERVICE_HOST}:2181/g" /etc/kafka/server.properties
        fi
        sudo start kafka || sudo restart kafka
    else
        echo_summary "SKIP:Install Monasca Kafka"
    fi
}
###
function install_spark {
    echo_summary "Install Spark"
    sudo mkdir -p $SPARK_DOWNLOAD
    sudo chown -R stack:stack $SPARK_DIR
    sudo chmod -R 755 $SPARK_DIR
    mkdir -p ~/.m2
    sudo -E apt-get update
    install_pkg
    build_spark
    install_zookeeper
    install_kafka
}
###
function extra_spark {
    :
}
###
function post_config_monasca_analytics {
    :
}
###
function extra_monasca_analytics {
    sudo service monasca-analytics start
}
# check for service enabled
echo_summary "Monasca-analytics plugin with service enabled = `is_service_enabled monasca-analytics`"
if is_service_enabled monasca-analytics; then

    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
        # Set up system services
        echo_summary "Configuring Spark system services"
        pre_install_spark
        echo_summary "Configuring Monasca-analytics system services"
        pre_install_monasca_analytics

    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        # Perform installation of service source
        echo_summary "Installing Spark"
        install_spark
        echo_summary "Installing Monasca-analytics"
        install_monasca_analytics

    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        # Configure after the other layer 1 and 2 services have been configured
        echo_summary "Configuring Monasca-analytics"
        post_config_monasca_analytics

    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        # Initialize and start the Monasca service
        echo_summary "Initializing Monasca-analytics"
        extra_monasca_analytics
    fi

    if [[ "$1" == "unstack" ]]; then
        echo_summary "Unstacking Monasca-analytics"
        unstack_monasca_analytics
    fi

    if [[ "$1" == "clean" ]]; then
        # Remove state and transient data
        # Remember clean.sh first calls unstack.sh
        echo_summary "Cleaning Monasca-analytics"
        clean_monasca_analytics
    fi

else
    echo_summary "Monasca-analytics not enabled"
fi
# Restore errexit
$ERREXIT
# Restore xtrace
$XTRACE
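DevStack drives this dispatch block itself: stack.sh invokes the plugin with the pre-install, install, post-config and extra phases in order, and the teardown scripts trigger the last two branches (a usage sketch, run from the devstack root):

```
./stack.sh     # runs the stack pre-install, install, post-config and extra phases
./unstack.sh   # runs the unstack branch
./clean.sh     # runs unstack first, then the clean branch
```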

devstack/settings (new file)
@@ -0,0 +1,58 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
enable_service monasca-analytics
#
# Monasca infrastructure services
#
# databases
# MySQL is already enabled in devstack
#
# Dependent Software Versions
#
# spark vars
SPARK_DIRECTORIES=("/var/spark" "/var/log/spark" "/var/run/spark/work" "/etc/spark/conf" "/etc/spark/init" )
JDK_PKG="openjdk-7-jre-headless openjdk-7-jdk"
MAVEN="apache-maven-3.3.9"
MAVEN_TARBAL="$MAVEN-bin.tar.gz"
MAVEN_URL="ftp://mirror.reverse.net/pub/apache/maven/maven-3/3.3.9/binaries/$MAVEN_TARBAL"
SCALA_VERSION=${SCALA_VERSION:-2.11}
SCALA_MIN_VERSION=${SCALA_MIN_VERSION:-.7}
SCALA="scala-${SCALA_VERSION}${SCALA_MIN_VERSION}.deb"
SCALA_URL="https://downloads.typesafe.com/scala/${SCALA_VERSION}${SCALA_MIN_VERSION}/$SCALA"
SPARK_DIR="/opt/spark"
SPARK_DOWNLOAD="$SPARK_DIR/download"
SPARK_VERSION=${SPARK_VERSION:-1.6.1}
SPARK_TARBALL_NAME="spark-${SPARK_VERSION}.tgz"
SPARK_URL="http://apache.claz.org/spark/spark-$SPARK_VERSION/$SPARK_TARBALL_NAME"
BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-0.9.0.0}
KAFKA_DIR="/opt/kafka"
KAFKA_DOWNLOAD="$KAFKA_DIR/download"
KAFKA_VERSION=${KAFKA_VERSION:-${SCALA_VERSION}-${BASE_KAFKA_VERSION}}
KAFKA_TARBALL="kafka_$KAFKA_VERSION.tgz"
KAFKA_URL="http://ftp.riken.jp/net/apache/kafka/$BASE_KAFKA_VERSION/$KAFKA_TARBALL"
#KAFKA_URL="http://apache.arvixe.com/kafka/$BASE_KAFKA_VERSION/$KAFKA_TARBALL"
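Because most of these variables use the ${VAR:-default} pattern, they can be overridden from the [[local|localrc]] section of local.conf instead of editing this file; for example (a sketch):

```
# Pin alternative dependency versions from local.conf
SPARK_VERSION=1.6.1
BASE_KAFKA_VERSION=0.9.0.0
SCALA_VERSION=2.11
```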

tox.ini
@@ -9,7 +9,7 @@ setenv =
     PYTHONUNBUFFERED=1
     VIRTUAL_ENV={envdir}
     DISCOVER_DIRECTORY=tests
-    PYTHONPATH=/home/vagrant/spark/python:/home/vagrant/spark/python/lib/py4j-0.9-src.zip:
+    PYTHONPATH=/opt/spark/spark-1.6.1/python:/opt/spark/spark-1.6.1/python/lib/py4j-0.9-src.zip:
 install_command = pip install -U {opts} {packages}
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt