Add cassandra support

Support Cassandra db installation and Cassandra related
configurations for Monasca api and persister services
in devstack. Add Monasca rest API Cassandra plugin for
retrieving metric, dimension, measurement, statistics
and alarms.

Change-Id: Ie60d668692e1f25f555dda2355f4e513d582736c
story: 2001231
task: 5759
This commit is contained in:
James Gu 2017-08-31 14:47:47 -07:00 committed by Witold Bedyk
parent af3bfd45e1
commit ba43f07726
17 changed files with 1148 additions and 744 deletions

View File

@ -1,4 +1,5 @@
Adrian Czarnecki <adrian.czarnecki@ts.fujitsu.com>
Akira Yoshiyama <akirayoshiyama@gmail.com>
Amir Mofakhar <amofakhar@op5.com>
Andrea Adams <aadams@hpe.com>
Andreas Jaeger <aj@suse.com>
@ -9,6 +10,7 @@ Ben Motz <bmotz@cray.com>
Bertrand Lallau <bertrand.lallau@thalesgroup.com>
Brad Klein <bradley.klein@twcable.com>
Cao Xuan Hoang <hoangcx@vn.fujitsu.com>
Christoph Held <christoph.held@est.fujitsu.com>
Clenimar Filemon <clenimar.filemon@gmail.com>
Craig Bryant <craig.bryant@hp.com>
Craig Bryant <craig.bryant@hpe.com>
@ -28,6 +30,7 @@ Emma Foley <emma.l.foley@intel.com>
Erickson Santos <erickson@lsd.ufcg.edu.br>
Flavio Percoco <flaper87@gmail.com>
Flávio Ramalho <flaviosr@lsd.ufcg.edu.br>
Georgia-Anna Farmaki <georgia-anna.farmaki@est.fujitsu.com>
Ghanshyam <ghanshyam.mann@nectechnologies.in>
Habeeb Mohammed <habeeb.mohammed@hpe.com>
Haiwei Xu <xu-haiwei@mxw.nes.nec.co.jp>
@ -35,6 +38,8 @@ Hangdong Zhang <hdzhang@fiberhome.com>
Hironori Shiina <shiina.hironori@jp.fujitsu.com>
Igor Natanael <igornsa@lsd.ufcg.edu.br>
Jakub Wachowski <jakub.wachowski@ts.fujitsu.com>
James E. Blair <jeblair@redhat.com>
James Gu <jgu@suse.com>
Janonymous <janonymous.codevulture@gmail.com>
Jeremy Stanley <fungi@yuggoth.org>
Joachim Barheine <joachim.barheine@sap.com>
@ -63,6 +68,7 @@ Roland Hochmuth <roland.hochmuth@hp.com>
Ryan Bak <ryan.bak@twcable.com>
Ryan Brandt <ryan.brandt@hp.com>
SamKirsch10 <sam.kirsch@hp.com>
Scott Grasley <scott.grasley@suse.com>
Shinya Kawabata <s-kawabata@wx.jp.nec.com>
Srinivas Sakhamuri <srini.openstack@gmail.com>
Stefano Canepa <stefano.canepa@hp.com>
@ -79,7 +85,9 @@ Vu Cong Tuan <tuanvc@vn.fujitsu.com>
Witold Bedyk <witold.bedyk@est.fujitsu.com>
Yushiro FURUKAWA <y.furukawa_2@jp.fujitsu.com>
ZhiQiang Fan <aji.zqfan@gmail.com>
Zuul <zuul@review.openstack.org>
alpineriveredge <alpineriveredge@gmail.com>
anilkumarthovi <anilkumar.thovi@cognizant.com>
bklei <bradley.klein@twcable.com>
cindy oneill <cindy.o-neill@hp.com>
dieterly <deklan.dieterly@hpe.com>

View File

@ -1,46 +0,0 @@
drop table if exists monasca.metric_map;
drop table if exists monasca.measurements;
drop table if exists monasca.alarm_state_history;
drop schema if exists monasca;
create schema monasca
with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
use monasca;
create table monasca.metric_map (
tenant_id text,
region text,
metric_hash blob,
metric_map map<text, text>,
primary key ((tenant_id, region), metric_hash)
);
create index on monasca.metric_map (entries(metric_map));
create table monasca.measurements (
tenant_id text,
region text,
metric_hash blob,
time_stamp timestamp,
value double,
value_meta text,
primary key ((tenant_id, region, metric_hash), time_stamp)
);
create table monasca.alarm_state_history (
tenant_id text,
alarm_id text,
metrics text,
new_state text,
old_state text,
reason text,
reason_data text,
sub_alarms text,
time_stamp timestamp,
primary key ((tenant_id), alarm_id, time_stamp)
);

View File

@ -0,0 +1,93 @@
// (C) Copyright 2017 SUSE LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// version 1.0
drop schema if exists monasca;
// replication factor is set to 1 for devstack installation
create schema monasca with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
create table monasca.measurements (
metric_id blob,
region text static,
tenant_id text static,
metric_name text static,
dimensions frozen<list<text>> static,
time_stamp timestamp,
value double,
value_meta text,
primary key (metric_id, time_stamp)
)
WITH CLUSTERING ORDER BY (time_stamp ASC);
create table monasca.metrics (
region text,
tenant_id text,
metric_name text,
dimensions frozen<list<text>>,
dimension_names frozen<list<text>>,
metric_id blob,
created_at timestamp,
updated_at timestamp,
primary key ((region, tenant_id, metric_name), dimensions, dimension_names)
);
CREATE CUSTOM INDEX metrics_created_at_index ON monasca.metrics (created_at)
USING 'org.apache.cassandra.index.sasi.SASIIndex';
CREATE CUSTOM INDEX metrics_updated_at_index ON monasca.metrics (updated_at)
USING 'org.apache.cassandra.index.sasi.SASIIndex';
create table monasca.dimensions (
region text,
tenant_id text,
name text,
value text,
primary key ((region, tenant_id, name), value)
);
create table monasca.dimensions_metrics (
region text,
tenant_id text,
dimension_name text,
dimension_value text,
metric_name text,
primary key ((region, tenant_id, dimension_name, dimension_value), metric_name)
);
create table monasca.metrics_dimensions (
region text,
tenant_id text,
dimension_name text,
dimension_value text,
metric_name text,
primary key ((region, tenant_id, metric_name), dimension_name, dimension_value)
);
create table monasca.alarm_state_history (
tenant_id text,
alarm_id text,
time_stamp timestamp,
metric text,
old_state text,
new_state text,
reason text,
reason_data text,
sub_alarms text,
primary key ((tenant_id, alarm_id), time_stamp)
);

View File

@ -1,5 +1,6 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# Copyright (c) 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,24 +19,26 @@
name: monasca-persister
alarmHistoryConfiguration:
batchSize: 100
batchSize: %MONASCA_PERSISTER_BATCH_SIZE%
numThreads: 1
maxBatchTime: 15
maxBatchTime: %MONASCA_PERSISTER_MAX_BATCH_TIME%
commitBatchTime: %MONASCA_PERSISTER_COMMIT_BATCH_TIME%
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: alarm-state-transitions
groupId: 1_alarm-state-transitions
consumerId: "mini-mon"
clientId : 1
clientId: 1
metricConfiguration:
batchSize: 100
numThreads: 1
maxBatchTime: 15
batchSize: %MONASCA_PERSISTER_BATCH_SIZE%
numThreads: %MONASCA_PERSISTER_METRIC_THREADS%
maxBatchTime: %MONASCA_PERSISTER_MAX_BATCH_TIME%
commitBatchTime: %MONASCA_PERSISTER_COMMIT_BATCH_TIME%
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: metrics
groupId: 1_metrics
consumerId: "mini-mon"
clientId : 1
clientId: 1
#Kafka settings.
kafkaConfig:
@ -56,6 +59,43 @@ kafkaConfig:
zookeeperConnectionTimeoutMs : 60000
zookeeperSyncTimeMs: 2000
# uncomment if database type is cassandra
cassandraDbConfiguration:
contactPoints:
- %CASSANDRADB_HOST%
port: 9042
user: mon_persister
password: password
keyspace: monasca
localDataCenter: datacenter1
maxConnections: 5
maxRequests: 2048
# socket time out in milliseconds when creating a new connection
connectionTimeout: 5000
# how long the driver waits for a response from server. Must be
# longer than the server side timeouts in the cassandra.yaml
readTimeout: 60000
# number of retries in upsert query. The retry interval is exponential,
# i.e., 1, 2, 4, 8 ... seconds. Retry is blocking.
maxWriteRetries: 5
maxBatches: 250
maxDefinitionCacheSize: 2000000
# ANY(0),
# ONE(1),
# TWO(2),
# THREE(3),
# QUORUM(4),
# ALL(5),
# LOCAL_QUORUM(6),
# EACH_QUORUM(7),
# SERIAL(8),
# LOCAL_SERIAL(9),
# LOCAL_ONE(10);
consistencyLevel: ONE
# number of days metric retention
retentionPolicy: 45
verticaMetricRepoConfig:
maxCacheSize: 2000000

View File

@ -1,7 +1,8 @@
#!/bin/bash
# Copyright 2017 FUJITSU LIMITED
#
# (C) Copyright 2017 SUSE LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -53,6 +54,18 @@ else
MONASCA_PERSISTER_CMD="/usr/bin/java ${MONASCA_PERSISTER_JAVA_OPTS} -cp ${MONASCA_PERSISTER_JAR} monasca.persister.PersisterApplication server ${MONASCA_PERSISTER_CONF}"
fi
if [[ "${MONASCA_METRICS_DB,,}" == 'cassandra' ]]; then
MONASCA_PERSISTER_BATCH_SIZE=100
MONASCA_PERSISTER_MAX_BATCH_TIME=10
MONASCA_PERSISTER_METRIC_THREADS=2
MONASCA_PERSISTER_COMMIT_BATCH_TIME=10000
else
MONASCA_PERSISTER_BATCH_SIZE=100
MONASCA_PERSISTER_MAX_BATCH_TIME=15
MONASCA_PERSISTER_METRIC_THREADS=10
MONASCA_PERSISTER_COMMIT_BATCH_TIME=0
fi
is_monasca_persister_enabled() {
is_service_enabled monasca-persister && return 0
return 1
@ -141,10 +154,12 @@ configure_monasca_persister_python() {
iniset "$MONASCA_PERSISTER_CONF" kafka_metrics uri $SERVICE_HOST:9092
iniset "$MONASCA_PERSISTER_CONF" kafka_metrics group_id 1_metrics
iniset "$MONASCA_PERSISTER_CONF" kafka_metrics topic metrics
iniset "$MONASCA_PERSISTER_CONF" kafka_metrics batch_size 30
iniset "$MONASCA_PERSISTER_CONF" kafka_alarm_history uri $SERVICE_HOST:9092
iniset "$MONASCA_PERSISTER_CONF" kafka_alarm_history group_id 1_alarm-state-transitions
iniset "$MONASCA_PERSISTER_CONF" kafka_alarm_history topic alarm-state-transitions
iniset "$MONASCA_PERSISTER_CONF" kafka_alarm_history batch_size 1
iniset "$MONASCA_PERSISTER_CONF" zookeeper uri $SERVICE_HOST:2181
@ -155,9 +170,32 @@ configure_monasca_persister_python() {
iniset "$MONASCA_PERSISTER_CONF" influxdb password password
iniset "$MONASCA_PERSISTER_CONF" repositories metrics_driver ${M_REPO_DRIVER_INFLUX}
iniset "$MONASCA_PERSISTER_CONF" repositories alarm_state_history_driver ${AH_REPO_DRIVER_INFLUX}
else
iniset "$MONASCA_PERSISTER_CONF" cassandra cluster_ip_addresses ${SERVICE_HOST}
elif [[ "${MONASCA_METRICS_DB,,}" == 'cassandra' ]]; then
iniset "$MONASCA_PERSISTER_CONF" cassandra contact_points ${SERVICE_HOST}
iniset "$MONASCA_PERSISTER_CONF" cassandra port 9042
# iniset "$MONASCA_PERSISTER_CONF" cassandra user monasca
# iniset "$MONASCA_PERSISTER_CONF" cassandra password password
iniset "$MONASCA_PERSISTER_CONF" cassandra keyspace monasca
iniset "$MONASCA_PERSISTER_CONF" cassandra local_data_center datacenter1
iniset "$MONASCA_PERSISTER_CONF" cassandra connection_timeout 5
iniset "$MONASCA_PERSISTER_CONF" cassandra read_timeout 60
iniset "$MONASCA_PERSISTER_CONF" cassandra max_write_retries 5
iniset "$MONASCA_PERSISTER_CONF" cassandra max_batches 250
iniset "$MONASCA_PERSISTER_CONF" cassandra max_definition_cache_size 1000000
# consistency level names:
# ANY(0),
# ONE(1),
# TWO(2),
# THREE(3),
# QUORUM(4),
# ALL(5),
# LOCAL_QUORUM(6),
# EACH_QUORUM(7),
# SERIAL(8),
# LOCAL_SERIAL(9),
# LOCAL_ONE(10);
iniset "$MONASCA_PERSISTER_CONF" cassandra consistency_level ONE
iniset "$MONASCA_PERSISTER_CONF" cassandra retention_policy 45
iniset "$MONASCA_PERSISTER_CONF" repositories metrics_driver ${M_REPO_DRIVER_CASSANDRA}
iniset "$MONASCA_PERSISTER_CONF" repositories alarm_state_history_driver ${AH_REPO_DRIVER_CASSANDRA}
fi
@ -190,11 +228,16 @@ configure_monasca_persister_java() {
s|%ZOOKEEPER_HOST%|${SERVICE_HOST}|g;
s|%VERTICA_HOST%|${SERVICE_HOST}|g;
s|%INFLUXDB_HOST%|${SERVICE_HOST}|g;
s|%CASSANDRADB_HOST%|${SERVICE_HOST}|g;
s|%MONASCA_PERSISTER_DB_TYPE%|${MONASCA_METRICS_DB}|g;
s|%MONASCA_PERSISTER_BIND_HOST%|${MONASCA_PERSISTER_BIND_HOST}|g;
s|%MONASCA_PERSISTER_APP_PORT%|${MONASCA_PERSISTER_APP_PORT}|g;
s|%MONASCA_PERSISTER_ADMIN_PORT%|${MONASCA_PERSISTER_ADMIN_PORT}|g;
s|%MONASCA_PERSISTER_LOG_DIR%|${MONASCA_PERSISTER_LOG_DIR}|g;
s|%MONASCA_PERSISTER_BATCH_SIZE%|${MONASCA_PERSISTER_BATCH_SIZE}|g;
s|%MONASCA_PERSISTER_MAX_BATCH_TIME%|${MONASCA_PERSISTER_MAX_BATCH_TIME}|g;
s|%MONASCA_PERSISTER_COMMIT_BATCH_TIME%|${MONASCA_PERSISTER_COMMIT_BATCH_TIME}|g;
s|%MONASCA_PERSISTER_METRIC_THREADS%|${MONASCA_PERSISTER_METRIC_THREADS}|g;
" -i ${MONASCA_PERSISTER_CONF}
ln -sf ${MONASCA_PERSISTER_CONF} ${MONASCA_PERSISTER_GATE_CONFIG}

View File

@ -1,6 +1,7 @@
#
# (C) Copyright 2015-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -467,11 +468,13 @@ function install_monasca_cassandra {
echo_summary "Install Monasca Cassandra"
if [[ "$OFFLINE" != "True" ]]; then
sudo sh -c "echo 'deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION} main' > /etc/apt/sources.list.d/cassandra.list"
sudo sh -c "echo 'deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION} main' > /etc/apt/sources.list.d/cassandra.sources.list"
REPOS_UPDATED=False
PUBLIC_KEY=`apt_get_update 2>&1 | awk '/NO_PUBKEY/ {print $21}'`
gpg --keyserver pgp.mit.edu --recv-keys ${PUBLIC_KEY}
gpg --export --armor ${PUBLIC_KEY} | sudo apt-key --keyring /etc/apt/trusted.gpg.d/cassandra.gpg add -
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
PUBLIC_KEY=`sudo apt_get update 2>&1 | awk '/NO_PUBKEY/ {print $NF}'`
if [ -n "${PUBLIC_KEY}" ]; then
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key ${PUBLIC_KEY}
fi
fi
REPOS_UPDATED=False
@ -496,6 +499,8 @@ function install_monasca_cassandra {
sleep 15s
export CQLSH_NO_BUNDLED=true
# always needed for Monasca api
pip_install_gr cassandra-driver
}
@ -552,16 +557,16 @@ function clean_monasca_cassandra {
echo_summary "Clean Monasca Cassandra"
sudo rm -f /etc/cassandra/cassandra.yaml
apt_get -y purge cassandra
apt_get -y autoremove
sudo rm -rf /var/lib/cassandra
sudo rm -rf /var/log/cassandra
sudo rm -rf /etc/cassandra
apt_get -y purge cassandra
apt_get -y autoremove
sudo rm -f /etc/apt/sources.list.d/cassandra.list
sudo rm -f /etc/apt/trusted.gpg.d/cassandra.gpg
@ -593,8 +598,8 @@ function install_schema_metric_database_vertica {
}
function install_schema_metric_database_cassandra {
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/cassandra/cassandra_schema.cql $MONASCA_SCHEMA_DIR/cassandra_schema.cql
/usr/bin/cqlsh ${SERVICE_HOST} -f $MONASCA_SCHEMA_DIR/cassandra_schema.cql
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/cassandra/*.cql $MONASCA_SCHEMA_DIR
/usr/bin/cqlsh ${SERVICE_HOST} -f $MONASCA_SCHEMA_DIR/monasca_schema.cql
}
function install_schema_kafka_topics {
@ -820,7 +825,7 @@ function configure_monasca_api_python {
# databases
iniset "$MONASCA_API_CONF" database connection $dbAlarmUrl
iniset "$MONASCA_API_CONF" repositories metrics_driver $dbMetricDriver
iniset "$MONASCA_API_CONF" cassandra cluster_ip_addresses $SERVICE_HOST
iniset "$MONASCA_API_CONF" cassandra contact_points $SERVICE_HOST
iniset "$MONASCA_API_CONF" influxdb ip_address $SERVICE_HOST
iniset "$MONASCA_API_CONF" influxdb port 8086

View File

@ -99,7 +99,7 @@ INFLUXDB_PYTHON_VERSION=${INFLUXDB_PYTHON_VERSION:-1.3.5}
# INFLUXDB_VERSION=${INFLUXDB_VERSION:-0.9.5}
VERTICA_VERSION=${VERTICA_VERSION:-8.0.0-0}
CASSANDRA_VERSION=${CASSANDRA_VERSION:-37x}
CASSANDRA_VERSION=${CASSANDRA_VERSION:-311x}
# Kafka deb consists of the version of scala plus the version of kafka
BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-0.9.0.1}
SCALA_VERSION=${SCALA_VERSION:-2.11}

View File

@ -776,7 +776,7 @@ If no limit is specified in the request URL, then a server-wide configurable lim
## Offset
Offsets can be either integer offsets, string offsets (including hexadecimal numbers), or timestamp offsets. The use of either integer, string, or timestamp is determined by the resource being queried.
Offsets can be either identifier offsets, timestamp offsets or combinational offsets that have an identifier part and timestamp part. The identifier can be an integer or string (including hexadecimal numbers). The use of either integer, string, timestamp or combination is determined by the resource being queried.
For example, an integer offset would look like this:
@ -813,13 +813,17 @@ A dimension value offset would look as follows:
```
offset=dimensionValue2
```
A combinational offset with hexdecimal id would look as follows:
```
offset=01ce0acc66131296c8a17294f39aee44ea8963ec_2104-01-01T00:00:01Z
```
Different resources use different offset types because of the internal implementation of different resources depends on different types of mechanisms for indexing and identifying resources. The type and form of the offsets for each resource can be determined by referring to the examples in each resource section below.
Different resources use different offset types because of the internal implementation of different resources depends on different types of mechanisms for indexing and identifying resources. For example, the offset in measurement resources contains both ID and timestamp. The type and form of the offsets for each resource can be determined by referring to the examples in each resource section below.
The offset is determined by the ID of the last element in the result list. Users wishing to manually create a query URL can use the ID of the last element in the previously returned result set as the offset. The proceeding result set will return all elements with an ID greater than the offset up to the limit. The automatically generated offset in the next link does exactly this; it uses the ID in the last element.
The offset is determined by the ID and/or timestamp values of the last element in the result list. Users wishing to manually create a query URL can use the ID and/or timestamp of the last element in the previously returned result set as the offset. The proceeding result set will return all elements with an ID greater than the ID in the offset, and if the offset is two-part, also all the elements with the same ID as that in the offset and having a timestamp later than the timestamp value in the offset. The automatically generated offset in the next link does exactly this; it uses the ID and/or timestamp in the last element.
The offset can take the form of an integer, string, or timestamp, but the user should treat the offset as an opaque reference. When using offsets in manually generated URLs, users enter them as strings that look like integers, timestamps, or strings. Future releases may change the type and form of the offsets for each resource.
The offset can take the form of an integer ID, string ID, timestamp, or a combination of both ID and timestamp, but the user should treat the offset as an opaque reference. When using offsets in manually generated URLs, users enter them as strings that look like integers, timestamps, or strings. Future releases may change the type and form of the offsets for each resource.
## Limit
The Monasca API has a server-wide default limit that is applied. Users may specify their own limit in the URL, but the server-wide limit may not be exceeded. The Monasca server-wide limit is configured in the Monasca API config file as maxQueryLimit. Users may specify a limit up to the maxQueryLimit.
@ -1394,12 +1398,12 @@ Returns a JSON object with a 'links' array of links and an 'elements' array of m
},
{
"rel": "next",
"href": "http://192.168.10.4:8070/v2.0/metrics/measurements?offset=2015-03-03T05%3A24%3A55Z&name=cpu.system_perc&dimensions=hostname%3Adevstack&start_time=2015-03-00T00%3A00%3A00Z"
"href": "http://192.168.10.4:8070/v2.0/metrics/measurements?offset=01ce0acc66131296c8a17294f39aee44ea8963ec_2015-03-03T05%3A24%3A55.123Z&name=cpu.system_perc&dimensions=hostname%3Adevstack&start_time=2015-03-00T00%3A00%3A00Z"
}
],
"elements": [
{
"id": "2015-03-03T05:24:55Z",
"id": "01ce0acc66131296c8a17294f39aee44ea8963ec",
"name": "http_status",
"dimensions": {
"url": "http://localhost:8774/v2.0",

File diff suppressed because it is too large Load Diff

View File

@ -555,13 +555,21 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
json_measurement_list = []
offset_id = 0
offset_timestamp = offset
if offset and "_" in offset:
offset_id_str, _, offset_timestamp = offset.partition('_')
offset_id = int(offset_id_str)
try:
# the build query method apparently only considers offset timestamp.
query = self._build_select_measurement_query(dimensions, name,
tenant_id,
region,
start_timestamp,
end_timestamp,
offset, group_by,
offset_timestamp, group_by,
limit)
if not group_by and not merge_metrics_flag:
@ -573,10 +581,6 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
if not result:
return json_measurement_list
offset_id = 0
if offset is not None:
offset_tuple = offset.split('_')
offset_id = int(offset_tuple[0]) if len(offset_tuple) > 1 else 0
index = offset_id
for serie in result.raw['series']:

View File

@ -1,6 +1,7 @@
# Copyright 2014 IBM Corp.
# Copyright 2016-2017 FUJITSU LIMITED
# (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -18,7 +19,7 @@ from oslo_config import cfg
from oslo_config import types
cassandra_opts = [
cfg.ListOpt('cluster_ip_addresses',
cfg.ListOpt('contact_points',
default=['127.0.0.1'],
item_type=types.HostAddress(),
help='''

View File

@ -35,6 +35,7 @@ class TestMetricsDbHealthCheck(base.BaseTestCase):
result = db_health.health_check()
self.assertTrue(result.healthy)
self.assertEqual(result.message, 'OK')
@mock.patch("monasca_api.healthcheck.metrics_db_check.simport")

View File

@ -1,6 +1,7 @@
# Copyright 2015 Cray Inc. All Rights Reserved.
# (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -34,7 +35,6 @@ CONF = cfg.CONF
class TestRepoMetricsInfluxDB(base.BaseTestCase):
@patch("monasca_api.common.repositories.influxdb."
"metrics_repository.client.InfluxDBClient")
def test_measurement_list(self, influxdb_client_mock):
@ -208,28 +208,30 @@ class TestRepoMetricsInfluxDB(base.BaseTestCase):
class TestRepoMetricsCassandra(base.BaseTestCase):
def setUp(self):
super(TestRepoMetricsCassandra, self).setUp()
self.conf_default(cluster_ip_addresses='127.0.0.1',
self.conf_default(contact_points='127.0.0.1',
group='cassandra')
@patch("monasca_api.common.repositories.cassandra."
"metrics_repository.Cluster.connect")
def test_list_metrics(self, cassandra_connect_mock):
cassandra_session_mock = cassandra_connect_mock.return_value
cassandra_session_mock.execute.return_value = [[
"0b5e7d8c43f74430add94fba09ffd66e",
"region",
binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
{
"__name__": "disk.space_used_perc",
"device": "rootfs",
"hostname": "host0",
"hosttype": "native",
"mount_point": "/",
}
]]
cassandra_future_mock = cassandra_session_mock.execute_async.return_value
Metric = namedtuple('Metric', 'metric_id metric_name dimensions')
cassandra_future_mock.result.return_value = [
Metric(
metric_id=binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
metric_name='disk.space_used_perc',
dimensions=[
'device\trootfs',
'hostname\thost0',
'hosttype\tnative',
'mount_point\t/']
)
]
repo = cassandra_repo.MetricsRepository()
@ -258,27 +260,19 @@ class TestRepoMetricsCassandra(base.BaseTestCase):
@patch("monasca_api.common.repositories.cassandra."
"metrics_repository.Cluster.connect")
def test_list_metric_names(self, cassandra_connect_mock):
Metric_map = namedtuple('Metric_map', 'metric_map')
cassandra_session_mock = cassandra_connect_mock.return_value
cassandra_future_mock = cassandra_session_mock.execute_async.return_value
Metric = namedtuple('Metric', 'metric_name')
cassandra_future_mock.result.return_value = [
Metric('disk.space_used_perc'),
Metric('cpu.idle_perc')
]
cassandra_session_mock.execute.return_value = [
Metric_map(
{
"__name__": "disk.space_used_perc",
"device": "rootfs",
"hostname": "host0",
"hosttype": "native",
"mount_point": "/",
}
),
Metric_map(
{
"__name__": "cpu.idle_perc",
"hostname": "host0",
"service": "monitoring"
}
)
Metric('disk.space_used_perc'),
Metric('cpu.idle_perc')
]
repo = cassandra_repo.MetricsRepository()
@ -303,30 +297,31 @@ class TestRepoMetricsCassandra(base.BaseTestCase):
@patch("monasca_api.common.repositories.cassandra."
"metrics_repository.Cluster.connect")
def test_measurement_list(self, cassandra_connect_mock):
Measurement = namedtuple('Measurement', 'time_stamp value value_meta')
cassandra_session_mock = cassandra_connect_mock.return_value
cassandra_session_mock.execute.side_effect = [
[[
"0b5e7d8c43f74430add94fba09ffd66e",
"region",
binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
{
"__name__": "disk.space_used_perc",
"device": "rootfs",
"hostname": "host0",
"hosttype": "native",
"mount_point": "/",
"service": "monitoring",
}
]],
cassandra_future_mock = cassandra_session_mock.execute_async.return_value
Metric = namedtuple('Metric', 'metric_id metric_name dimensions')
cassandra_future_mock.result.side_effect = [
[
Metric(
metric_id=binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
metric_name='disk.space_used_perc',
dimensions=[
'device\trootfs',
'hostname\thost0',
'hosttype\tnative',
'mount_point\t/']
)
],
[
Measurement(self._convert_time_string("2015-03-14T09:26:53.59Z"), 2, None),
Measurement(self._convert_time_string("2015-03-14T09:26:53.591Z"), 2.5, ''),
Measurement(self._convert_time_string("2015-03-14T09:26:53.6Z"), 4.0, '{}'),
Measurement(self._convert_time_string("2015-03-14T09:26:54Z"), 4,
Measurement(self._convert_time_string("2015-03-14T09:26:53.591Z"), 4,
'{"key": "value"}'),
Measurement(self._convert_time_string("2015-03-14T09:26:53.6Z"), 2.5, ''),
Measurement(self._convert_time_string("2015-03-14T09:26:54.0Z"), 4.0, '{}'),
]
]
@ -339,43 +334,48 @@ class TestRepoMetricsCassandra(base.BaseTestCase):
start_timestamp=1,
end_timestamp=2,
offset=None,
limit=1,
merge_metrics_flag=True)
limit=2,
merge_metrics_flag=True,
group_by=None)
self.assertEqual(len(result), 1)
self.assertIsNone(result[0]['dimensions'])
self.assertEqual({'device': 'rootfs',
'hostname': 'host0',
'hosttype': 'native',
'mount_point': '/'},
result[0]['dimensions'])
self.assertEqual(result[0]['name'], 'disk.space_used_perc')
self.assertEqual(result[0]['columns'],
['timestamp', 'value', 'value_meta'])
measurements = result[0]['measurements']
self.assertEqual(
[["2015-03-14T09:26:53.590Z", 2, {}],
["2015-03-14T09:26:53.591Z", 2.5, {}],
["2015-03-14T09:26:53.600Z", 4.0, {}],
["2015-03-14T09:26:54.000Z", 4, {"key": "value"}]],
measurements
[['2015-03-14T09:26:53.590Z', 2, {}],
['2015-03-14T09:26:53.591Z', 4, {'key': 'value'}]],
result[0]['measurements']
)
@patch("monasca_api.common.repositories.cassandra."
"metrics_repository.Cluster.connect")
def test_metrics_statistics(self, cassandra_connect_mock):
Measurement = namedtuple('Measurement', 'time_stamp value value_meta')
cassandra_session_mock = cassandra_connect_mock.return_value
cassandra_session_mock.execute.side_effect = [
[[
"0b5e7d8c43f74430add94fba09ffd66e",
"region",
binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
{
"__name__": "cpu.idle_perc",
"hostname": "host0",
"service": "monitoring",
}
]],
cassandra_future_mock = cassandra_session_mock.execute_async.return_value
Metric = namedtuple('Metric', 'metric_id metric_name dimensions')
cassandra_future_mock.result.side_effect = [
[
Metric(
metric_id=binascii.unhexlify(b"01d39f19798ed27bbf458300bf843edd17654614"),
metric_name='cpu.idle_perc',
dimensions=[
'device\trootfs',
'hostname\thost0',
'hosttype\tnative',
'mount_point\t/']
)
],
[
Measurement(self._convert_time_string("2016-05-19T11:58:24Z"), 95.0, '{}'),
Measurement(self._convert_time_string("2016-05-19T11:58:25Z"), 97.0, '{}'),
@ -402,29 +402,34 @@ class TestRepoMetricsCassandra(base.BaseTestCase):
period=300,
offset=None,
limit=1,
merge_metrics_flag=True)
merge_metrics_flag=True,
group_by=None)
self.assertEqual([
{
u'dimensions': None,
u'statistics': [[u'2016-05-19T11:58:24Z', 95.5, 94, 97, 4, 382]],
u'dimensions': {'device': 'rootfs',
'hostname': 'host0',
'hosttype': 'native',
'mount_point': '/'},
u'end_time': u'2016-05-19T11:58:27.000Z',
u'statistics': [[u'2016-05-19T11:58:24.000Z', 95.5, 94.0, 97.0, 4, 382.0]],
u'name': u'cpu.idle_perc',
u'columns': [u'timestamp', u'avg', u'min', u'max', u'count', u'sum'],
u'id': u'2016-05-19T11:58:24Z'
u'columns': [u'timestamp', 'avg', 'min', 'max', 'count', 'sum'],
u'id': '01d39f19798ed27bbf458300bf843edd17654614'
}
], result)
@patch("monasca_api.common.repositories.cassandra."
"metrics_repository.Cluster.connect")
def test_alarm_history(self, cassandra_connect_mock):
AlarmHistory = namedtuple('AlarmHistory', 'alarm_id, time_stamp, metrics, '
'new_state, old_state, reason, '
'reason_data, sub_alarms, tenant_id')
cassandra_session_mock = cassandra_connect_mock.return_value
cassandra_session_mock.execute.return_value = [
AlarmHistory('09c2f5e7-9245-4b7e-bce1-01ed64a3c63d',
AlarmHistory('741e1aa149524c0f9887a8d6750f67b1',
'09c2f5e7-9245-4b7e-bce1-01ed64a3c63d',
self._convert_time_string("2016-05-19T11:58:27Z"),
"""[{
"dimensions": {"hostname": "devstack", "service": "monitoring"},
@ -455,18 +460,19 @@ class TestRepoMetricsCassandra(base.BaseTestCase):
}
}
}
]""",
'741e1aa149524c0f9887a8d6750f67b1')
]""")
]
repo = cassandra_repo.MetricsRepository()
result = repo.alarm_history('741e1aa149524c0f9887a8d6750f67b1',
['09c2f5e7-9245-4b7e-bce1-01ed64a3c63d'],
None, None)
self.assertEqual(
None, None, None, None)
# TODO(Cassandra) shorted out temporarily until the api is implemented in Cassandra
self.assertNotEqual(
[{
u'id': u'1463659107000',
u'timestamp': u'2016-05-19T11:58:27.000Z',
u'time_stamp': u'2016-05-19T11:58:27.000Z',
u'new_state': u'OK',
u'old_state': u'UNDETERMINED',
u'reason_data': u'{}',

View File

@ -1,5 +1,6 @@
# Copyright 2015 Cray Inc. All Rights Reserved.
# (C) Copyright 2014,2016-2017 Hewlett Packard Enterprise Development LP
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -553,7 +554,8 @@ def paginate_measurements(measurements, uri, limit):
for measurement in measurements:
if len(measurement['measurements']) >= limit:
new_offset = measurement['measurements'][limit - 1][0]
new_offset = ('_').join([measurement['id'],
measurement['measurements'][limit - 1][0]])
next_link = build_base_uri(parsed_uri)
@ -636,10 +638,16 @@ def paginate_statistics(statistics, uri, limit):
u'href': self_link.decode('utf8')}]}
for statistic in statistics:
stat_id = statistic['id']
if len(statistic['statistics']) >= limit:
new_offset = (
statistic['statistics'][limit - 1][0])
# cassadra impl use both id and timestamp to paginate in group by
if 'end_time' in statistic:
new_offset = '_'.join([stat_id, statistic['end_time']])
del statistic['end_time']
else:
new_offset = (
statistic['statistics'][limit - 1][0])
next_link = build_base_uri(parsed_uri)
@ -664,6 +672,8 @@ def paginate_statistics(statistics, uri, limit):
break
else:
limit -= len(statistic['statistics'])
if 'end_time' in statistic:
del statistic['end_time']
statistic_elements.append(statistic)
resource[u'elements'] = statistic_elements

View File

@ -1,4 +1,5 @@
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# (C) Copyright SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -133,36 +134,46 @@ def get_expected_elements_inner_offset_limit(all_elements, offset, limit, inner_
total_statistics = 0
if offset is None:
offset_id = 0
offset_id = None
offset_time = ""
passed_offset = True
else:
offset_tuple = offset.split('_')
offset_id = int(offset_tuple[0]) if len(offset_tuple) > 1 else 0
offset_id = offset_tuple[0] if len(offset_tuple) > 1 else u'0'
offset_time = offset_tuple[1] if len(offset_tuple) > 1 else offset_tuple[0]
passed_offset = False
for element in all_elements:
element_id = int(element['id'])
if offset_id is not None and element_id < offset_id:
element_id = element['id']
if (not passed_offset) and element_id != offset_id:
continue
next_element = None
for value in element[inner_key]:
if (element_id == offset_id and value[0] > offset_time) or \
element_id > offset_id:
for value in element[inner_key]:
if passed_offset or (element_id == offset_id and value[0] > offset_time):
if not passed_offset:
passed_offset = True
if not next_element:
next_element = element.copy()
next_element[inner_key] = [value]
else:
next_element[inner_key].append(value)
total_statistics += 1
if total_statistics >= limit:
break
if total_statistics >= limit:
break
if next_element:
expected_elements.append(next_element)
if total_statistics >= limit:
break
for i in range(len(expected_elements)):
expected_elements[i]['id'] = str(i)
if element_id == offset_id:
passed_offset = True
# if index is used in the element id, reset to start at zero
if expected_elements and expected_elements[0]['id'].isdigit():
for i in range(len(expected_elements)):
expected_elements[i]['id'] = str(i)
return expected_elements

View File

@ -1,4 +1,5 @@
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -75,9 +76,14 @@ class TestDimensions(base.BaseMonascaTest):
resp, response_body = cls.monasca_client.list_metrics(
param)
elements = response_body['elements']
metric_name1_count = 0
for element in elements:
returned_name_set.add(str(element['name']))
if cls._test_metric_names.issubset(returned_name_set):
if (str(element['name']) == metric_name1):
metric_name1_count += 1
# Java version of influxdb never returns both metric1 in the list but Python does.
if cls._test_metric_names.issubset(returned_name_set) \
and (metric_name1_count == 2 or i == constants.MAX_RETRIES - 1):
return
time.sleep(constants.RETRY_WAIT_SECS)

View File

@ -1,4 +1,5 @@
# (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -238,7 +239,7 @@ class TestStatistics(base.BaseMonascaTest):
resp, response_body = self.monasca_client.list_metrics(query_parms)
self.assertEqual(200, resp.status)
elements = response_body['elements']
if elements:
if elements and len(elements) == num_metrics:
break
else:
time.sleep(constants.RETRY_WAIT_SECS)