Merge "Add support for InfluxDB 0.11.0 to 1.1.0 in the Python API"

This commit is contained in:
Jenkins 2016-12-14 08:17:08 +00:00 committed by Gerrit Code Review
commit dfd1b2f201
7 changed files with 735 additions and 56 deletions

16
AUTHORS
View File

@ -1,33 +1,45 @@
Andreas Jaeger <aj@suse.com>
Angelo Mendonca <angelomendonca@gmail.com>
Artur Basiak <artur.basiak@ts.fujitsu.com>
Ben Motz <bmotz@cray.com>
Bertrand Lallau <bertrand.lallau@thalesgroup.com>
Brad Klein <bradley.klein@twcable.com>
Cao Xuan Hoang <hoangcx@vn.fujitsu.com>
Clenimar Filemon <clenimar.filemon@gmail.com>
Craig Bryant <craig.bryant@hp.com>
David C Kennedy <david.c.kennedy@hp.com>
David C Kennedy <david.c.kennedy@hpe.com>
Deklan Dieterly <deklan.dieterly@hp.com>
Deklan Dieterly <deklan.dieterly@hpe.com>
Deklan Dieterly <dieterly@gmail.com>
Derrick Johnson <derrick.johnson@hp.com>
Derrick Johnson <johnson.derrick@gmail.com>
Dexter Fryar <dexter.fryar@outlook.com>
Dobroslaw Zybort <dobroslaw.zybort@ts.fujitsu.com>
Erickson Santos <erickson@lsd.ufcg.edu.br>
Ghanshyam <ghanshyam.mann@nectechnologies.in>
Haiwei Xu <xu-haiwei@mxw.nes.nec.co.jp>
Hironori Shiina <shiina.hironori@jp.fujitsu.com>
Igor Natanael <igornsa@lsd.ufcg.edu.br>
Jakub Wachowski <jakub.wachowski@ts.fujitsu.com>
Janonymous <janonymous.codevulture@gmail.com>
Jeremy Stanley <fungi@yuggoth.org>
Joachim Barheine <joachim.barheine@sap.com>
Joe Keen <joe.keen@hp.com>
Jonathan Halterman <jhalterman@gmail.com>
Jonathan Halterman <jhalterman@hp.com>
Kaiyan Sheng <kaiyan.sheng@hp.com>
Koji Nakazono <nakazono_0507@jp.fujitsu.com>
Laszlo Hegedus <laszlo.hegedus@ericsson.com>
LiuNanke <nanke.liu@easystack.cn>
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
Luong Anh Tuan <tuanla@vn.fujitsu.com>
Matteus Silva <silvamatteus@lsd.ufcg.edu.br>
Michael Bielinski <michael.bielinski@hp.com>
Michael James Hoppal <michael.jam.hoppal@hp.com>
Michal Zielonka <michal.zielonka@ts.fujitsu.com>
Monty Taylor <mordred@inaugust.com>
Nam Nguyen Hoai <namnh@vn.fujitsu.com>
Rob Raymond <rob.raymond@hp.com>
Rodolfo Alonso Hernandez <rodolfo.alonso.hernandez@intel.com>
Roland Hochmuth <roland.hochmuth@hp.com>
@ -39,20 +51,24 @@ Srinivas Sakhamuri <srini.openstack@gmail.com>
Swapnil Kulkarni (coolsvap) <me@coolsvap.net>
Thomas Graichen <thomas.graichen@sap.com>
Tim Kuhlman <tim.kuhlman@hp.com>
Tomasz Trębski <kornicameister@gmail.com>
Tomasz Trębski <tomasz.trebski@ts.fujitsu.com>
Tong Li <litong01@us.ibm.com>
Victor Ion Munteanu <victor.munteanu@equillon.ro>
Witold Bedyk <witold.bedyk@est.fujitsu.com>
Yushiro FURUKAWA <y.furukawa_2@jp.fujitsu.com>
ZhiQiang Fan <aji.zqfan@gmail.com>
alpineriveredge <alpineriveredge@gmail.com>
bklei <bradley.klein@twcable.com>
cindy oneill <cindy.o-neill@hp.com>
dieterly <deklan.dieterly@hpe.com>
gary-hessler <gary.hessler@hp.com>
haali1 <haneef.ali@hp.com>
henriquetruta <henrique@lsd.ufcg.edu.br>
hochmuth <roland.hochmuth@hp.com>
kaiyan-sheng <kaiyan.sheng@hp.com>
liu-sheng <liusheng@huawei.com>
melissaml <ma.lei@99cloud.net>
oiskam1 <oiskam1@yandex.ru>
raymondr <raymondr@users.noreply.github.com>
satsuki_fukazu <fukazu.satsuki@po.ntts.co.jp>

View File

@ -0,0 +1,325 @@
### Welcome to the InfluxDB configuration file.
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false
###
### Enterprise registration control
###
[registration]
# enabled = true
# url = "https://enterprise.influxdata.com" # The Enterprise server URL
# token = "" # Registration token for Enterprise server
###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###
[meta]
dir = "/var/lib/influxdb/meta"
hostname = "localhost"
bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"
cluster-tracing = false
# If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
# the leader will automatically ask a non-raft peer node to promote to a raft
# peer. This only happens if there is a non-raft peer node available to promote.
# This setting only affects the local node, so to ensure it operates correctly, be sure to set
# it in the config of every node.
raft-promotion-enabled = true
###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###
[data]
dir = "/var/lib/influxdb/data"
# Controls the engine type for new shards. Options are b1, bz1, or tsm1.
# b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
# tsm1 is the 0.9.5 engine and is currently EXPERIMENTAL. Until 0.9.5 is
# actually released data written into a tsm1 engine may be need to be wiped
# between upgrades.
# engine ="bz1"
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/var/lib/influxdb/wal"
wal-enable-logging = true
# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
# flush to the index
# wal-ready-series-size = 25600
# Flush and compact a partition once this ratio of series are over the ready size
# wal-compaction-threshold = 0.6
# Force a flush and compaction if any series in a partition gets above this size in bytes
# wal-max-series-size = 2097152
# Force a flush of all series and full compaction if there have been no writes in this
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL.
# wal-flush-cold-interval = "10m"
# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true
###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###
[hinted-handoff]
enabled = true
dir = "/var/lib/influxdb/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will backoff in an exponential manner, until the interval
# reaches retry-max-interval. Once writes to all nodes are successfully completed the
# interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"
# Interval between running checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons. 1) The data is older than the max age, or
# 2) the target node has been dropped from the cluster. Data is never dropped until
# it has reached max-age however, for a dropped node or not.
purge-interval = "1h"
###
### In v1.0.0 [cluster] was replaced with [coordinator]
### [coordinator]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###
[coordinator]
shard-writer-timeout = "10s" # The time within which a shard must respond to write.
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
enabled = true
check-interval = "30m"
###
### [shard-precreation]
###
### Controls the precreation of shards, so they are created before data arrives.
### Only shards that will exist in the future, at time of creation, are precreated.
[shard-precreation]
enabled = true
check-interval = "10m"
advance-period = "30m"
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.
[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics
###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
[admin]
enabled = true
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
enabled = false
# database = "graphite"
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# name-separator = "."
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
## separated by `name-separator`.
## The "measurement" tag is special and the corresponding field will become
## the name of the metric.
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
## {
## measurement: "cpu",
## tags: {
## "type": "server",
## "host": "localhost",
## "device": "cpu0"
## }
## }
# name-schema = "type.host.measurement.device"
## If set to true, when the input metric name has more fields than `name-schema` specified,
## the extra fields will be ignored.
## Otherwise an error will be logged and the metric rejected.
# ignore-unnamed = true
###
### In v1.0.0 support for config option [collectd] was replaced with [[collectd]]
### [[collectd]]
###
### Controls the listener for collectd data.
###
[[collectd]]
enabled = false
# bind-address = ""
# database = ""
# typesdb = ""
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
###
### In v1.0.0 support for config option [opentsdb] was replaced with [[opentsdb]]
### [[opentsdb]]
###
### Controls the listener for OpenTSDB data.
###
[[opentsdb]]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate= ""
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###
[[udp]]
enabled = false
# bind-address = ""
# database = "udp"
# retention-policy = ""
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###
[continuous_queries]
log-enabled = true
enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m"
compute-runs-per-interval = 10
compute-no-more-than = "2m"

View File

@ -1,7 +1,7 @@
#!/usr/bin/env python
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# (C) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -42,6 +42,18 @@ import six.moves.urllib.parse as urlparse
import urllib2
def format_response(req):
"""Parse an InfluxDB HTTP response and return its result values.

Reads the JSON body from *req* (a urllib2 response object) and returns
the ``values`` list of the first series of the first result. An empty
list is returned when the query succeeded but produced no values.
Re-raises KeyError (after printing the server's result payload) when
the response lacks the expected results/series structure.
"""
try:
json_value = json.loads(req.read())
# A successful query with data carries results[0]['series'][0]['values'];
# a successful-but-empty result set simply has no 'values' key.
if (len(json_value['results'][0]) > 0 and
'values' in json_value['results'][0]['series'][0]):
return json_value['results'][0]['series'][0]['values']
else:
return []
except KeyError:
# Show the raw result before propagating so the failure is diagnosable.
print "Query returned a non-successful result: {0}".format(json_value['results'])
raise
def influxdb_get(uri, query, db=None):
"""Runs a query via HTTP GET and returns the response as a Python list."""
@ -53,32 +65,49 @@ def influxdb_get(uri, query, db=None):
params = urlparse.urlencode(getparams)
uri = "{}&{}".format(uri,params)
req = urllib2.urlopen(uri)
json_value = json.loads(req.read())
if (len(json_value['results'][0]) > 0 and
'values' in json_value['results'][0]['series'][0]):
return json_value['results'][0]['series'][0]['values']
else:
return []
return format_response(req)
except KeyError:
print "Query returned a non-successful result: {0}".format(json_value['results'])
sys.exit(1)
def influxdb_get_post(uri, query, db=None):
"""Runs a query using HTTP GET or POST and returns the response as a Python list.
At some InfluxDB release several ops changed from using GET to POST. For example,
CREATE DATABASE. To maintain backward compatibility, this function first tries the
query using POST and if that fails it retries again using GET."""
# Build the query string; 'db' is only sent when a database is targeted.
query_params = {"q": query}
if db:
query_params['db'] = db
try:
encoded_params = urlparse.urlencode(query_params)
try:
# Newer InfluxDB releases expect POST for mutating statements.
req = urllib2.urlopen(uri, encoded_params)
return format_response(req)
except urllib2.HTTPError:
# Older releases reject POST for these statements; retry via GET
# by appending the encoded parameters to the URI.
uri = "{}&{}".format(uri, encoded_params)
req = urllib2.urlopen(uri)
return format_response(req)
except KeyError:
# format_response already printed the failing result payload.
sys.exit(1)
def main(argv=None):
"""If necessary, create the database, retention policy, and users"""
auth_str = '?u=%s&p=%s' % (ADMIN, ADMIN_PASS)
api_uri = "{0}/query{1}".format(URL, auth_str)
# List current databases
# List current databases
dbs = influxdb_get(uri=api_uri, query="SHOW DATABASES")
if [DBNAME] not in dbs:
print "Creating database '{}'".format(DBNAME)
influxdb_get(uri=api_uri, query="CREATE DATABASE {0}".format(DBNAME))
influxdb_get_post(uri=api_uri, query="CREATE DATABASE {0}".format(DBNAME))
print "...created!"
# Check retention policy
# Check retention policy
policies = influxdb_get(uri=api_uri,
query="SHOW RETENTION POLICIES ON {0}".format(DBNAME))
if not any(pol[0] == SHARDSPACE_NAME for pol in policies):
@ -87,15 +116,15 @@ def main(argv=None):
DBNAME,
RETENTION,
REPLICATION)
influxdb_get(uri=api_uri, db=DBNAME, query=policy)
influxdb_get_post(uri=api_uri, db=DBNAME, query=policy)
# Create the users
# Create the users
users = influxdb_get(uri=api_uri, query="SHOW USERS", db=DBNAME)
for name, password in USERS.iteritems():
if not any(user[0] == name for user in users):
influxdb_get(uri=api_uri,
query=unicode("CREATE USER {0} WITH PASSWORD '{1}'".format(name, password)),
db=DBNAME)
influxdb_get_post(uri=api_uri,
query=unicode("CREATE USER {0} WITH PASSWORD '{1}'".format(name, password)),
db=DBNAME)
if __name__ == "__main__":
sys.exit(main())

View File

@ -51,6 +51,23 @@ export MONASCA_PERSISTER_IMPLEMENTATION_LANG=${MONASCA_PERSISTER_IMPLEMENTATION_
# Set default metrics DB to InfluxDB
export MONASCA_METRICS_DB=${MONASCA_METRICS_DB:-influxdb}
# Set INFLUXDB_VERSION
# Pick the InfluxDB version matching the chosen API implementation language.
# INFLUXDB_VERSION, when already set by the operator, always wins.
if [[ "${MONASCA_API_IMPLEMENTATION_LANG,,}" == 'java' ]]; then
    INFLUXDB_VERSION=${INFLUXDB_VERSION:-${INFLUXDB_JAVA_VERSION}}
elif [[ "${MONASCA_API_IMPLEMENTATION_LANG,,}" == 'python' ]]; then
    INFLUXDB_VERSION=${INFLUXDB_VERSION:-${INFLUXDB_PYTHON_VERSION}}
else
    echo "Found invalid value for variable MONASCA_API_IMPLEMENTATION_LANG: $MONASCA_API_IMPLEMENTATION_LANG"
    echo "Valid values for MONASCA_API_IMPLEMENTATION_LANG are \"java\" and \"python\""
    # Fixed mismatched quoting: \"java'' -> \"java\"
    die "Please set MONASCA_API_IMPLEMENTATION_LANG to either \"java\" or \"python\""
fi
# Determine password for database (copied from devstack/lib/database)
if [ -n "$MYSQL_PASSWORD" ]; then
DATABASE_PASSWORD=$MYSQL_PASSWORD
@ -480,14 +497,31 @@ function install_monasca_influxdb {
echo_summary "Install Monasca Influxdb"
local influxdb_deb=influxdb_${INFLUXDB_VERSION}_amd64.deb
local influxdb_deb_url=http://s3.amazonaws.com/influxdb/${influxdb_deb}
local influxdb_deb_url=https://dl.influxdata.com/influxdb/releases/${influxdb_deb}
local influxdb_deb_dest=${FILES}/${influxdb_deb}
download_file ${influxdb_deb_url} ${influxdb_deb_dest}
sudo dpkg --skip-same-version -i ${influxdb_deb_dest}
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/influxdb/influxdb.conf /etc/influxdb/influxdb.conf
# Validate INFLUXDB_VERSION
validate_version ${INFLUXDB_VERSION}
if [[ $? -ne 0 ]]; then
echo "Found invalid value for variable INFLUXDB_VERSION: $INFLUXDB_VERSION"
echo "Valid values for INFLUXDB_VERSION must be in the form of 1.0.0"
die "Please set INFLUXDB_VERSION to a correct value"
fi
# In InfluxDB v1.0.0 the config options cluster, collectd and opentsdb changed. As a result
# a different config file is deployed. See,
# https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v100-2016-09-08, for more details.
retval=$(compare_versions ${INFLUXDB_VERSION} "1.0.0")
if [[ "$retval" == "lt" ]]; then
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/influxdb/influxdb.conf /etc/influxdb/influxdb.conf
else
sudo cp -f "${MONASCA_API_DIR}"/devstack/files/influxdb/influxdb-1.0.0.conf /etc/influxdb/influxdb.conf
fi
if [[ ${SERVICE_HOST} ]]; then
@ -1878,6 +1912,56 @@ function recreate_users_mysql {
done
}
# Validate a program version string is of the form 1.0.0.
# Return 0 if a valid program version string, otherwise 1.
# Check that $1 looks like a dotted numeric version (e.g. 1, 1.0, 1.0.0).
function validate_version {
    version_regex="^([0-9]+\.)?([0-9]+\.)?([0-9]+)$"
    # Early-return style: success as soon as the regex matches.
    if [[ $1 =~ $version_regex ]]; then
        return 0
    fi
    return 1
}
# Compares two program version strings of the form 1.0.0.
# Returns "lt" if $1 is less than $2, "eq" if equal, and "gt" if greater than.
# Compares two program version strings of the form 1.0.0.
# Echoes "lt" if $1 < $2, "eq" if equal, "gt" if $1 > $2.
function compare_versions {
# Fast path: textually identical strings are equal versions.
if [[ $1 == $2 ]]
then
echo eq
return
fi
# Split both versions on '.' into arrays (IFS is local to the function).
local IFS=.
local i ver1=($1) ver2=($2)
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
do
ver1[i]=0
done
# Compare component by component, most-significant first.
for ((i=0; i<${#ver1[@]}; i++))
do
if [[ -z ${ver2[i]} ]]
then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
# 10# forces base-10 so components with leading zeros compare numerically.
if ((10#${ver1[i]} > 10#${ver2[i]}))
then
echo gt
return
fi
if ((10#${ver1[i]} < 10#${ver2[i]}))
then
echo lt
return
fi
done
# All components matched after zero-padding, e.g. "1.0" vs "1.0.0".
echo eq
return
}
# Allows this script to be called directly outside of
# the devstack infrastructure code. Uncomment to use.
#if [[ $(type -t is_service_enabled) != 'function' ]]; then

View File

@ -1,5 +1,5 @@
#
# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
# (C) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -81,7 +81,20 @@ enable_service monasca-cli
# Dependent Software Versions
#
INFLUXDB_VERSION=${INFLUXDB_VERSION:-0.9.5}
# Set the InfluxDB version to use for the Java and Python API
# InfluxDB has modified the result sets for SHOW SERIES. The
# Python API has been modified to support those changes, but the
# Java API hasn't yet. These two environment variables allow you
# to deploy either the Java or Python API without having to
# also set the INFLUXDB_VERSION when switching between the two.
INFLUXDB_JAVA_VERSION=${INFLUXDB_JAVA_VERSION:-0.9.5}
INFLUXDB_PYTHON_VERSION=${INFLUXDB_PYTHON_VERSION:-1.1.0}
# To set the same version of InfluxDB for both languages use the
# following variable. This will override both the Java and Python
# specific variables above.
# INFLUXDB_VERSION=${INFLUXDB_VERSION:-0.9.5}
VERTICA_VERSION=${VERTICA_VERSION:-8.0.0-0}
CASSANDRA_VERSION=${CASSANDRA_VERSION:-37x}
# Kafka deb consists of the version of scala plus the version of kafka

View File

@ -15,6 +15,7 @@
# under the License.
from datetime import datetime
from datetime import timedelta
from distutils import version
import json
from influxdb import client
@ -35,17 +36,81 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
def __init__(self):
try:
self.conf = cfg.CONF
self.influxdb_client = client.InfluxDBClient(
self.conf.influxdb.ip_address, self.conf.influxdb.port,
self.conf.influxdb.user, self.conf.influxdb.password,
self.conf.influxdb.database_name)
self._init_serie_builders()
except Exception as ex:
LOG.exception(ex)
raise exceptions.RepositoryException(ex)
def _init_serie_builders(self):
    """Bind the serie builder methods that match the server's InfluxDB version.

    The SHOW SERIES output format changed in InfluxDB v0.11.0, so the
    builder implementations are chosen from the detected server version.
    """
    try:
        if self._get_influxdb_version() < version.StrictVersion('0.11.0'):
            self._init_serie_builders_to_v0_11_0()
        else:
            self._init_serie_builders_from_v0_11_0()
    except Exception as ex:
        LOG.exception(ex)
        # Initialize the serie builders to v0_11_0. Not sure when SHOW
        # DIAGNOSTICS added support for a version string so to address
        # backward compatibility initialize InfluxDB serie builders < v0.11.0
        self._init_serie_builders_to_v0_11_0()
def _init_serie_builders_to_v0_11_0(self):
    """Select the serie builder implementations for InfluxDB < v0.11.0."""
    LOG.info('Initialize InfluxDB serie builders < v0.11.0')
    # Assignments are independent; each builder alias points at the
    # pre-v0.11.0 implementation.
    self._build_serie_metric_list = self._build_serie_metric_list_to_v0_11_0
    self._build_serie_dimension_values = self._build_serie_dimension_values_to_v0_11_0
    self._build_serie_dimension_names = self._build_serie_dimension_names_to_v0_11_0
def _init_serie_builders_from_v0_11_0(self):
    """Select the serie builder implementations for InfluxDB >= v0.11.0.

    InfluxDB v0.11.0 changed the SHOW SERIES output. See
    https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22
    """
    LOG.info('Initialize InfluxDB serie builders >= v0.11.0')
    # Assignments are independent; each builder alias points at the
    # v0.11.0+ implementation.
    self._build_serie_metric_list = self._build_serie_metric_list_from_v0_11_0
    self._build_serie_dimension_values = self._build_serie_dimension_values_from_v0_11_0
    self._build_serie_dimension_names = self._build_serie_dimension_names_from_v0_11_0
def _get_influxdb_version(self):
'''If Version found in the result set, return the InfluxDB Version,
otherwise raise an exception. InfluxDB has changed the format of their
result set and SHOW DIAGNOSTICS was introduced at some point so earlier releases
of InfluxDB might not return a Version.
'''
try:
result = self.influxdb_client.query('SHOW DIAGNOSTICS')
except InfluxDBClientError as ex:
LOG.exception(ex)
raise
# The diagnostics payload is a list of series dicts under result.raw.
if 'series' not in result.raw:
LOG.exception('series not in result.raw')
raise Exception('Series not in SHOW DIAGNOSTICS result set')
# Scan each series for a 'Version' column and return the first value found.
for series in result.raw['series']:
if 'columns' not in series:
continue
columns = series['columns']
if u'Version' not in series['columns']:
continue
if u'values' not in series:
continue
for value in series[u'values']:
# Each value row is positional; look up the Version column's index.
version_index = columns.index(u'Version')
version_str = value[version_index]
return version.StrictVersion(version_str)
raise Exception('Version not found in SHOW DIAGNOSTICS result set')
def _build_show_series_query(self, dimensions, name, tenant_id, region,
start_timestamp=None, end_timestamp=None):
@ -150,7 +215,7 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
# replace ' with \' to make query parsable
clean_dimension_name = dimension_name.replace("\'", "\\'")
if dimension_value == "":
where_clause += " and \"{}\" =~ /.*/ ".format(
where_clause += " and \"{}\" =~ /.+/ ".format(
clean_dimension_name)
elif '|' in dimension_value:
# replace ' with \' to make query parsable
@ -219,11 +284,16 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
LOG.exception(ex)
raise exceptions.RepositoryException(ex)
def _build_serie_dimension_values(self, series_names, dimension_name):
def _build_serie_dimension_values_to_v0_11_0(self, series_names, dimension_name):
dim_values = []
json_dim_value_list = []
if not series_names:
return json_dim_value_list
if 'series' not in series_names.raw:
return json_dim_value_list
if not dimension_name:
return json_dim_value_list
if 'series' in series_names.raw:
for series in series_names.raw['series']:
@ -244,58 +314,200 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
json_dim_value_list = sorted(json_dim_value_list)
return json_dim_value_list
def _build_serie_dimension_names(self, series_names):
def _build_serie_dimension_values_from_v0_11_0(self, series_names, dimension_name):
    """Collect the values observed for *dimension_name* from SHOW SERIES output.

    In InfluxDB v0.11.0 the SHOW SERIES output changed. See,
    https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22

    Returns a sorted list of ``{u'dimension_value': value}`` dicts.
    """
    if not series_names:
        return []
    if 'series' not in series_names.raw:
        return []
    if not dimension_name:
        return []
    found_values = set()
    for serie in series_names.raw['series']:
        if 'columns' not in serie:
            continue
        serie_columns = serie['columns']
        if 'key' not in serie_columns or u'values' not in serie:
            continue
        key_idx = serie_columns.index('key')
        for row in serie[u'values']:
            # A key looks like "measurement,tag1=v1,tag2=v2"; entries
            # without tags carry no dimensions.
            key_parts = row[key_idx].split(',')
            if len(key_parts) < 2:
                continue
            for tag in key_parts[1:]:
                pair = tag.split('=')
                if len(pair) != 2:
                    continue
                tag_key, tag_value = pair
                # Tags prefixed with '_' are internal and never exposed.
                if tag_key.startswith(u'_'):
                    continue
                if tag_key == dimension_name:
                    found_values.add(tag_value)
    dim_value_list = [{u'dimension_value': val} for val in found_values]
    return sorted(dim_value_list)
def _build_serie_dimension_names_to_v0_11_0(self, series_names):
dim_names = []
json_dim_name_list = []
if not series_names:
return json_dim_name_list
if 'series' not in series_names.raw:
return json_dim_name_list
for series in series_names.raw['series']:
for name in series[u'columns']:
if name not in dim_names and not name.startswith(u'_'):
dim_names.append(name)
json_dim_name_list.append({u'dimension_name': name})
if 'series' in series_names.raw:
for series in series_names.raw['series']:
for name in series[u'columns']:
if name not in dim_names and not name.startswith(u'_'):
dim_names.append(name)
json_dim_name_list.append({u'dimension_name': name})
json_dim_name_list = sorted(json_dim_name_list)
return json_dim_name_list
def _build_serie_metric_list(self, series_names, tenant_id, region,
start_timestamp, end_timestamp,
offset):
def _build_serie_dimension_names_from_v0_11_0(self, series_names):
    """Collect the dimension (tag) names present in SHOW SERIES output.

    In InfluxDB v0.11.0 the SHOW SERIES output changed. See,
    https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22

    Returns a sorted list of ``{u'dimension_name': name}`` dicts.
    """
    if not series_names:
        return []
    if 'series' not in series_names.raw:
        return []
    found_names = set()
    for serie in series_names.raw['series']:
        if 'columns' not in serie:
            continue
        serie_columns = serie['columns']
        if 'key' not in serie_columns or u'values' not in serie:
            continue
        key_idx = serie_columns.index('key')
        for row in serie[u'values']:
            # A key looks like "measurement,tag1=v1,tag2=v2"; entries
            # without tags carry no dimensions.
            key_parts = row[key_idx].split(',')
            if len(key_parts) < 2:
                continue
            for tag in key_parts[1:]:
                pair = tag.split('=')
                if len(pair) < 2:
                    continue
                # Tags prefixed with '_' are internal and never exposed.
                if not pair[0].startswith(u'_'):
                    found_names.add(pair[0])
    dim_name_list = [{u'dimension_name': name} for name in found_names]
    return sorted(dim_name_list)
def _build_serie_metric_list_to_v0_11_0(self, series_names, tenant_id, region,
start_timestamp, end_timestamp,
offset):
json_metric_list = []
if not series_names:
return json_metric_list
if 'series' in series_names.raw:
if 'series' not in series_names.raw:
return json_metric_list
metric_id = 0
if offset:
metric_id = int(offset) + 1
metric_id = 0
if offset:
metric_id = int(offset) + 1
for series in series_names.raw['series']:
for series in series_names.raw['series']:
for tag_values in series[u'values']:
for tag_values in series[u'values']:
dimensions = {
name: value
for name, value in zip(series[u'columns'], tag_values)
if value and not name.startswith(u'_')
}
dimensions = {
name: value
for name, value in zip(series[u'columns'], tag_values)
if value and not name.startswith(u'_')
}
if self._has_measurements(tenant_id,
if self._has_measurements(tenant_id,
region,
series[u'name'],
dimensions,
start_timestamp,
end_timestamp):
metric = {u'id': str(metric_id),
u'name': series[u'name'],
u'dimensions': dimensions}
metric_id += 1
json_metric_list.append(metric)
return json_metric_list
def _build_serie_metric_list_from_v0_11_0(self, series_names, tenant_id, region,
start_timestamp, end_timestamp, offset):
'''In InfluxDB v0.11.0 the SHOW SERIES output changed.
See, https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22
'''
json_metric_list = []
if not series_names:
return json_metric_list
if 'series' not in series_names.raw:
return json_metric_list
metric_id = 0
if offset:
metric_id = int(offset) + 1
for series in series_names.raw['series']:
if 'columns' not in series:
continue
columns = series['columns']
if 'key' not in columns:
continue
key_index = columns.index('key')
if u'values' not in series:
continue
for value in series[u'values']:
split_value = value[key_index].split(',')
if len(split_value) < 2:
continue
serie_name = split_value[0]
dimensions = {}
for tag in split_value[1:]:
tag_key_value = tag.split('=')
if len(tag_key_value) < 2:
continue
tag_key = tag_key_value[0]
tag_value = tag_key_value[1]
if tag_key.startswith(u'_'):
continue
dimensions[tag_key] = tag_value
if not self._has_measurements(tenant_id,
region,
series[u'name'],
serie_name,
dimensions,
start_timestamp,
end_timestamp):
metric = {u'id': str(metric_id),
u'name': series[u'name'],
u'dimensions': dimensions}
metric_id += 1
json_metric_list.append(metric)
continue
metric = {u'id': str(metric_id),
u'name': serie_name,
u'dimensions': dimensions}
metric_id += 1
json_metric_list.append(metric)
return json_metric_list

View File

@ -1,4 +1,4 @@
# (C) Copyright 2015-2016 Hewlett Packard Enterprise Development Company LP
# (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -586,7 +586,7 @@ class TestMetrics(base.BaseMonascaTest):
if i == constants.MAX_RETRIES - 1:
error_msg = "Timeout on waiting for metrics: at least " \
"2 metrics are needed. Current number of " \
"metrics = 0"
"metrics = {}".format(len(elements))
self.fail(error_msg)
def _create_metrics_with_different_dimensions(self, same_name=True):