gnocchi 3.0.0 release

meta:version: 3.0.0
meta:diff-start: -
meta:series: independent
meta:release-type: release
meta:announce: openstack-announce@lists.openstack.org
meta:pypi: no
meta:first: no
meta:release:Author: Julien Danjou <julien@danjou.info>
meta:release:Commit: Julien Danjou <julien@danjou.info>
meta:release:Change-Id: I5984d3fc68a114b913f29c90496b4a73ec83fa32
meta:release:Code-Review+1: Mehdi Abaakouk (sileht) <sileht@redhat.com>
meta:release:Code-Review+2: Davanum Srinivas (dims) <davanum@gmail.com>
meta:release:Workflow+1: Davanum Srinivas (dims) <davanum@gmail.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJX4qGQAAoJENljH+rwzGInoHMH/j9+gfevtR9ecspbYvAnj2Up
yOgVrej7HuWz0QlwxC2uc/1XwzBQMmx45bCWPZlDeDTiDpX3kk5mkiVEP7JyAPbx
KfX6IZBREZlvBDkfCn/JtZTZ03HM+jToDbUuX/qlz6Xi/CGt9NuqJfus/nl8cFO5
KvQQ+Z/IEBVGhUH5EnatboXMw4xP1rE3Ak/SOw3p0w8v+JpqW1BgNWkWuPXSTNJq
0qLTKLZNizkNQOr/KhEpE1CEdrIls91GnBybtbhOLeP37ye4WS3LJfhL8l41FV/4
OuRJ9BMUBI93m+hHf4JLZGVZ9wtbIT3X81emFZzHqzsjNbpF36m9QXW80FUPJsI=
=jMMh
-----END PGP SIGNATURE-----

Merge tag '3.0.0' into debian/newton

gnocchi 3.0.0 release

* New upstream release.
* Fixed (build-)depends for this release.
* Using OpenStack's Gerrit as VCS URLs.

Change-Id: Ic813bac08ddb9635e0075e6541315977ce98de4a

commit a8b5a2f228
@@ -7,6 +7,7 @@ ChangeLog
etc/gnocchi/gnocchi.conf
doc/build
doc/source/rest.rst
releasenotes/build
cover
.coverage
dist
@@ -2,4 +2,4 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=(gabbi\.driver.test_gabbi_[^_]+)_
group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_
@@ -0,0 +1,6 @@
libpq-dev [platform:dpkg]
postgresql [platform:dpkg]
mysql-client [platform:dpkg]
mysql-server [platform:dpkg]
build-essential [platform:dpkg]
libffi-dev [platform:dpkg]
@@ -1,3 +1,11 @@
gnocchi (3.0.0-1) experimental; urgency=medium

  * New upstream release.
  * Fixed (build-)depends for this release.
  * Using OpenStack's Gerrit as VCS URLs.

 -- Thomas Goirand <zigo@debian.org>  Fri, 23 Sep 2016 16:38:32 +0200

gnocchi (2.0.2-7) unstable; urgency=medium

  [ Ondřej Nový ]
@@ -5,7 +5,7 @@ Maintainer: PKG OpenStack <openstack-devel@lists.alioth.debian.org>
Uploaders: Thomas Goirand <zigo@debian.org>,
Build-Depends: debhelper (>= 9),
    dh-python,
    openstack-pkg-tools (>= 40~),
    openstack-pkg-tools (>= 52~),
    python-all,
    python-pbr,
    python-setuptools,
@@ -15,11 +15,13 @@ Build-Depends-Indep: alembic (>= 0.7.6),
    postgresql,
    postgresql-server-dev-all,
    python-concurrent.futures (>= 2.1.6),
    python-cotyledon (>= 1.2.2),
    python-coverage (>= 3.6),
    python-doc8,
    python-fixtures,
    python-future (>= 0.15),
    python-gabbi (>= 1),
    python-gabbi (>= 1.21),
    python-iso8601,
    python-jsonpatch (>= 1.9),
    python-keystoneclient (>= 1:1.6.0),
    python-keystonemiddleware (>= 4.0.0),
@@ -28,13 +30,14 @@ Build-Depends-Indep: alembic (>= 0.7.6),
    python-msgpack,
    python-mysqldb,
    python-numpy,
    python-os-testr,
    python-oslo.config (>= 1:2.6.0),
    python-oslo.db (>= 1.8.0),
    python-oslo.db (>= 4.8.0),
    python-oslo.log (>= 1.0.0),
    python-oslo.middleware,
    python-oslo.middleware (>= 3.11.0),
    python-oslo.policy (>= 0.3.0),
    python-oslo.serialization (>= 1.4.0),
    python-oslo.utils (>= 1.6.0),
    python-oslo.utils (>= 3.3.0),
    python-oslosphinx (>= 2.2.0.0),
    python-oslotest,
    python-pandas (>= 0.17),
@@ -53,12 +56,11 @@ Build-Depends-Indep: alembic (>= 0.7.6),
    python-sqlalchemy,
    python-sqlalchemy-utils,
    python-stevedore,
    python-swiftclient (>= 2.5.0),
    python-swiftclient (>= 3.1.0),
    python-sysv-ipc,
    python-tempest-lib (>= 0.2.0),
    python-testscenarios,
    python-testtools (>= 0.9.38),
    python-tooz (>= 1.34),
    python-tooz (>= 1.38),
    python-trollius,
    python-voluptuous,
    python-webob (>= 1.4.1),
@@ -68,8 +70,8 @@ Build-Depends-Indep: alembic (>= 0.7.6),
    subunit (>= 0.0.18),
    testrepository,
Standards-Version: 3.9.8
Vcs-Browser: https://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git
Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git
Vcs-Browser: https://git.openstack.org/cgit/openstack/deb-gnocchi
Vcs-Git: https://git.openstack.org/openstack/deb-gnocchi -b debian/newton
Homepage: https://github.com/openstack/gnocchi

Package: python-gnocchi
@@ -77,7 +79,9 @@ Section: python
Architecture: all
Depends: alembic (>= 0.7.6),
    python-concurrent.futures (>= 2.1.6),
    python-cotyledon (>= 1.2.2),
    python-future (>= 0.15),
    python-iso8601,
    python-jsonpatch (>= 1.9),
    python-keystoneclient (>= 1:1.6.0),
    python-keystonemiddleware (>= 4.0.0),
@@ -85,16 +89,17 @@ Depends: alembic (>= 0.7.6),
    python-msgpack,
    python-numpy,
    python-oslo.config (>= 1:2.6.0),
    python-oslo.db (>= 1.8.0),
    python-oslo.db (>= 4.8.0),
    python-oslo.log (>= 1.0.0),
    python-oslo.middleware,
    python-oslo.middleware (>= 3.11.0),
    python-oslo.policy (>= 0.3.0),
    python-oslo.serialization (>= 1.4.0),
    python-oslo.utils (>= 1.6.0),
    python-oslo.utils (>= 3.3.0),
    python-oslosphinx (>= 2.2.0.0),
    python-pandas (>= 0.17),
    python-paste,
    python-pastedeploy,
    python-pbr,
    python-pecan (>= 0.9),
    python-prettytable,
    python-psycopg2,
@@ -106,8 +111,8 @@ Depends: alembic (>= 0.7.6),
    python-sqlalchemy,
    python-sqlalchemy-utils,
    python-stevedore,
    python-swiftclient (>= 2.5.0),
    python-tooz (>= 1.34),
    python-swiftclient (>= 3.1.0),
    python-tooz (>= 1.38),
    python-trollius,
    python-voluptuous,
    python-webob (>= 1.4.1),
@@ -1,11 +1,7 @@
#!/usr/bin/make -f

PYTHONS:=$(shell pyversions -vr)
#PYTHON3S:=$(shell py3versions -vr)

include /usr/share/openstack-pkg-tools/pkgos.make

export OSLO_PACKAGE_VERSION=$(VERSION)
UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*

%:
@@ -17,11 +17,16 @@
STORAGE_DRIVER="$1"
SQL_DRIVER="$2"

ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,"
ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,tempest,"

# Use efficient wsgi web server
DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi'
DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi'

export DEVSTACK_GATE_INSTALL_TESTONLY=1
export DEVSTACK_GATE_NO_SERVICES=1
export DEVSTACK_GATE_TEMPEST=0
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_TEMPEST_NOTESTS=1
export DEVSTACK_GATE_EXERCISES=0
export KEEP_LOCALRC=1
@@ -36,12 +41,8 @@ case $STORAGE_DRIVER in
        DEVSTACK_GATE_TEMPEST+=$'\nexport SWIFT_USE_MOD_WSGI=True'
        ;;
    ceph)
        ENABLED_SERVICES+="ceph"
        DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph'
        ;;
    influxdb)
        DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=influxdb'
        ;;
esac
@@ -43,8 +43,30 @@ export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value

curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json"

sudo gnocchi-upgrade

# Run tests
# Just ensure tools still work
gnocchi metric create
sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2

# NOTE(sileht): on swift job permissions are wrong, I don't know why
sudo chown -R tempest:stack $BASE/new/tempest
sudo chown -R tempest:stack $BASE/data/tempest

# Run tests with tempest
cd $BASE/new/tempest
set +e
sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- gnocchi --concurrency=$TEMPEST_CONCURRENCY
TEMPEST_EXIT_CODE=$?
set -e
if [[ $TEMPEST_EXIT_CODE != 0 ]]; then
    # Collect and parse result
    generate_testr_results
    exit $TEMPEST_EXIT_CODE
fi

# Run tests with tox
cd $GNOCCHI_DIR
echo "Running gnocchi functional test suite"
set +e
sudo -E -H -u stack tox -epy27-gate
@@ -38,6 +38,23 @@ set -o xtrace
GITDIR["python-gnocchiclient"]=$DEST/python-gnocchiclient
GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/python-gnocchiclient.git}

if [ -z "$GNOCCHI_DEPLOY" ]; then
    # Default
    GNOCCHI_DEPLOY=simple

    # Fallback to common wsgi devstack configuration
    if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then
        GNOCCHI_DEPLOY=mod_wsgi

    # Deprecated config
    elif [ -n "$GNOCCHI_USE_MOD_WSGI" ] ; then
        echo_summary "GNOCCHI_USE_MOD_WSGI is deprecated, use GNOCCHI_DEPLOY instead"
        if [ "$GNOCCHI_USE_MOD_WSGI" == True ]; then
            GNOCCHI_DEPLOY=mod_wsgi
        fi
    fi
fi

# Functions
# ---------
@@ -48,6 +65,13 @@ function is_gnocchi_enabled {
    return 1
}

# Test if Ceph services are enabled
# _is_ceph_enabled
function _is_ceph_enabled {
    type is_ceph_enabled_for_service >/dev/null 2>&1 && return 0
    return 1
}

# create_gnocchi_accounts() - Set up common required gnocchi accounts

# Project              User            Roles
@@ -56,8 +80,12 @@ function is_gnocchi_enabled {
# gnocchi_swift         gnocchi_swift   ResellerAdmin (if Swift is enabled)
function create_gnocchi_accounts {
    # Gnocchi
    if is_service_enabled key && is_service_enabled gnocchi-api
    then
    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && is_service_enabled gnocchi-api ; then
        # At this time, the /etc/openstack/clouds.yaml is available,
        # we could leverage that by setting OS_CLOUD
        OLD_OS_CLOUD=$OS_CLOUD
        export OS_CLOUD='devstack-admin'

        create_service_user "gnocchi"

        local gnocchi_service=$(get_or_create_service "gnocchi" \

@@ -74,6 +102,8 @@ function create_gnocchi_accounts {
            "$SERVICE_PASSWORD" default "gnocchi_swift@example.com")
        get_or_add_user_project_role "ResellerAdmin" $gnocchi_swift_user "gnocchi_swift"
        fi

        export OS_CLOUD=$OLD_OS_CLOUD
    fi
}
@@ -113,32 +143,6 @@ function _gnocchi_install_redis {
    pip_install_gr redis
}

# install influxdb
# NOTE(chdent): InfluxDB is not currently packaged by the distro at the
# version that gnocchi needs. Until that is true we're downloading
# the debs and rpms packaged by the InfluxDB company. When it is
# true this method can be changed to be similar to
# _gnocchi_install_redis above.
function _gnocchi_install_influxdb {
    if is_package_installed influxdb; then
        echo "influxdb already installed"
    else
        local file=$(mktemp /tmp/influxpkg-XXXXX)

        if is_ubuntu; then
            wget -O $file $GNOCCHI_INFLUXDB_DEB_PKG
            sudo dpkg -i $file
        elif is_fedora; then
            wget -O $file $GNOCCHI_INFLUXDB_RPM_PKG
            sudo rpm -i $file
        fi
        rm $file
    fi

    # restart influxdb via its initscript
    sudo /opt/influxdb/init.sh restart
}

function _gnocchi_install_grafana {
    if is_ubuntu; then
        local file=$(mktemp /tmp/grafanapkg-XXXXX)
@@ -148,20 +152,22 @@ function _gnocchi_install_grafana {
    elif is_fedora; then
        sudo yum install "$GRAFANA_RPM_PKG"
    fi

    git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR}
    # Grafana-server does not handle symlink :(
    sudo mkdir -p /usr/share/grafana/public/app/plugins/datasource/gnocchi
    sudo mount -o bind ${GRAFANA_PLUGINS_DIR}/datasources/gnocchi /usr/share/grafana/public/app/plugins/datasource/gnocchi

    if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then
        sudo grafana-cli plugins install sileht-gnocchi-datasource
    elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then
        tmpfile=/tmp/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz
        wget https://github.com/sileht/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile
        sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins
        rm -f $file
    else
        git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR}
        sudo ln -sf ${GRAFANA_PLUGINS_DIR}/dist /var/lib/grafana/plugins/grafana-gnocchi-datasource
        # NOTE(sileht): This is long and has a chance to fail, thx nodejs/npm
        (cd /var/lib/grafana/plugins/grafana-gnocchi-datasource && npm install && ./run-tests.sh) || true
    fi
    sudo service grafana-server restart
}

# remove the influxdb database
function _gnocchi_cleanup_influxdb {
    curl -G 'http://localhost:8086/query' --data-urlencode "q=DROP DATABASE $GNOCCHI_INFLUXDB_DBNAME"
}

function _cleanup_gnocchi_apache_wsgi {
    sudo rm -f $GNOCCHI_WSGI_DIR/*.wsgi
    sudo rm -f $(apache_site_config_for gnocchi)
@@ -209,7 +215,7 @@ function _config_gnocchi_apache_wsgi {
# cleanup_gnocchi() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_gnocchi {
    if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then
    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
        _cleanup_gnocchi_apache_wsgi
    fi
}
@@ -220,13 +226,22 @@ function configure_gnocchi {
    sudo chown $STACK_USER $GNOCCHI_DATA_DIR

    # Configure logging
    iniset $GNOCCHI_CONF DEFAULT verbose True
    iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"

    # Install the configuration files
    cp $GNOCCHI_DIR/etc/gnocchi/* $GNOCCHI_CONF_DIR


    # Set up logging
    if [ "$SYSLOG" != "False" ]; then
        iniset $GNOCCHI_CONF DEFAULT use_syslog "True"
    fi

    # Format logging
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$GNOCCHI_DEPLOY" != "mod_wsgi" ]; then
        setup_colorized_logging $GNOCCHI_CONF DEFAULT
    fi

    if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then
        iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL"
    fi
@@ -241,37 +256,29 @@ function configure_gnocchi {
    fi

    # Configure the storage driver
    if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
    if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
        iniset $GNOCCHI_CONF storage driver ceph
        iniset $GNOCCHI_CONF storage ceph_username ${GNOCCHI_CEPH_USER}
        iniset $GNOCCHI_CONF storage ceph_keyring ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
        iniset $GNOCCHI_CONF storage ceph_secret $(awk '/key/{print $3}' ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring)
    elif is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
        iniset $GNOCCHI_CONF storage driver swift
        iniset $GNOCCHI_CONF storage swift_user gnocchi_swift
        iniset $GNOCCHI_CONF storage swift_key $SERVICE_PASSWORD
        iniset $GNOCCHI_CONF storage swift_tenant_name "gnocchi_swift"
        iniset $GNOCCHI_CONF storage swift_auth_version 2
        iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI/v2.0/
        iniset $GNOCCHI_CONF storage swift_project_name "gnocchi_swift"
        iniset $GNOCCHI_CONF storage swift_auth_version 3
        iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI_V3
    elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then
        iniset $GNOCCHI_CONF storage driver file
        iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/
    elif [[ "$GNOCCHI_STORAGE_BACKEND" == 'influxdb' ]] ; then
        iniset $GNOCCHI_CONF storage driver influxdb
        iniset $GNOCCHI_CONF storage influxdb_database $GNOCCHI_INFLUXDB_DBNAME
    else
        echo "ERROR: could not configure storage driver"
        exit 1
    fi

    if is_service_enabled key; then
    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
        iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth
        if is_service_enabled gnocchi-grafana; then
            iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline "cors gnocchi+auth"
            iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL}
            iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL}
            iniset $GNOCCHI_CONF cors allow_methods GET,POST,PUT,DELETE,OPTIONS,HEAD,PATCH
            iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token
        else
            iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth
        fi
    else
        iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+noauth
@@ -280,8 +287,44 @@ function configure_gnocchi {
    # Configure the indexer database
    iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi`

    if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then
    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
        _config_gnocchi_apache_wsgi
    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
        # iniset creates these files when it's called if they don't exist.
        GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/uwsgi.ini

        rm -f "$GNOCCHI_UWSGI_FILE"

        iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "$GNOCCHI_DIR/gnocchi/rest/app.wsgi"
        # This is running standalone
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true
        # Set die-on-term & exit-on-reload so that uwsgi shuts down
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi die-on-term true
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi exit-on-reload true
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi threads 32
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi processes $API_WORKERS
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi enable-threads true
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi plugins python
        # uwsgi recommends this to prevent thundering herd on accept.
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi thunder-lock true
        # Override the default size for headers from the 4k default.
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi buffer-size 65535
        # Make sure the client doesn't try to re-use the connection.
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi add-header "Connection: close"
        # Don't share rados resources and python-requests globals between processes
        iniset "$GNOCCHI_UWSGI_FILE" uwsgi lazy-apps true
    fi
}

# configure_keystone_for_gnocchi() - Configure Keystone needs for Gnocchi
function configure_keystone_for_gnocchi {
    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
        if is_service_enabled gnocchi-grafana; then
            # NOTE(sileht): keystone configuration has to be set before uwsgi
            # is started
            iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL}
        fi
    fi
}
@@ -294,7 +337,7 @@ function configure_ceph_gnocchi {
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} crush_ruleset ${RULE_ID}

    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
}
@@ -309,13 +352,18 @@ function init_gnocchi {
    if is_service_enabled mysql postgresql; then
        recreate_database gnocchi
    fi
    $GNOCCHI_BIN_DIR/gnocchi-upgrade
    if is_service_enabled ceilometer; then
        $GNOCCHI_BIN_DIR/gnocchi-upgrade --create-legacy-resource-types
    else
        $GNOCCHI_BIN_DIR/gnocchi-upgrade
    fi
}

function preinstall_gnocchi {
    # Needed to build psycopg2
    if is_ubuntu; then
        install_package libpq-dev
        # libpq-dev is needed to build psycopg2
        # uuid-runtime is needed to use the uuidgen command
        install_package libpq-dev uuid-runtime
    else
        install_package postgresql-devel
    fi
@@ -331,11 +379,6 @@ function install_gnocchi {
        _gnocchi_install_redis
    fi

    if [[ "${GNOCCHI_STORAGE_BACKEND}" == 'influxdb' ]] ; then
        _gnocchi_install_influxdb
        pip_install influxdb
    fi

    if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
        pip_install cradox
    fi
@@ -347,13 +390,15 @@

    install_gnocchiclient

    is_service_enabled key && EXTRA_FLAVOR=,keystonmiddleware
    [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonemiddleware

    # We don't use setup_package because we don't follow openstack/requirements
    sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}]

    if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then
    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
        install_apache_wsgi
    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
        pip_install uwsgi
    fi

    # Create configuration directory
@@ -364,7 +409,7 @@ function install_gnocchi {
# start_gnocchi() - Start running processes, including screen
function start_gnocchi {

    if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then
    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
        enable_apache_site gnocchi
        restart_apache_server
        if [[ -n $GNOCCHI_SERVICE_PORT ]]; then

@@ -378,6 +423,8 @@ function start_gnocchi {
            tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log
            tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log
        fi
    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
        run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE"
    else
        run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api -d -v --config-file $GNOCCHI_CONF"
    fi
@@ -391,19 +438,13 @@ function start_gnocchi {
    fi

    # Create a default policy
    if ! is_service_enabled key; then
    if [ "$GNOCCHI_USE_KEYSTONE" == "False" ]; then
        export OS_AUTH_TYPE=gnocchi-noauth
        export GNOCCHI_USER_ID=`uuidgen`
        export GNOCCHI_PROJECT_ID=`uuidgen`
        export GNOCCHI_ENDPOINT="$(gnocchi_service_url)"
    fi

    gnocchi archive-policy create -d granularity:5m,points:12 -d granularity:1h,points:24 -d granularity:1d,points:30 low
    gnocchi archive-policy create -d granularity:60s,points:60 -d granularity:1h,points:168 -d granularity:1d,points:365 medium
    gnocchi archive-policy create -d granularity:1s,points:86400 -d granularity:1m,points:43200 -d granularity:1h,points:8760 high

    gnocchi archive-policy-rule create -a low -m "*" default

    # run metricd last so we are properly waiting for swift and friends
    run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d -v --config-file $GNOCCHI_CONF"
    run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d -v --config-file $GNOCCHI_CONF"
@@ -411,7 +452,7 @@ function start_gnocchi {

# stop_gnocchi() - Stop running processes
function stop_gnocchi {
    if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then
    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
        disable_apache_site gnocchi
        restart_apache_server
    fi

@@ -419,14 +460,6 @@ function stop_gnocchi {
    for serv in gnocchi-api; do
        stop_process $serv
    done

    if [[ "${GNOCCHI_STORAGE_BACKEND}" == 'influxdb' ]] ; then
        _gnocchi_cleanup_influxdb
    fi

    if is_service_enabled gnocchi-grafana; then
        sudo umount /usr/share/grafana/public/app/plugins/datasource/gnocchi
    fi
}

if is_service_enabled gnocchi-api; then
@@ -436,11 +469,12 @@ if is_service_enabled gnocchi-api; then
    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing Gnocchi"
        stack_install_service gnocchi
        configure_keystone_for_gnocchi
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring Gnocchi"
        configure_gnocchi
        create_gnocchi_accounts
        if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
        if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
            echo_summary "Configuring Gnocchi for Ceph"
            configure_ceph_gnocchi
        fi
@@ -11,9 +11,17 @@ GNOCCHI_LOG_DIR=/var/log/gnocchi
GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi}
GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi}
GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi}
GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379}

# Toggle for deploying Gnocchi under HTTPD + mod_wsgi
GNOCCHI_USE_MOD_WSGI=${GNOCCHI_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
# GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values:
# - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi
# - simple : Run gnocchi-api
# - uwsgi : Run Gnocchi under uwsgi
# - <empty>: Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES
GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY}

# Toggle for deploying Gnocchi with/without Keystone
GNOCCHI_USE_KEYSTONE=$(trueorfalse True GNOCCHI_USE_KEYSTONE)

# Support potential entry-points console scripts and venvs
if [[ ${USE_VENV} = True ]]; then
@@ -29,7 +37,7 @@ GNOCCHI_SERVICE_PROTOCOL=http
# NOTE(chdent): If you are not using mod wsgi you need to set port!
GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041}
GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'}
GNOCCHI_SERVICE_HOST=$SERVICE_HOST
GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}}

# Gnocchi statsd info
GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)}
|
@ -45,14 +53,10 @@ GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8}
|
|||
# Gnocchi backend
|
||||
GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file}
|
||||
|
||||
# InfluxDB Settings
|
||||
GNOCCHI_INFLUXDB_DBNAME=${GNOCCHI_INFLUXDB_DBNAME:-gnocchidevstack}
|
||||
GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.2-1.x86_64.rpm}
|
||||
GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.2_amd64.deb}
|
||||
|
||||
# Grafana settings
|
||||
GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm}
|
||||
GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb}
|
||||
GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-plugins}
|
||||
GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-plugins-gnocchi.git}
|
||||
GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm}
|
||||
GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb}
|
||||
GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION}
|
||||
GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource}
|
||||
GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-gnocchi-datasource.git}
|
||||
GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000}
|
||||
|
|
|
@@ -32,19 +32,15 @@ responsible for linking resources with metrics.
How to choose back-ends
~~~~~~~~~~~~~~~~~~~~~~~

Gnocchi currently offers 4 storage drivers:
Gnocchi currently offers different storage drivers:

* File
* Swift
* Ceph (preferred)
* InfluxDB (experimental)

The first three drivers are based on an intermediate library, named
*Carbonara*, which handles the time series manipulation, since none of these
storage technologies handle time series natively. `InfluxDB`_ does not need
this layer since it is itself a time series database. However, the InfluxDB
driver is still experimental and suffers from bugs in InfluxDB itself that are
yet to be fixed as of this writing.
The drivers are based on an intermediate library, named *Carbonara*, which
handles the time series manipulation, since none of these storage technologies
handle time series natively.

The three *Carbonara* based drivers are working well and are as scalable as
their back-end technology permits. Ceph and Swift are inherently more scalable
@@ -57,45 +53,44 @@ Gnocchi processes. In any case, it is obvious that Ceph and Swift drivers are
largely more scalable. Ceph also offers better consistency, and hence is the
recommended driver.

.. _InfluxDB: http://influxdb.com

How to plan for Gnocchi’s storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---------------------------------

Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi,
a time serie is a collection of points, where a point is a given measure, or
sample, in the lifespan of a time serie. The storage format is compressed using
various techniques, therefore the computing of a time serie's size can be
estimated based on its worst case scenario with the following formula::
a time series is a collection of points, where a point is a given measure, or
sample, in the lifespan of a time series. The storage format is compressed
using various techniques, therefore the computing of a time series' size can be
estimated based on its **worst** case scenario with the following formula::

    number of points × 9 bytes = size in bytes
    number of points × 8 bytes = size in bytes

The number of points you want to keep is usually determined by the following
formula::

    number of points = timespan ÷ granularity

For example, if you want to keep a year of data with a one minute resolution::

    number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute
    number of points = 525 600

Then::

    size in bytes = 525 600 × 9 = 4 730 400 bytes = 4 620 KiB
    size in bytes = 525 600 × 8 = 4 204 800 bytes = 4 106 KiB

This is just for a single aggregated time serie. If your archive policy uses
This is just for a single aggregated time series. If your archive policy uses
the 8 default aggregation methods (mean, min, max, sum, std, median, count,
95pct) with the same "one year, one minute aggregations" resolution, the space
used will go up to a maximum of 8 × 4.5 MiB = 36 MiB.
used will go up to a maximum of 8 × 4.1 MiB = 32.8 MiB.
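These estimates are easy to reproduce. The following is a minimal Python
sketch of the worst-case formulas above, using the 8-bytes-per-point figure
quoted in this section::

    # Worst-case Carbonara storage estimate, per the formulas above.
    POINT_SIZE = 8  # bytes per point (worst case)

    def number_of_points(timespan_s, granularity_s):
        return timespan_s // granularity_s

    one_year = 365 * 24 * 60 * 60
    points = number_of_points(one_year, 60)   # one-minute granularity
    size = points * POINT_SIZE
    print(points)                             # 525600
    print(size, size / 1024.0)                # 4204800 bytes, 4106.25 KiB
    print(8 * size / 1024.0 / 1024.0)         # ~32 MiB for the 8 aggregates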

How to set the archive policy and granularity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---------------------------------------------

In Gnocchi, the archive policy is expressed in number of points. If your
archive policy defines a policy of 10 points with a granularity of 1 second,
the time serie archive will keep up to 10 seconds, each representing an
aggregation over 1 second. This means the time serie will at maximum retain 10
the time series archive will keep up to 10 seconds, each representing an
aggregation over 1 second. This means the time series will at maximum retain 10
seconds of data (sometimes a bit more) between the more recent point and the
oldest point. That does not mean it will be 10 consecutive seconds: there might
be a gap if data is fed irregularly.
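To make this concrete, a policy matching that 10-point/1-second example could
be created through the REST API. This is only an illustrative sketch using the
`requests` library; the endpoint, port, policy name and authentication headers
are placeholders that depend on your deployment::

    import requests

    policy = {
        "name": "ten-seconds",  # hypothetical policy name
        "definition": [{"granularity": "1s", "points": 10}],
    }
    r = requests.post(
        "http://localhost:8041/v1/archive_policy",   # deployment-specific
        headers={"Content-Type": "application/json",
                 "X-Roles": "admin"},                # noauth-mode example
        json=policy,
    )
    print(r.status_code)  # 201 when the policy is created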
@@ -109,9 +104,34 @@ policies. A typical low grained use case could be::

    3600 points with a granularity of 1 second = 1 hour
    1440 points with a granularity of 1 minute = 24 hours
    1800 points with a granularity of 1 hour = 30 days
    720 points with a granularity of 1 hour = 30 days
    365 points with a granularity of 1 day = 1 year

This would represent 7205 points × 17.92 = 126 KiB per aggregation method. If
you use the 8 standard aggregation methods, your metric will take up to 8 × 126
KiB = 0.98 MiB of disk space.
This would represent 6125 points × 9 = 54 KiB per aggregation method. If
you use the 8 standard aggregation methods, your metric will take up to 8 × 54
KiB = 432 KiB of disk space.

Default archive policies
------------------------

By default, 3 archive policies are created using the default archive policy
list (listed in `default_aggregation_methods`, i.e. mean, min, max, sum, std,
median, count, 95pct):

- low (maximum estimated size per metric: 5 KiB)

  * 5 minutes granularity over 1 hour
  * 1 hour granularity over 1 day
  * 1 day granularity over 1 month

- medium (maximum estimated size per metric: 139 KiB)

  * 1 minute granularity over 1 day
  * 1 hour granularity over 1 week
  * 1 day granularity over 1 year

- high (maximum estimated size per metric: 1 578 KiB)

  * 1 second granularity over 1 hour
  * 1 minute granularity over 1 week
  * 1 hour granularity over 1 year
@@ -32,6 +32,7 @@ extensions = [
    'gnocchi.gendoc',
    'sphinxcontrib.httpdomain',
    'sphinx.ext.autodoc',
    'reno.sphinxext',
]

# Add any paths that contain templates here, relative to this directory.
@@ -10,19 +10,7 @@ easily created by running:

::

    tox -e genconfig

This command will create an `etc/gnocchi/gnocchi.conf` file which can be used
as a base for the default configuration file at `/etc/gnocchi/gnocchi.conf`. If
you're using _devstack_, this file is already generated and put in place.

If you installed Gnocchi using pip, you can create a sample `gnocchi.conf` file
using the following commands:

::

    curl -O "https://raw.githubusercontent.com/openstack/gnocchi/master/etc/gnocchi/gnocchi-config-generator.conf"
    oslo-config-generator --config-file=gnocchi-config-generator.conf --output-file=gnocchi.conf
    oslo-config-generator --config-file=/etc/gnocchi/gnocchi-config-generator.conf --output-file=/etc/gnocchi/gnocchi.conf

The configuration file should be pretty explicit, but here are some of the base
options you want to change and configure:
@@ -51,7 +39,6 @@ Gnocchi provides these storage drivers:
- File (default)
- `Swift`_
- `Ceph`_
- `InfluxDB`_ (experimental)

Gnocchi provides these indexer drivers:

@@ -62,7 +49,6 @@ Gnocchi provides these indexer drivers:
.. _`Ceph`: http://ceph.com/
.. _`PostgreSQL`: http://postgresql.org
.. _`MySQL`: http://mysql.com
.. _`InfluxDB`: http://influxdb.com

Configuring the WSGI pipeline
-----------------------------
|
@ -77,15 +63,7 @@ installed the `keystone` flavor using `pip` (see :ref:`installation`), you can
|
|||
edit the `api-paste.ini` file to add the Keystone authentication middleware::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = keystone_authtoken gnocchi
|
||||
|
||||
Also, if you're planning on using `CORS`_ (e.g. to use `Grafana`_), you an also
|
||||
add the CORS middleware in the server pipeline::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = keystone_authtoken cors gnocchi
|
||||
|
||||
With or without Keystone support.
|
||||
pipeline = gnocchi+auth
|
||||
|
||||
.. _`Paste Deployment`: http://pythonpaste.org/deploy/
|
||||
.. _`OpenStack Keystone`: http://launchpad.net/keystone
|
||||
|
@ -111,7 +89,7 @@ For a more robust multi-nodes deployment, the coordinator may be changed via
|
|||
the `storage.coordination_url` configuration option to one of the other `tooz
|
||||
backends`_.
|
||||
|
||||
For example to use Redis backend::
|
||||
For example, to use Redis backend::
|
||||
|
||||
coordination_url = redis://<sentinel host>?sentinel=<master name>
|
||||
|
||||
|
@ -129,8 +107,8 @@ Ceph driver implementation details
|
|||
Each batch of measurements to process is stored into one rados object.
|
||||
These objects are named `measures_<metric_id>_<random_uuid>_<timestamp>`
|
||||
|
||||
Also a special empty object called `measures` has the list of measures to
|
||||
process stored in its xattr attributes.
|
||||
Also a special empty object called `measure` has the list of measures to
|
||||
process stored in its omap attributes.
|
||||
|
||||
Because of the asynchronous nature of how we store measurements in Gnocchi,
|
||||
`gnocchi-metricd` needs to know the list of objects that are waiting to be
|
||||
@@ -141,11 +119,12 @@ processed:
- Using a custom format into a rados object, would force us to use a lock
  each time we would change it.

Instead, the xattrs of one empty rados object are used. No lock is needed to
add/remove a xattr.
Instead, the omaps of one empty rados object are used. No lock is needed to
add/remove an omap attribute.

But depending on the filesystem used by ceph OSDs, this xattrs can have a
limitation in terms of numbers and size if Ceph is not correctly configured.
Also xattrs attributes are used to store the list of aggregations used for a
metric. So depending on the filesystem used by ceph OSDs, xattrs can have
a limitation in terms of numbers and size if Ceph is not correctly configured.
See `Ceph extended attributes documentation`_ for more details.

Then, each Carbonara generated file is stored in *one* rados object.
@@ -168,7 +147,7 @@ So, in realistic scenarios, the direct relation between the archive policy and
the size of the rados objects created by Gnocchi is not a problem.


Also Gnocchi can use `cradox`_ Python libary if installed. This library is a
Also Gnocchi can use `cradox`_ Python library if installed. This library is a
Python binding to librados written with `Cython`_, aiming to replace the one
written with `ctypes`_ provided by Ceph.
This new library will be part of the next Ceph release (10.0.4).
@@ -185,3 +164,14 @@ installed to improve the Ceph backend performance.
.. _`Cython`: http://cython.org/
.. _`ctypes`: https://docs.python.org/2/library/ctypes.html
.. _`rados.py`: https://github.com/ceph/ceph/blob/hammer/src/pybind/rados.py


Swift driver implementation details
-----------------------------------

The Swift driver leverages the bulk delete functionality provided by the bulk_
middleware to minimise the amount of requests made to clean storage data. This
middleware must be enabled to ensure Gnocchi functions correctly. By default,
Swift has this middleware enabled in its pipeline.

.. _bulk: http://docs.openstack.org/liberty/config-reference/content/object-storage-bulk-delete.html
|
@ -1,21 +0,0 @@
|
|||
==========
|
||||
Devstack
|
||||
==========
|
||||
|
||||
To enable Gnocchi in devstack, add the following to local.conf:
|
||||
|
||||
::
|
||||
|
||||
enable_plugin gnocchi https://github.com/openstack/gnocchi master
|
||||
enable_service gnocchi-api,gnocchi-metricd
|
||||
|
||||
To enable Grafana support in devstack, you can also enable `gnocchi-grafana`::
|
||||
|
||||
enable_service gnocchi-grafana
|
||||
|
||||
Then, you can start devstack:
|
||||
|
||||
::
|
||||
|
||||
./stack.sh
|
||||
|
|
@@ -0,0 +1,33 @@
========
Glossary
========

.. glossary::

   Resource
      An entity representing anything in your infrastructure that you will
      associate metric(s) with. It is identified by a unique ID and can contain
      attributes.

   Metric
      An entity storing measures identified by a UUID. It can be attached to a
      resource using a name. How a metric stores its measures is defined by the
      archive policy it is associated to.

   Measure
      A datapoint tuple composed of a timestamp and a value.

   Archive policy
      A measure storage policy attached to a metric. It determines how long
      measures will be kept in a metric and how they will be aggregated.

   Granularity
      The time between two measures in an aggregated timeseries of a metric.

   Timeseries
      A list of measures.

   Aggregation method
      Function used to aggregate multiple measures into one. For example, the
      `min` aggregation method will aggregate the values of different measures
      to the minimum value of all the measures in the time range.
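To tie the last few terms together, here is a small illustrative sketch (plain
Python, not Gnocchi internals) that turns raw measures into an aggregated
timeseries with a 60-second granularity and the `min` aggregation method::

    from collections import defaultdict

    # Measures: (timestamp in seconds, value) datapoints
    measures = [(0, 5.0), (30, 2.0), (61, 7.0), (119, 3.0)]

    granularity = 60  # seconds between two points of the aggregated series
    buckets = defaultdict(list)
    for ts, value in measures:
        buckets[ts - (ts % granularity)].append(value)

    # Apply the `min` aggregation method to each granularity bucket
    timeseries = sorted((ts, min(values)) for ts, values in buckets.items())
    print(timeseries)  # [(0, 2.0), (60, 3.0)]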
@@ -2,14 +2,12 @@
Grafana support
=================

`Grafana`_ has support for Gnocchi through a plugin. The repository named
`grafana-plugins`_ contains this plugin. You can enable the plugin by following
the instructions in the `Grafana documentation`_.
`Grafana`_ has support for Gnocchi through a plugin. It can be installed with
grafana-cli::

.. note::
   A `pull request`_ has been made to merge this plugin directly into Grafana
   main tree, but it has unfortunately been denied for the time being. Feel
   free to post a comment there requesting its reopening.
    sudo grafana-cli plugins install sileht-gnocchi-datasource

`Source`_ and `Documentation`_ are also available.

Grafana has 2 modes of operation: proxy or direct mode. In proxy mode, your
browser only communicates with Grafana, and Grafana communicates with Gnocchi.
|
@ -29,26 +27,18 @@ In order to use Gnocchi with Grafana in proxy mode, you just need to:
|
|||
In order to use Gnocchi with Grafana in direct mode, you need to do a few more
|
||||
steps:
|
||||
|
||||
1. Enable the `CORS`_ middleware. This can be done easily by modifying the
|
||||
Gnocchi `api-paste.ini` configuration file and adding `cors` into the main
|
||||
pipeline::
|
||||
|
||||
[pieline:main]
|
||||
pipeline = cors keystone_authtoken gnocchi
|
||||
|
||||
This will authorize your browser to make requests to Gnocchi on behalf of
|
||||
Grafana.
|
||||
|
||||
2. Configure the CORS middleware in `gnocchi.conf` to allow request from
|
||||
1. Configure the CORS middleware in `gnocchi.conf` to allow request from
|
||||
Grafana::
|
||||
|
||||
[cors]
|
||||
allowed_origin = http://example.com/grafana
|
||||
allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token
|
||||
|
||||
3. Configure the CORS middleware in Keystone in the same fashion.
|
||||
2. Configure the CORS middleware in Keystone to allow request from Grafana too:
|
||||
|
||||
4. Configure a new datasource in Grafana with the Keystone URL, a user, a
|
||||
[cors]
|
||||
allowed_origin = http://example.com/grafana
|
||||
|
||||
3. Configure a new datasource in Grafana with the Keystone URL, a user, a
|
||||
project and a password. Your browser will query Keystone for a token, and
|
||||
then query Gnocchi based on what Grafana needs.
|
||||
|
||||
|
@ -57,7 +47,6 @@ steps:
|
|||
:alt: Grafana screenshot
|
||||
|
||||
.. _`Grafana`: http://grafana.org
|
||||
.. _`grafana-plugins`: https://github.com/grafana/grafana-plugins
|
||||
.. _`pull request`: https://github.com/grafana/grafana/pull/2716
|
||||
.. _`Grafana documentation`: http://docs.grafana.org/
|
||||
.. _`Documentation`: https://grafana.net/plugins/sileht-gnocchi-datasource
|
||||
.. _`Source`: https://github.com/sileht/grafana-gnocchi-datasource
|
||||
.. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing
|
||||
|
|
|
@ -46,6 +46,7 @@ Key Features
|
|||
- Archiving policy
|
||||
- Metric value search
|
||||
- Structured resources
|
||||
- Resource history
|
||||
- Queryable resource indexer
|
||||
- Multi-tenant
|
||||
- Grafana support
|
||||
|
@ -66,6 +67,7 @@ Documentation
|
|||
rest
|
||||
statsd
|
||||
grafana
|
||||
resource_types
|
||||
glossary
|
||||
releasenotes/index.rst
|
||||
|
||||
.. _`OpenStack`: http://openstack.org
|
||||
|
|
|
@ -45,9 +45,10 @@ The list of variants available is:
|
|||
* keystone – provides Keystone authentication support
|
||||
* mysql - provides MySQL indexer support
|
||||
* postgresql – provides PostgreSQL indexer support
|
||||
* influxdb – provides InfluxDB storage support
|
||||
* swift – provides OpenStack Swift storage support
|
||||
* ceph – provides Ceph storage support
|
||||
* ceph – provides common part of Ceph storage support
|
||||
* ceph_recommended_lib – provides Ceph (>=0.80) storage support
|
||||
* ceph_alternative_lib – provides Ceph (>=10.1.0) storage support
|
||||
* file – provides file driver support
|
||||
* doc – documentation building support
|
||||
* test – unit and functional tests support
|
||||
@@ -60,7 +61,18 @@ procedure::
Again, depending on the drivers and features you want to use, you need to
install extra variants using, for example::

    pip install -e .[postgresql,ceph]
    pip install -e .[postgresql,ceph,ceph_recommended_lib]


Ceph requirements
-----------------

Gnocchi leverages the omap API of librados, but this is available in the
Python binding only since python-rados >= 9.1.0. To handle this, Gnocchi uses
the 'cradox' Python library, which has exactly the same API but works with
Ceph >= 0.80.0.

If Ceph and python-rados are >= 9.1.0, the cradox Python library becomes
optional but is still recommended.


Initialization
@@ -79,36 +91,17 @@ Upgrading
In order to upgrade from a previous version of Gnocchi, you need to make sure
that your indexer and storage are properly upgraded. Run the following:

1. Stop the old version of Gnocchi API server and metric daemon
1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon

2. Install the new version of Gnocchi
2. Make sure that the processing backlog is empty (`gnocchi status`)

2. Run `gnocchi-upgrade`
3. Stop the old version of `gnocchi-metricd` daemon

4. Install the new version of Gnocchi

5. Run `gnocchi-upgrade`
   This can take several hours depending on the size of your index and
   storage.

3. Start the new Gnocchi API server and metric daemon

Minimal interruption upgrade
============================
Gnocchi supports online upgrade of its storage system, which avoids
interrupting Gnocchi for a long time. In order to upgrade from previous
versions, you need to follow the following steps:

1. Stop the old Gnocchi API server and metric daemon

2. Run `gnocchi-upgrade --skip-storage` with the new version of Gnocchi.
   This can take several minutes depending on the size of your index.

3. Start the new Gnocchi API server.

4. Run `gnocchi-upgrade` with the new version of Gnocchi
   This can take several hours depending on the size of your storage.

5. Start the new Gnocchi metric daemon.

This will upgrade the indexer and storage in two passes. While a new version of
Gnocchi API cannot run with an old version of the indexer, it can run with an
old version of its storage back-end. For performance reasons, _metricd_ needs
to run an upgraded storage back-end, otherwise it would spend too much time
checking for upgrade pattern on each run.
6. Start the new Gnocchi API server, `gnocchi-metricd`
   and `gnocchi-statsd` daemons
@@ -0,0 +1,6 @@
===================================
2.1 Series Release Notes
===================================

.. release-notes::
   :branch: origin/stable/2.1

@@ -0,0 +1,6 @@
===================================
2.2 Series Release Notes
===================================

.. release-notes::
   :branch: origin/stable/2.2

@@ -0,0 +1,9 @@
Release Notes
=============

.. toctree::
   :maxdepth: 2

   unreleased
   2.2
   2.1

@@ -0,0 +1,5 @@
============================
Current Series Release Notes
============================

.. release-notes::
@@ -1,108 +0,0 @@
================
Resource Types
================

Gnocchi offers different resource types to manage your resources. Each resource
type has strongly typed attributes. All resource types are subtypes of the
`generic` type.

Immutable attributes are attributes that cannot be modified after the resource
has been created.


generic
=======

+------------+----------------+-----------+
| Attribute  | Type           | Immutable |
+============+================+===========+
| user_id    | UUID           | Yes       |
+------------+----------------+-----------+
| project_id | UUID           | Yes       |
+------------+----------------+-----------+
| started_at | Timestamp      | Yes       |
+------------+----------------+-----------+
| ended_at   | Timestamp      | No        |
+------------+----------------+-----------+
| type       | String         | Yes       |
+------------+----------------+-----------+
| metrics    | {String: UUID} | No        |
+------------+----------------+-----------+


ceph_account
============

No specific attributes.


identity
========

No specific attributes.


image
=====

+------------------+---------+-----------+
| Attribute        | Type    | Immutable |
+==================+=========+===========+
| name             | String  | No        |
+------------------+---------+-----------+
| container_format | String  | No        |
+------------------+---------+-----------+
| disk_format      | String  | No        |
+------------------+---------+-----------+


instance
========

+--------------+---------+-----------+
| Attribute    | Type    | Immutable |
+==============+=========+===========+
| flavor_id    | String  | No        |
+--------------+---------+-----------+
| image_ref    | String  | No        |
+--------------+---------+-----------+
| host         | String  | No        |
+--------------+---------+-----------+
| display_name | String  | No        |
+--------------+---------+-----------+
| server_group | String  | No        |
+--------------+---------+-----------+


ipmi
====

No specific attributes.


network
=======

No specific attributes.


stack
=====

No specific attributes.


swift_account
=============

No specific attributes.


volume
======

+--------------+---------+-----------+
| Attribute    | Type    | Immutable |
+==============+=========+===========+
| display_name | String  | No        |
+--------------+---------+-----------+
@@ -12,11 +12,11 @@ these headers in your HTTP requests:
* X-Project-Id

The `X-Roles` header can also be provided in order to match role based ACL
specified in `policy.json`.
specified in `policy.json`, as can `X-Domain-Id` to match domain-based ACL.

If you enable the OpenStack Keystone middleware, you only need to authenticate
against Keystone and provide `X-Auth-Token` header with a valid token for each
request sent to Gnocchi. The headers mentionned above will be filled
request sent to Gnocchi. The headers mentioned above will be filled
automatically based on your Keystone authorizations.
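For example, with the noauth pipeline a request can simply carry those headers
itself. The following is an illustrative sketch with the `requests` library;
the endpoint URL and the IDs are placeholders, not values mandated by
Gnocchi::

    import uuid

    import requests

    headers = {
        "X-User-Id": str(uuid.uuid4()),     # any UUID in noauth mode
        "X-Project-Id": str(uuid.uuid4()),
        "Content-Type": "application/json",
    }
    r = requests.get("http://localhost:8041/v1/metric", headers=headers)
    print(r.status_code, r.json())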

Metrics
|
@ -41,6 +41,19 @@ To retrieve the list of all the metrics created, use the following request:
|
|||
|
||||
{{ scenarios['list-metric']['doc'] }}
|
||||
|
||||
.. note::
|
||||
|
||||
Considering the large volume of metrics Gnocchi will store, query results are
|
||||
limited to `max_limit` value set in the configuration file. Returned results
|
||||
are ordered by metrics' id values. To retrieve the next page of results, the
|
||||
id of a metric should be given as `marker` for the beginning of the next page
|
||||
of results.
|
||||
|
||||
Default ordering and limits as well as page start can be modified
|
||||
using query parameters:
|
||||
|
||||
{{ scenarios['list-metric-pagination']['doc'] }}
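As an illustration, a client can walk all pages by feeding the last seen
metric id back as `marker`. A hedged sketch only; the endpoint and
authentication are deployment-specific::

    import requests

    url = "http://localhost:8041/v1/metric"   # deployment-specific
    headers = {"X-Auth-Token": "<token>"}     # or noauth headers
    metrics = []
    marker = None
    while True:
        params = {"limit": 100}               # page size, capped by max_limit
        if marker is not None:
            params["marker"] = marker         # id of the last metric seen
        page = requests.get(url, headers=headers, params=params).json()
        if not page:
            break
        metrics.extend(page)
        marker = page[-1]["id"]
    print(len(metrics))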

It is possible to send measures to the metric:

{{ scenarios['post-measures']['doc'] }}
|
@ -60,6 +73,17 @@ endpoint:
|
|||
|
||||
{{ scenarios['get-measures']['doc'] }}
|
||||
|
||||
Depending on the driver, there may be some lag after POSTing measures before
|
||||
they are processed and queryable. To ensure your query returns all measures
|
||||
that have been POSTed, you can force any unprocessed measures to be handled:
|
||||
|
||||
{{ scenarios['get-measures-refresh']['doc'] }}
|
||||
|
||||
.. note::
|
||||
|
||||
Depending on the amount of data that is unprocessed, `refresh` may add
|
||||
some overhead to your query.
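A sketch of such a query with the `refresh` parameter; the metric id below is
a made-up example and the endpoint is deployment-specific::

    import requests

    metric_id = "5a301761-aaaa-46e2-8900-8b4f4fe6675e"  # hypothetical id
    r = requests.get(
        "http://localhost:8041/v1/metric/%s/measures" % metric_id,
        headers={"X-Auth-Token": "<token>"},
        params={"refresh": "true"},  # process pending measures first
    )
    print(r.json())  # [[timestamp, granularity, value], ...]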

The list of points returned is composed of tuples with (timestamp, granularity,
value) sorted by timestamp. The granularity is the timespan covered by
aggregation for this point.
|
@ -159,10 +183,27 @@ It is also possible to list archive policies:
|
|||
|
||||
{{ scenarios['list-archive-policy']['doc'] }}
|
||||
|
||||
Existing archive policies can be modified to retain more or less data depending
|
||||
on requirements. If the policy coverage is expanded, measures are not
|
||||
retroactively calculated as backfill to accommodate the new timespan:
|
||||
|
||||
{{ scenarios['update-archive-policy']['doc'] }}
|
||||
|
||||
.. note::
|
||||
|
||||
Granularities cannot be changed to a different rate. Also, granularities
|
||||
cannot be added or dropped from a policy.
|
||||
|
||||
It is possible to delete an archive policy if it is not used by any metric:
|
||||
|
||||
{{ scenarios['delete-archive-policy']['doc'] }}
|
||||
|
||||
.. note::
|
||||
|
||||
An archive policy cannot be deleted until all metrics associated with it
|
||||
are removed by a metricd daemon.
|
||||
|
||||
|
||||
Archive Policy Rule
|
||||
===================
|
||||
|
||||
|
@@ -251,14 +292,29 @@ And to retrieve its modification history:

{{ scenarios['get-patched-instance-history']['doc'] }}

-It possible to delete a resource altogether:
+It is possible to delete a resource altogether:

{{ scenarios['delete-resource-generic']['doc'] }}

+It is also possible to delete a batch of resources based on attribute values;
+the number of deleted resources is returned.
+
+To delete resources based on ids:
+
+{{ scenarios['delete-resources-by-ids']['doc'] }}
+
+or delete resources based on time:
+
+{{ scenarios['delete-resources-by-time']['doc'] }}
+
.. IMPORTANT::

   When a resource is deleted, all its associated metrics are deleted at the
   same time.
+
+   When a batch of resources is deleted, an attribute filter is required to
+   avoid deletion of the entire database.
+

All resources can be listed, either by using the `generic` type that will list
all types of resources, or by filtering on their resource type:

@@ -275,6 +331,16 @@ or using `details=true` in the query parameter:

{{ scenarios['list-resource-generic-details']['doc'] }}

+.. note::
+
+   Similar to the metric list, query results are limited to the `max_limit`
+   value set in the configuration file.
+
+Returned results represent a single page of data and are ordered by resources'
+revision_start time and started_at values:
+
+{{ scenarios['list-resource-generic-pagination']['doc'] }}
+
Each resource can be linked to any number of metrics. The `metrics` attribute
is a key/value field where the key is the name of the relationship and
the value is a metric:
@@ -296,6 +362,43 @@ The same endpoint can be used to append metrics to a resource:

.. _Nova: http://launchpad.net/nova

+Resource Types
+==============
+
+Gnocchi is able to manage resource types with custom attributes.
+
+To create a new resource type:
+
+{{ scenarios['create-resource-type']['doc'] }}
+
+Then to retrieve its description:
+
+{{ scenarios['get-resource-type']['doc'] }}
+
+All resource types can be listed like this:
+
+{{ scenarios['list-resource-type']['doc'] }}
+
+It can also be deleted if no more resources are associated to it:
+
+{{ scenarios['delete-resource-type']['doc'] }}
+
+Attributes can be added or removed:
+
+{{ scenarios['patch-resource-type']['doc'] }}
+
+Creating a resource type means creating new tables on the indexer backend.
+This is a heavy operation that will lock some tables for a short amount of
+time. When the resource type is created, its initial `state` is `creating`.
+When the new tables have been created, the state switches to `active` and the
+new resource type is ready to be used. If something unexpected occurs during
+this step, the state switches to `creation_error`.
+
+The same behavior occurs when the resource type is deleted. The state first
+switches to `deleting` and the resource type is no longer usable. Then the
+tables are removed and the resource type is finally deleted from the database.
+If something unexpected occurs, the state switches to `deletion_error`.
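
A minimal sketch of how a client might wait for a new type to become usable
(the endpoint URL and the use of the `requests` library are illustrative
assumptions, not part of the commit)::

    import time

    import requests

    GNOCCHI = "http://localhost:8041"  # assumed endpoint

    def wait_for_resource_type(name, timeout=60):
        # Poll until the indexer has finished creating the tables.
        deadline = time.time() + timeout
        while time.time() < deadline:
            rt = requests.get(GNOCCHI + "/v1/resource_type/" + name).json()
            if rt["state"] == "active":
                return rt
            if rt["state"] == "creation_error":
                raise RuntimeError("resource type creation failed")
            time.sleep(1)
        raise RuntimeError("timed out waiting for resource type")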

Searching for resources
=======================

@@ -307,6 +410,10 @@ values:

{{ scenarios['search-resource-for-user']['doc'] }}

+Or even:
+
+{{ scenarios['search-resource-for-host-like']['doc'] }}
+
Complex operators such as `and` and `or` are also available:

{{ scenarios['search-resource-for-user-after-timestamp']['doc'] }}
@@ -380,6 +487,12 @@ It can also be done by providing the list of metrics to aggregate:

of this already aggregated data may not make sense for certain kinds of
aggregation methods (e.g. stdev).

+By default, the measures are aggregated using the aggregation method provided,
+e.g. you'll get a mean of means, or a max of maxes. You can specify what
+method to use over the retrieved aggregation by using the `reaggregate`
+parameter:
+
+{{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }}
+
It's also possible to do that aggregation on metrics linked to resources. In
order to select these resources, the following endpoint accepts a query such as
the one described in `Searching for resources`_.

@@ -391,6 +504,10 @@ requested resource type, and then compute the aggregation:

{{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }}

+Similar to retrieving measures for a single metric, the `refresh` parameter
+can be provided to force all POSTed measures to be processed across all
+metrics before computing the result.
+
+Also, aggregation across metrics behaves differently depending on whether
+boundaries ('start' and 'stop') are set and whether 'needed_overlap' is set.

@@ -4,7 +4,7 @@
    Content-Type: application/json

    {
-     "name": "low",
+     "name": "short",
      "back_window": 0,
      "definition": [
        {
@@ -24,7 +24,7 @@
    Content-Type: application/json

    {
-     "name": "low-without-max",
+     "name": "short-without-max",
      "aggregation_methods": ["-max", "-min"],
      "back_window": 0,
      "definition": [
        {
@@ -45,13 +45,31 @@
- name: list-archive-policy
  request: GET /v1/archive_policy HTTP/1.1

+- name: update-archive-policy
+  request: |
+    PATCH /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "definition": [
+        {
+          "granularity": "1s",
+          "timespan": "1 hour"
+        },
+        {
+          "points": 48,
+          "timespan": "1 day"
+        }
+      ]
+    }
+
- name: create-archive-policy-to-delete
  request: |
    POST /v1/archive_policy HTTP/1.1
    Content-Type: application/json

    {
-     "name": "medium",
+     "name": "some-archive-policy",
      "back_window": 0,
      "definition": [
        {
@@ -74,7 +92,7 @@
    Content-Type: application/json

    {
-     "archive_policy_name": "low"
+     "archive_policy_name": "high"
    }

- name: create-metric-2
@@ -124,6 +142,9 @@
- name: list-metric
  request: GET /v1/metric HTTP/1.1

+- name: list-metric-pagination
+  request: GET /v1/metric?limit=100&sort=name:asc HTTP/1.1
+
- name: post-measures
  request: |
    POST /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1
@@ -193,6 +214,9 @@
- name: get-measures-granularity
  request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1

+- name: get-measures-refresh
+  request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1
+
- name: create-resource-generic
  request: |
    POST /v1/resource/generic HTTP/1.1
@@ -216,6 +240,22 @@
      "metrics": {"temperature": {"archive_policy_name": "low"}}
    }

+- name: create-resource-type-instance
+  request: |
+    POST /v1/resource_type HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "name": "instance",
+      "attributes": {
+        "display_name": {"type": "string", "required": true},
+        "flavor_id": {"type": "string", "required": true},
+        "image_ref": {"type": "string", "required": true},
+        "host": {"type": "string", "required": true},
+        "server_group": {"type": "string", "required": false}
+      }
+    }
+
- name: create-resource-instance
  request: |
    POST /v1/resource/instance HTTP/1.1
@@ -243,6 +283,9 @@
- name: list-resource-generic-details
  request: GET /v1/resource/generic?details=true HTTP/1.1

+- name: list-resource-generic-pagination
+  request: GET /v1/resource/generic?limit=2&sort=id:asc HTTP/1.1
+
- name: search-resource-for-user
  request: |
    POST /v1/search/resource/instance HTTP/1.1
@@ -250,6 +293,13 @@

    {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}}

+- name: search-resource-for-host-like
+  request: |
+    POST /v1/search/resource/instance HTTP/1.1
+    Content-Type: application/json
+
+    {"like": {"host": "compute%"}}
+
- name: search-resource-for-user-details
  request: |
    POST /v1/search/resource/generic?details=true HTTP/1.1
@@ -309,6 +359,57 @@
- name: get-patched-instance
  request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1

+
+- name: create-resource-type
+  request: |
+    POST /v1/resource_type HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "name": "my_custom_type",
+      "attributes": {
+        "myid": {"type": "uuid"},
+        "display_name": {"type": "string", "required": true},
+        "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3},
+        "size": {"type": "number", "min": 5, "max": 32.8},
+        "enabled": {"type": "bool", "required": false}
+      }
+    }
+
+- name: create-resource-type-2
+  request: |
+    POST /v1/resource_type HTTP/1.1
+    Content-Type: application/json
+
+    {"name": "my_other_type"}
+
+- name: get-resource-type
+  request: GET /v1/resource_type/my_custom_type HTTP/1.1
+
+- name: list-resource-type
+  request: GET /v1/resource_type HTTP/1.1
+
+- name: patch-resource-type
+  request: |
+    PATCH /v1/resource_type/my_custom_type HTTP/1.1
+    Content-Type: application/json-patch+json
+
+    [
+      {
+        "op": "add",
+        "path": "/attributes/awesome-stuff",
+        "value": {"type": "bool", "required": false}
+      },
+      {
+        "op": "remove",
+        "path": "/attributes/prefix"
+      }
+    ]
+
+
+- name: delete-resource-type
+  request: DELETE /v1/resource_type/my_custom_type HTTP/1.1
+
- name: search-resource-history
  request: |
    POST /v1/search/resource/instance?history=true HTTP/1.1
@@ -400,6 +501,97 @@
- name: delete-resource-generic
  request: DELETE /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1

+- name: create-resources-a
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AA19-BBE0-E1E2-2D3JDC7D289R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: create-resources-b
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AAEF-AA90-E1E2-2D3JDC7D289R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: create-resources-c
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AAEF-BCEF-E112-2D3JDC7D289R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: create-resources-d
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AAEF-BCEF-E112-2D15DC7D289R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: create-resources-e
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AAEF-BCEF-E112-2D3JDC30289R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: create-resources-f
+  request: |
+    POST /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "id": "340102AA-AAEF-BCEF-E112-2D15349D109R",
+      "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ",
+      "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ"
+    }
+
+- name: delete-resources-by-ids
+  request: |
+    DELETE /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "in": {
+        "id": [
+          "{{ scenarios['create-resources-a']['response'].json['id'] }}",
+          "{{ scenarios['create-resources-b']['response'].json['id'] }}",
+          "{{ scenarios['create-resources-c']['response'].json['id'] }}"
+        ]
+      }
+    }
+
+- name: delete-resources-by-time
+  request: |
+    DELETE /v1/resource/generic HTTP/1.1
+    Content-Type: application/json
+
+    {
+      ">=": {"started_at": "{{ scenarios['create-resources-f']['response'].json['started_at'] }}"}
+    }
+

- name: get-resource-named-metrics-measures
  request: GET /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures?start=2014-10-06T14:34 HTTP/1.1


@@ -461,6 +653,10 @@
  request: |
    GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&start=2014-10-06T14:34&aggregation=mean HTTP/1.1

+- name: get-across-metrics-measures-by-metric-ids-reaggregate
+  request: |
+    GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&aggregation=mean&reaggregation=min HTTP/1.1
+
- name: append-metrics-to-resource
  request: |
    POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric HTTP/1.1
@@ -60,6 +60,9 @@ monitor (see `How many metricd workers do we need to run`_). Making sure that
the HTTP server and `gnocchi-metricd` daemon are running and are not writing
anything alarming in their logs is a sign of good health of the overall system.

+Total measures for backlog status may not accurately reflect the number of
+points to be processed when measures are submitted via batch.
+
How to backup and restore Gnocchi
=================================

@@ -5,13 +5,16 @@ pipeline = gnocchi+noauth
[composite:gnocchi+noauth]
use = egg:Paste#urlmap
/ = gnocchiversions
-/v1 = gnocchiv1
+/v1 = gnocchiv1+noauth

[composite:gnocchi+auth]
use = egg:Paste#urlmap
/ = gnocchiversions
/v1 = gnocchiv1+auth

+[pipeline:gnocchiv1+noauth]
+pipeline = gnocchiv1
+
[pipeline:gnocchiv1+auth]
pipeline = keystone_authtoken gnocchiv1

@@ -26,7 +29,3 @@ root = gnocchi.rest.V1Controller
[filter:keystone_authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
oslo_config_project = gnocchi
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = gnocchi

@@ -9,12 +9,20 @@
    "get resource": "rule:admin_or_creator or rule:resource_owner",
    "update resource": "rule:admin_or_creator",
    "delete resource": "rule:admin_or_creator",
+   "delete resources": "rule:admin_or_creator",
    "list resource": "rule:admin_or_creator or rule:resource_owner",
    "search resource": "rule:admin_or_creator or rule:resource_owner",

+   "create resource type": "role:admin",
+   "delete resource type": "role:admin",
+   "update resource type": "role:admin",
+   "list resource type": "",
+   "get resource type": "",
+
    "get archive policy": "",
    "list archive policy": "",
    "create archive policy": "role:admin",
+   "update archive policy": "role:admin",
    "delete archive policy": "role:admin",

    "create archive policy rule": "role:admin",

@@ -2,8 +2,6 @@
#
# Copyright 2014 OpenStack Foundation
#
-# Authors: Ana Malagon <atmalagon@gmail.com>
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at

@@ -207,3 +207,37 @@ class ArchivePolicyItem(dict):
                datetime.timedelta(seconds=self.granularity)),
            'points': self.points,
        }
+
+
+DEFAULT_ARCHIVE_POLICIES = {
+    'low': ArchivePolicy(
+        "low", 0, [
+            # 5 minutes resolution for an hour
+            ArchivePolicyItem(granularity=300, points=12),
+            # 1 hour resolution for a day
+            ArchivePolicyItem(granularity=3600, points=24),
+            # 1 day resolution for a month
+            ArchivePolicyItem(granularity=3600 * 24, points=30),
+        ],
+    ),
+    'medium': ArchivePolicy(
+        "medium", 0, [
+            # 1 minute resolution for a day
+            ArchivePolicyItem(granularity=60, points=60 * 24),
+            # 1 hour resolution for a week
+            ArchivePolicyItem(granularity=3600, points=7 * 24),
+            # 1 day resolution for a year
+            ArchivePolicyItem(granularity=3600 * 24, points=365),
+        ],
+    ),
+    'high': ArchivePolicy(
+        "high", 0, [
+            # 1 second resolution for an hour
+            ArchivePolicyItem(granularity=1, points=3600),
+            # 1 minute resolution for a week
+            ArchivePolicyItem(granularity=60, points=60 * 24 * 7),
+            # 1 hour resolution for a year
+            ArchivePolicyItem(granularity=3600, points=365 * 24),
+        ],
+    ),
+}
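
As a sanity check, each item's coverage is simply granularity * points. A
small illustrative sketch (an editorial aside, not part of the commit)
verifying the comments above:

    # timespan covered by each definition item of the "low" policy, in seconds
    low = [(300, 12), (3600, 24), (3600 * 24, 30)]
    for granularity, points in low:
        print(granularity * points)  # 3600 (1h), 86400 (1d), 2592000 (30d)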


@@ -18,19 +18,27 @@

import datetime
import functools
import itertools
import logging
import math
import numbers
import operator
import random
import re
import struct
import time

import iso8601
import lz4
import msgpack
import pandas
import six

from gnocchi import utils

# NOTE(sileht): pandas relies on time.strptime()
# and often triggers http://bugs.python.org/issue7980
# This is due to our heavy use of threads; as a workaround, call strptime
# once here to ensure the module is correctly loaded before we really use it.
time.strptime("2016-02-19", "%Y-%m-%d")

LOG = logging.getLogger(__name__)

@@ -45,6 +53,15 @@ class NoDeloreanAvailable(Exception):
            "%s is before %s" % (bad_timestamp, first_timestamp))


+class BeforeEpochError(Exception):
+    """Error raised when a timestamp before Epoch is used."""
+
+    def __init__(self, timestamp):
+        self.timestamp = timestamp
+        super(BeforeEpochError, self).__init__(
+            "%s is before Epoch" % timestamp)
+
+
class UnAggregableTimeseries(Exception):
    """Error raised when timeseries cannot be aggregated."""
    def __init__(self, reason):
@@ -60,17 +77,12 @@ class UnknownAggregationMethod(Exception):
            "Unknown aggregation method `%s'" % agg)


-class SerializableMixin(object):
-
-    @classmethod
-    def unserialize(cls, data):
-        return cls.from_dict(msgpack.loads(data, encoding='utf-8'))
-
-    def serialize(self):
-        return msgpack.dumps(self.to_dict())
+def round_timestamp(ts, freq):
+    return pandas.Timestamp(
+        (pandas.Timestamp(ts).value // freq) * freq)


-class TimeSerie(SerializableMixin):
+class TimeSerie(object):
    """A representation of a series of timestamps with values.

    Duplicate timestamps are not allowed and will be filtered to use the
@@ -114,43 +126,12 @@ class TimeSerie(SerializableMixin):

    @staticmethod
    def _timestamps_and_values_from_dict(values):
-        v = tuple(
-            zip(*dict(
-                (pandas.Timestamp(k), v)
-                for k, v in six.iteritems(values)).items()))
+        timestamps = pandas.to_datetime(list(values.keys()), unit='ns')
+        v = list(values.values())
        if v:
-            return v
+            return timestamps, v
        return (), ()

-    @classmethod
-    def from_dict(cls, d):
-        """Build a time series from a dict.
-
-        The dict format must be datetime as key and values as values.
-
-        :param d: The dict.
-        :returns: A TimeSerie object
-        """
-        return cls.from_data(
-            *cls._timestamps_and_values_from_dict(d['values']))
-
-    def to_dict(self):
-        return {
-            'values': dict((timestamp.value, float(v))
-                           for timestamp, v
-                           in six.iteritems(self.ts.dropna())),
-        }
-
-    @staticmethod
-    def _serialize_time_period(value):
-        if value:
-            return value.nanos / 10e8
-
-    @staticmethod
-    def _round_timestamp(ts, freq):
-        return pandas.Timestamp(
-            (pandas.Timestamp(ts).value // freq) * freq)
-
    @staticmethod
    def _to_offset(value):
        if isinstance(value, numbers.Real):
@@ -171,6 +152,15 @@ class TimeSerie(SerializableMixin):
        except IndexError:
            return

+    def group_serie(self, granularity, start=None):
+        # NOTE(jd) Our whole serialization system is based on Epoch, and we
+        # store unsigned integers, so we can't store anything before Epoch.
+        # Sorry!
+        if self.ts.index[0].value < 0:
+            raise BeforeEpochError(self.ts.index[0])
+        return self.ts[start:].groupby(functools.partial(
+            round_timestamp, freq=granularity * 10e8))
+

class BoundTimeSerie(TimeSerie):
    def __init__(self, ts=None, block_size=None, back_window=0):
@@ -208,9 +198,9 @@ class BoundTimeSerie(TimeSerie):

    def set_values(self, values, before_truncate_callback=None,
                   ignore_too_old_timestamps=False):
+        # NOTE: values must be sorted when passed in.
        if self.block_size is not None and not self.ts.empty:
-            values = sorted(values, key=operator.itemgetter(0))
-            first_block_timestamp = self._first_block_timestamp()
+            first_block_timestamp = self.first_block_timestamp()
            if ignore_too_old_timestamps:
                for index, (timestamp, value) in enumerate(values):
                    if timestamp >= first_block_timestamp:
@@ -230,31 +220,100 @@ class BoundTimeSerie(TimeSerie):
                before_truncate_callback(self)
            self._truncate()

+    _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize("<Qd")
+
    @classmethod
-    def from_dict(cls, d):
-        """Build a time series from a dict.
-
-        The dict format must be datetime as key and values as values.
-
-        :param d: The dict.
-        :returns: A TimeSerie object
-        """
-        timestamps, values = cls._timestamps_and_values_from_dict(d['values'])
-        return cls.from_data(timestamps, values,
-                             block_size=d.get('block_size'),
-                             back_window=d.get('back_window'))
-
-    def to_dict(self):
-        basic = super(BoundTimeSerie, self).to_dict()
-        basic.update({
-            'block_size': self._serialize_time_period(self.block_size),
-            'back_window': self.back_window,
-        })
-        return basic
+    def unserialize(cls, data, block_size, back_window):
+        uncompressed = lz4.loads(data)
+        nb_points = (
+            len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN
+        )
+        deserial = struct.unpack("<" + "Q" * nb_points + "d" * nb_points,
+                                 uncompressed)
+        start = deserial[0]
+        timestamps = [start]
+        for delta in itertools.islice(deserial, 1, nb_points):
+            ts = start + delta
+            timestamps.append(ts)
+            start = ts
+        return cls.from_data(
+            pandas.to_datetime(timestamps, unit='ns'),
+            deserial[nb_points:],
+            block_size=block_size,
+            back_window=back_window)
+
+    def serialize(self):
+        # NOTE(jd) Use a delta encoding for timestamps: the first absolute
+        # timestamp, then each difference to the previous one
+        timestamps = [self.first.value]
+        start = self.first.value
+        for i in self.ts.index[1:]:
+            v = i.value
+            timestamps.append(v - start)
+            start = v
+        values = self.ts.values.tolist()
+        return lz4.dumps(struct.pack(
+            '<' + 'Q' * len(timestamps) + 'd' * len(values),
+            *(timestamps + values)))
+
+    @classmethod
+    def benchmark(cls):
+        """Run a speed benchmark!"""
+        points = SplitKey.POINTS_PER_SPLIT
+        serialize_times = 50
+        now = datetime.datetime(2015, 4, 3, 23, 11)
+
+        print(cls.__name__)
+        print("=" * len(cls.__name__))
+
+        for title, values in [
+                ("Simple continuous range", six.moves.range(points)),
+                ("All 0", [float(0)] * points),
+                ("All 1", [float(1)] * points),
+                ("0 and 1", [0, 1] * (points // 2)),
+                ("1 and 0 random",
+                 [random.randint(0, 1)
+                  for x in six.moves.range(points)]),
+                ("Small number random pos/neg",
+                 [random.randint(-100000, 10000)
+                  for x in six.moves.range(points)]),
+                ("Small number random pos",
+                 [random.randint(0, 20000) for x in six.moves.range(points)]),
+                ("Small number random neg",
+                 [random.randint(-20000, 0) for x in six.moves.range(points)]),
+                ("Sin(x)", map(math.sin, six.moves.range(points))),
+                ("random ", [random.random()
+                             for x in six.moves.range(points)]),
+        ]:
+            print(title)
+            pts = pandas.Series(values,
+                                [now + datetime.timedelta(
+                                    seconds=i * random.randint(1, 10),
+                                    microseconds=random.randint(1, 999999))
+                                 for i in six.moves.range(points)])
+            ts = cls(ts=pts)
+            t0 = time.time()
+            for i in six.moves.range(serialize_times):
+                s = ts.serialize()
+            t1 = time.time()
+            print("  Serialization speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
+            print("  Bytes per point: %.2f" % (len(s) / float(points)))
+
+            t0 = time.time()
+            for i in six.moves.range(serialize_times):
+                cls.unserialize(s, 1, 1)
+            t1 = time.time()
+            print("  Unserialization speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
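
A standalone sketch of the timestamp-delta serialization used above (an
editorial aside, not part of the commit; it assumes the old python-lz4 0.x
API with lz4.dumps/lz4.loads that this code itself relies on):

    import struct

    import lz4

    timestamps = [1000, 2000, 3500]          # nanoseconds since Epoch
    values = [1.0, 2.5, 0.5]

    # encode: first absolute timestamp, then deltas, then the raw doubles
    deltas = [timestamps[0]] + [b - a
                                for a, b in zip(timestamps, timestamps[1:])]
    payload = lz4.dumps(struct.pack(
        "<" + "Q" * len(deltas) + "d" * len(values), *(deltas + values)))

    # decode: reverse the process
    raw = lz4.loads(payload)
    n = len(raw) // struct.calcsize("<Qd")
    decoded = struct.unpack("<" + "Q" * n + "d" * n, raw)
    out, acc = [], 0
    for d in decoded[:n]:
        acc += d
        out.append(acc)
    assert out == timestamps and list(decoded[n:]) == values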
-    def _first_block_timestamp(self):
-        rounded = self._round_timestamp(self.ts.index[-1],
-                                        self.block_size.delta.value)
+    def first_block_timestamp(self):
+        """Return the timestamp of the first block."""
+        rounded = round_timestamp(self.ts.index[-1],
+                                  self.block_size.delta.value)
        return rounded - (self.block_size * self.back_window)

    def _truncate(self):
@@ -263,17 +322,78 @@ class BoundTimeSerie(TimeSerie):
        # Change that to remove the amount of block needed to have
        # the size <= max_size. A block is a number of "seconds" (a
        # timespan)
-        self.ts = self.ts[self._first_block_timestamp():]
+        self.ts = self.ts[self.first_block_timestamp():]


+class SplitKey(pandas.Timestamp):
+    """A class representing a split key.
+
+    A split key is basically a timestamp that can be used to split
+    `AggregatedTimeSerie` objects in multiple parts. Each part will contain
+    `SplitKey.POINTS_PER_SPLIT` points. The split keys for a given
+    granularity are regularly spaced.
+    """
+
+    POINTS_PER_SPLIT = 3600
+
+    @classmethod
+    def _init(cls, value, sampling):
+        # NOTE(jd) This should be __init__ but it does not work, because of…
+        # Pandas, Cython, whatever.
+        self = cls(value)
+        self._carbonara_sampling = sampling
+        return self
+
+    @classmethod
+    def from_timestamp_and_sampling(cls, timestamp, sampling):
+        return cls._init(
+            round_timestamp(
+                timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8),
+            sampling)
+
+    @classmethod
+    def from_key_string(cls, keystr, sampling):
+        return cls._init(float(keystr) * 10e8, sampling)
+
+    def __next__(self):
+        """Get the split key of the next split.
+
+        :return: A `SplitKey` object.
+        """
+        return self._init(
+            self + datetime.timedelta(
+                seconds=(self.POINTS_PER_SPLIT * self._carbonara_sampling)),
+            self._carbonara_sampling)
+
+    next = __next__
+
+    def __iter__(self):
+        return self
+
+    def __str__(self):
+        return str(float(self))
+
+    def __float__(self):
+        ts = self.to_datetime()
+        if ts.tzinfo is None:
+            ts = ts.replace(tzinfo=iso8601.iso8601.UTC)
+        return utils.datetime_to_unix(ts)
+
+    def __repr__(self):
+        return "<%s: %s / %fs>" % (self.__class__.__name__,
+                                   pandas.Timestamp.__repr__(self),
+                                   self._carbonara_sampling)
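
A quick sketch of the boundary arithmetic (an editorial aside, not part of
the commit): with a 5-second sampling, each split covers
POINTS_PER_SPLIT * sampling seconds, and a timestamp is mapped to the start
of its split by flooring:

    POINTS_PER_SPLIT = 3600
    sampling = 5                         # seconds
    span = POINTS_PER_SPLIT * sampling   # 18000 seconds per split
    ts = 1234567                         # seconds since Epoch
    split_start = (ts // span) * span    # 1224000, the key for this split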


class AggregatedTimeSerie(TimeSerie):

    _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct")

-    POINTS_PER_SPLIT = 14400
+    PADDED_SERIAL_LEN = struct.calcsize("<?d")
+    COMPRESSED_SERIAL_LEN = struct.calcsize("<Hd")

-    def __init__(self, sampling, ts=None, max_size=None,
-                 aggregation_method='mean'):
+    def __init__(self, sampling, aggregation_method,
+                 ts=None, max_size=None):
        """A time serie that is downsampled.

        Used to represent the downsampled timeserie for a single
@@ -282,65 +402,62 @@ class AggregatedTimeSerie(TimeSerie):
        """
        super(AggregatedTimeSerie, self).__init__(ts)

-        m = self._AGG_METHOD_PCT_RE.match(aggregation_method)
-
-        if m:
-            self.q = float(m.group(1)) / 100
-            self.aggregation_method_func_name = 'quantile'
-        else:
-            if not hasattr(pandas.core.groupby.SeriesGroupBy,
-                           aggregation_method):
-                raise UnknownAggregationMethod(aggregation_method)
-            self.aggregation_method_func_name = aggregation_method
+        self.aggregation_method_func_name, self.q = self._get_agg_method(
+            aggregation_method)

        self.sampling = self._to_offset(sampling).nanos / 10e8
        self.max_size = max_size
        self.aggregation_method = aggregation_method
+        self._truncate(quick=True)

    @classmethod
-    def from_data(cls, sampling, timestamps=None, values=None,
-                  max_size=None, aggregation_method='mean'):
-        return cls(ts=pandas.Series(values, timestamps),
-                   max_size=max_size, sampling=sampling,
-                   aggregation_method=aggregation_method)
-
-    @classmethod
-    def get_split_key_datetime(cls, timestamp, sampling):
-        return cls._round_timestamp(
-            timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8)
+    def from_data(cls, sampling, aggregation_method, timestamps=None,
+                  values=None, max_size=None):
+        return cls(sampling=sampling,
+                   aggregation_method=aggregation_method,
+                   ts=pandas.Series(values, timestamps),
+                   max_size=max_size)
-    @staticmethod
-    def _split_key_to_string(timestamp):
-        ts = timestamp.to_datetime()
-        if ts.tzinfo is None:
-            ts = ts.replace(tzinfo=iso8601.iso8601.UTC)
-        return str(utils.datetime_to_unix(ts))
-
-    @classmethod
-    def get_split_key(cls, timestamp, sampling):
-        return cls._split_key_to_string(
-            cls.get_split_key_datetime(timestamp, sampling))
+    @staticmethod
+    def _get_agg_method(aggregation_method):
+        q = None
+        m = AggregatedTimeSerie._AGG_METHOD_PCT_RE.match(aggregation_method)
+        if m:
+            q = float(m.group(1)) / 100
+            aggregation_method_func_name = 'quantile'
+        else:
+            if not hasattr(pandas.core.groupby.SeriesGroupBy,
+                           aggregation_method):
+                raise UnknownAggregationMethod(aggregation_method)
+            aggregation_method_func_name = aggregation_method
+        return aggregation_method_func_name, q

    def split(self):
        groupby = self.ts.groupby(functools.partial(
-            self.get_split_key_datetime, sampling=self.sampling))
-        keys = sorted(groupby.groups.keys())
-        for i, ts in enumerate(keys):
-            if i + 1 == len(keys):
-                yield self._split_key_to_string(ts), TimeSerie(self.ts[ts:])
-            elif i + 1 < len(keys):
-                t = self.ts[ts:keys[i + 1]]
-                del t[t.index[-1]]
-                yield self._split_key_to_string(ts), TimeSerie(t)
+            SplitKey.from_timestamp_and_sampling, sampling=self.sampling))
+        for group, ts in groupby:
+            yield (SplitKey._init(group, self.sampling),
+                   AggregatedTimeSerie(self.sampling, self.aggregation_method,
+                                       ts))
    @classmethod
-    def from_timeseries(cls, timeseries, sampling, max_size=None,
-                        aggregation_method='mean'):
+    def from_timeseries(cls, timeseries, sampling, aggregation_method,
+                        max_size=None):
        ts = pandas.Series()
        for t in timeseries:
            ts = ts.combine_first(t.ts)
-        return cls(ts=ts, sampling=sampling, max_size=max_size,
-                   aggregation_method=aggregation_method)
+        return cls(sampling=sampling,
+                   aggregation_method=aggregation_method,
+                   ts=ts, max_size=max_size)

+    @classmethod
+    def from_grouped_serie(cls, grouped_serie, sampling, aggregation_method,
+                           max_size=None):
+        agg_name, q = cls._get_agg_method(aggregation_method)
+        return cls(sampling, aggregation_method,
+                   ts=cls._resample_grouped(grouped_serie, agg_name,
+                                            q).dropna(),
+                   max_size=max_size)
+
    def __eq__(self, other):
        return (isinstance(other, AggregatedTimeSerie)
@@ -358,93 +475,121 @@ class AggregatedTimeSerie(TimeSerie):
            self.aggregation_method,
        )

+    @staticmethod
+    def is_compressed(serialized_data):
+        """Check whether the data was serialized with compression."""
+        return six.indexbytes(serialized_data, 0) == ord("c")
+
    @classmethod
-    def from_dict(cls, d):
-        """Build a time series from a dict.
-
-        The dict format must be datetime as key and values as values.
-
-        :param d: The dict.
-        :returns: A TimeSerie object
-        """
-        sampling = d.get('sampling')
-        if 'first_timestamp' in d:
-            prev_timestamp = pandas.Timestamp(d.get('first_timestamp') * 10e8)
-            timestamps = []
-            for delta in d.get('timestamps'):
-                prev_timestamp = datetime.timedelta(
-                    seconds=delta * sampling) + prev_timestamp
-                timestamps.append(prev_timestamp)
-        else:
-            # migrate from v1.3, remove with TimeSerieArchive
-            timestamps, d['values'] = (
-                cls._timestamps_and_values_from_dict(d['values']))
-
-        return cls.from_data(
-            timestamps=timestamps,
-            values=d.get('values'),
-            max_size=d.get('max_size'),
-            sampling=sampling,
-            aggregation_method=d.get('aggregation_method', 'mean'))
+    def unserialize(cls, data, start, agg_method, sampling):
+        x, y = [], []
+        start = float(start)
+        if data:
+            if cls.is_compressed(data):
+                # Compressed format
+                uncompressed = lz4.loads(memoryview(data)[1:].tobytes())
+                nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN
+                deserial = struct.unpack(
+                    '<' + 'H' * nb_points + 'd' * nb_points,
+                    uncompressed)
+                for delta in itertools.islice(deserial, nb_points):
+                    ts = start + (delta * sampling)
+                    y.append(ts)
+                    start = ts
+                x = deserial[nb_points:]
+            else:
+                # Padded format
+                nb_points = len(data) // cls.PADDED_SERIAL_LEN
+                # NOTE(gordc): use '<' for standardized
+                # little-endian byte order
+                deserial = struct.unpack('<' + '?d' * nb_points, data)
+                # alternating split into 2 lists; drop items with a False flag
+                for i, val in itertools.compress(
+                        six.moves.zip(six.moves.range(nb_points),
+                                      deserial[1::2]),
+                        deserial[::2]):
+                    x.append(val)
+                    y.append(start + (i * sampling))
+
+        y = pandas.to_datetime(y, unit='s')
+        return cls.from_data(sampling, agg_method, y, x)

-    def to_dict(self):
-        if self.ts.empty:
-            values = []
-            first_timestamp = 0
-        else:
-            first_timestamp = float(
-                self.get_split_key(self.ts.index[0], self.sampling))
-            timestamps = []
-            prev_timestamp = pandas.Timestamp(
-                first_timestamp * 10e8).to_pydatetime()
-            # Use double delta encoding for timestamps
-            for i in self.ts.index:
-                # Convert to pydatetime because it's faster to compute than
-                # Pandas' objects
-                asdt = i.to_pydatetime()
-                timestamps.append(
-                    int((asdt - prev_timestamp).total_seconds()
-                        / self.sampling))
-                prev_timestamp = asdt
+    def get_split_key(self, timestamp=None):
+        """Return the split key for a particular timestamp.
+
+        :param timestamp: If None, the first timestamp of the timeserie
+                          is used.
+        :return: A SplitKey object.
+        """
+        if timestamp is None:
+            timestamp = self.first
+        return SplitKey.from_timestamp_and_sampling(
+            timestamp, self.sampling)

+    def serialize(self, start, compressed=True):
+        """Serialize an aggregated timeserie.
+
+        The serialization starts with a byte that indicates the serialization
+        format: 'c' for the compressed format, '\x00' or '\x01' for the
+        uncompressed format. Both formats can be unserialized with the
+        `unserialize` method.
+
+        The offset returned indicates at which offset the data should be
+        written from. In the case of compressed data, this is always 0.
+
+        :param start: Timestamp to start serialization at.
+        :param compressed: Serialize in a compressed format.
+        :return: a tuple of (offset, data)
+
+        """
+        if not self.ts.index.is_monotonic:
+            self.ts = self.ts.sort_index()
+        offset_div = self.sampling * 10e8
+        start = pandas.Timestamp(start).value
+        if compressed:
+            # NOTE(jd) Use a delta encoding for timestamps
+            timestamps = []
+            for i in self.ts.index:
+                v = i.value
+                timestamps.append(int((v - start) // offset_div))
+                start = v
+            values = self.ts.values.tolist()
+            return None, b"c" + lz4.dumps(struct.pack(
+                '<' + 'H' * len(timestamps) + 'd' * len(values),
+                *(timestamps + values)))
+        # NOTE(gordc): this binary serializes series based on the split
+        # time. the format is 1B True/False flag which denotes whether
+        # subsequent 8B is a real float or zero padding. every 9B
+        # represents one second from start time. this is intended to be run
+        # on data already split. ie. False,0,True,0 serialization means
+        # start datapoint is padding, and 1s after start time, the
+        # aggregate value is 0. calculate how many seconds from start the
+        # series runs until and initialize list to store alternating
+        # delimiter, float entries
+        first = self.first.value  # NOTE(jd) needed because faster
+        e_offset = int(
+            (self.last.value - first) // offset_div) + 1
+        serial = [False] * e_offset * 2
+        for i, v in self.ts.iteritems():
+            # overwrite zero padding with real points and set flag True
+            loc = int((i.value - first) // offset_div) * 2
+            serial[loc] = True
+            serial[loc + 1] = v
+        offset = int((first - start) // offset_div) * self.PADDED_SERIAL_LEN
+        return offset, struct.pack('<' + '?d' * e_offset, *serial)
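
A standalone round-trip sketch of the padded '<?d' layout described in the
NOTE(gordc) comment above (an editorial aside, not part of the commit):

    import struct

    sampling = 1.0
    points = {0: 23.0, 2: 25.5}   # sample offsets from start -> values
    e_offset = max(points) + 1
    serial = [False, 0.0] * e_offset
    for i, v in points.items():
        serial[i * 2] = True      # flag: this slot holds a real value
        serial[i * 2 + 1] = v
    data = struct.pack("<" + "?d" * e_offset, *serial)

    decoded = struct.unpack("<" + "?d" * e_offset, data)
    restored = {i: decoded[i * 2 + 1]
                for i in range(e_offset) if decoded[i * 2]}
    assert restored == points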
-        return {
-            'first_timestamp': first_timestamp,
-            'aggregation_method': self.aggregation_method,
-            'max_size': self.max_size,
-            'sampling': self.sampling,
-            'timestamps': timestamps,
-            'values': values,
-        }
-
-    @classmethod
-    def unserialize(cls, data):
-        return cls.from_dict(msgpack.loads(lz4.loads(data), encoding='utf-8'))
-
-    def serialize(self):
-        return lz4.dumps(msgpack.dumps(self.to_dict()))

-    def _truncate(self):
+    def _truncate(self, quick=False):
        """Truncate the timeserie."""
        if self.max_size is not None:
            # Remove empty points if any that could be added by aggregation
-            self.ts = self.ts.dropna()[-self.max_size:]
+            self.ts = (self.ts[-self.max_size:] if quick
+                       else self.ts.dropna()[-self.max_size:])

-    def _resample(self, after):
-        # Group by the sampling, and then apply the aggregation method on
-        # the points after `after'
-        groupedby = self.ts[after:].groupby(
-            functools.partial(self._round_timestamp,
-                              freq=self.sampling * 10e8))
-        agg_func = getattr(groupedby, self.aggregation_method_func_name)
-        if self.aggregation_method_func_name == 'quantile':
-            aggregated = agg_func(self.q)
-        else:
-            aggregated = agg_func()
-        # Now combine the result with the rest of the point – everything
-        # that is before `after'
-        self.ts = aggregated.combine_first(self.ts[:after][:-1])
+    @staticmethod
+    def _resample_grouped(grouped_serie, agg_name, q=None):
+        agg_func = getattr(grouped_serie, agg_name)
+        return agg_func(q) if agg_name == 'quantile' else agg_func()

    def fetch(self, from_timestamp=None, to_timestamp=None):
        """Fetch aggregated time value.
@@ -457,7 +602,7 @@ class AggregatedTimeSerie(TimeSerie):
        if from_timestamp is None:
            from_ = None
        else:
-            from_ = self._round_timestamp(from_timestamp, self.sampling * 10e8)
+            from_ = round_timestamp(from_timestamp, self.sampling * 10e8)
        points = self[from_:to_timestamp]
        try:
            # Do not include stop timestamp
@@ -468,32 +613,88 @@ class AggregatedTimeSerie(TimeSerie):
                for timestamp, value
                in six.iteritems(points)]

-    def update(self, ts):
-        if ts.ts.empty:
-            return
-        ts.ts = self.clean_ts(ts.ts)
-        index = ts.ts.index
-        first_timestamp = index[0]
-        last_timestamp = index[-1]
-        # Build a new time serie excluding all data points in the range of the
-        # timeserie passed as argument
-        new_ts = self.ts.drop(self.ts[first_timestamp:last_timestamp].index)
-        # Build a new timeserie where we replaced the timestamp range covered
-        # by the timeserie passed as argument
-        self.ts = ts.ts.combine_first(new_ts)
-        # Resample starting from the first timestamp we received
-        # TODO(jd) So this only works correctly because we expect that we are
-        # not going to replace a range in the middle of our timeserie. So we re
-        # resample EVERYTHING FROM first timestamp. We should rather resample
-        # from first timestamp AND TO LAST TIMESTAMP!
-        self._resample(first_timestamp)
-        self._truncate()
+    def merge(self, ts):
+        """Merge a timeserie into this one.
+
+        This is equivalent to `update` but is faster as there is no
+        resampling. Be careful about what you merge.
+        """
+        self.ts = self.ts.combine_first(ts.ts)

+    @classmethod
+    def benchmark(cls):
+        """Run a speed benchmark!"""
+        points = SplitKey.POINTS_PER_SPLIT
+        sampling = 5
+        serialize_times = 50
+
+        now = datetime.datetime(2015, 4, 3, 23, 11)
+
+        print(cls.__name__)
+        print("=" * len(cls.__name__))
+
+        for title, values in [
+                ("Simple continuous range", six.moves.range(points)),
+                ("All 0", [float(0)] * points),
+                ("All 1", [float(1)] * points),
+                ("0 and 1", [0, 1] * (points // 2)),
+                ("1 and 0 random",
+                 [random.randint(0, 1)
+                  for x in six.moves.range(points)]),
+                ("Small number random pos/neg",
+                 [random.randint(-100000, 10000)
+                  for x in six.moves.range(points)]),
+                ("Small number random pos",
+                 [random.randint(0, 20000) for x in six.moves.range(points)]),
+                ("Small number random neg",
+                 [random.randint(-20000, 0) for x in six.moves.range(points)]),
+                ("Sin(x)", map(math.sin, six.moves.range(points))),
+                ("random ", [random.random()
+                             for x in six.moves.range(points)]),
+        ]:
+            print(title)
+            pts = pandas.Series(values,
+                                [now + datetime.timedelta(seconds=i*sampling)
+                                 for i in six.moves.range(points)])
+            ts = cls(ts=pts, sampling=sampling, aggregation_method='mean')
+            t0 = time.time()
+            key = ts.get_split_key()
+            for i in six.moves.range(serialize_times):
+                e, s = ts.serialize(key, compressed=False)
+            t1 = time.time()
+            print("  Uncompressed serialization speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
+            print("  Bytes per point: %.2f" % (len(s) / float(points)))
+
+            t0 = time.time()
+            for i in six.moves.range(serialize_times):
+                cls.unserialize(s, key, 'mean', sampling)
+            t1 = time.time()
+            print("  Unserialization speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
+
+            t0 = time.time()
+            for i in six.moves.range(serialize_times):
+                o, s = ts.serialize(key, compressed=True)
+            t1 = time.time()
+            print("  Compressed serialization speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
+            print("  Bytes per point: %.2f" % (len(s) / float(points)))
+
+            t0 = time.time()
+            for i in six.moves.range(serialize_times):
+                cls.unserialize(s, key, 'mean', sampling)
+            t1 = time.time()
+            print("  Uncompression speed: %.2f MB/s"
+                  % (((points * 2 * 8)
+                      / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0)))
    @staticmethod
-    def aggregated(timeseries, from_timestamp=None, to_timestamp=None,
-                   aggregation='mean', needed_percent_of_overlap=100.0):
+    def aggregated(timeseries, aggregation, from_timestamp=None,
+                   to_timestamp=None, needed_percent_of_overlap=100.0):

        index = ['timestamp', 'granularity']
        columns = ['timestamp', 'granularity', 'value']
@@ -591,70 +792,10 @@ class AggregatedTimeSerie(TimeSerie):
                for __, timestamp, granularity, value in points]


-class TimeSerieArchive(SerializableMixin):
-
-    def __init__(self, agg_timeseries):
-        """A raw data buffer and a collection of downsampled timeseries.
-
-        Used to represent the set of AggregatedTimeSeries for the range of
-        granularities supported for a metric (for a particular aggregation
-        function).
-
-        """
-        self.agg_timeseries = sorted(agg_timeseries,
-                                     key=operator.attrgetter("sampling"))
-
-    @classmethod
-    def from_definitions(cls, definitions, aggregation_method='mean'):
-        """Create a new collection of archived time series.
-
-        :param definition: A list of tuple (sampling, max_size)
-        :param aggregation_method: Aggregation function to use.
-        """
-        # Limit the main timeserie to a timespan mapping
-        return cls(
-            [AggregatedTimeSerie(
-                max_size=size,
-                sampling=sampling,
-                aggregation_method=aggregation_method)
-             for sampling, size in definitions]
-        )
-
-    def fetch(self, from_timestamp=None, to_timestamp=None,
-              timeserie_filter=None):
-        """Fetch aggregated time value.
-
-        Returns a sorted list of tuples (timestamp, granularity, value).
-        """
-        result = []
-        end_timestamp = to_timestamp
-        for ts in reversed(self.agg_timeseries):
-            if timeserie_filter and not timeserie_filter(ts):
-                continue
-            points = ts[from_timestamp:to_timestamp]
-            try:
-                # Do not include stop timestamp
-                del points[end_timestamp]
-            except KeyError:
-                pass
-            result.extend([(timestamp, ts.sampling, value)
-                           for timestamp, value
-                           in six.iteritems(points)])
-        return result
-
-    def update(self, timeserie):
-        for agg in self.agg_timeseries:
-            agg.update(timeserie)
-
-    def to_dict(self):
-        return {
-            "archives": [ts.to_dict() for ts in self.agg_timeseries],
-        }
-
-    def __eq__(self, other):
-        return (isinstance(other, TimeSerieArchive)
-                and self.agg_timeseries == other.agg_timeseries)
-
-    @classmethod
-    def from_dict(cls, d):
-        return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']])
+if __name__ == '__main__':
+    import sys
+    args = sys.argv[1:]
+    if not args or "--boundtimeserie" in args:
+        BoundTimeSerie.benchmark()
+    if not args or "--aggregatedtimeserie" in args:
+        AggregatedTimeSerie.benchmark()

gnocchi/cli.py
@@ -1,5 +1,5 @@
# Copyright (c) 2013 Mirantis Inc.
-# Copyright (c) 2015 Red Hat
+# Copyright (c) 2015-2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,24 +13,30 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import logging
-import multiprocessing
-import signal
-import sys
import threading
import time
import uuid

+import cotyledon
+from futurist import periodics
+import msgpack
from oslo_config import cfg
+from oslo_log import log
from oslo_utils import timeutils
import retrying
import six
+import tooz
from tooz import coordination

from gnocchi import archive_policy
from gnocchi import indexer
from gnocchi.rest import app
from gnocchi import service
from gnocchi import statsd as statsd_service
from gnocchi import storage
from gnocchi import utils


-LOG = logging.getLogger(__name__)
+LOG = log.getLogger(__name__)


def upgrade():
@@ -39,43 +45,47 @@ def upgrade():
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
-                   help="Skip storage upgrade.")
+                   help="Skip storage upgrade."),
+        cfg.BoolOpt("skip-archive-policies-creation", default=False,
+                    help="Skip default archive policies creation."),
+        cfg.BoolOpt("create-legacy-resource-types", default=False,
+                    help="Creation of Ceilometer legacy resource types.")
    ])
    conf = service.prepare_service(conf=conf)
-    index = indexer.get_driver(conf)
-    index.connect()
    if not conf.skip_index:
+        index = indexer.get_driver(conf)
+        index.connect()
        LOG.info("Upgrading indexer %s" % index)
-        index.upgrade()
+        index.upgrade(
+            create_legacy_resource_types=conf.create_legacy_resource_types)
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)

-
-def api():
-    app.build_server()
+    if (not conf.skip_archive_policies_creation
+            and not index.list_archive_policies()
+            and not index.list_archive_policy_rules()):
+        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
+            index.create_archive_policy(ap)
+        index.create_archive_policy_rule("default", "*", "low")


def statsd():
    statsd_service.start()


-class MetricProcessBase(multiprocessing.Process):
-    def __init__(self, conf, worker_id=0, interval_delay=0):
-        super(MetricProcessBase, self).__init__()
+class MetricProcessBase(cotyledon.Service):
+    def __init__(self, worker_id, conf, interval_delay=0):
+        super(MetricProcessBase, self).__init__(worker_id)
        self.conf = conf
-        self.worker_id = worker_id
+        self.startup_delay = worker_id
        self.interval_delay = interval_delay
+        self._shutdown = threading.Event()
+        self._shutdown_done = threading.Event()

    # Retry with exponential backoff for up to 5 minutes
    @retrying.retry(wait_exponential_multiplier=500,
                    wait_exponential_max=60000,
                    stop_max_delay=300000)
    def _configure(self):
        self.store = storage.get_driver(self.conf)
        self.store.partition = self.worker_id
        self.index = indexer.get_driver(self.conf)
        self.index.connect()

@@ -84,15 +94,22 @@ class MetricProcessBase(multiprocessing.Process):
        # Delay startup so workers are jittered.
        time.sleep(self.startup_delay)

-        while True:
-            try:
-                with timeutils.StopWatch() as timer:
-                    self._run_job()
-                time.sleep(max(0, self.interval_delay - timer.elapsed()))
-            except KeyboardInterrupt:
-                # Ignore KeyboardInterrupt so parent handler can kill
-                # all children.
-                pass
+        while not self._shutdown.is_set():
+            with timeutils.StopWatch() as timer:
+                self._run_job()
+            self._shutdown.wait(max(0, self.interval_delay -
+                                    timer.elapsed()))
+        self._shutdown_done.set()
+
+    def terminate(self):
+        self._shutdown.set()
+        self.close_services()
+        LOG.info("Waiting ongoing metric processing to finish")
+        self._shutdown_done.wait()
+
+    @staticmethod
+    def close_services():
+        pass

    @staticmethod
    def _run_job():
@@ -100,19 +117,16 @@ class MetricProcessBase(multiprocessing.Process):


class MetricReporting(MetricProcessBase):
-    def __init__(self, conf, worker_id=0, interval_delay=0, queues=None):
-        super(MetricReporting, self).__init__(conf, worker_id, interval_delay)
-        self.queues = queues
+    name = "reporting"
+
+    def __init__(self, worker_id, conf):
+        super(MetricReporting, self).__init__(
+            worker_id, conf, conf.storage.metric_reporting_delay)

    def _run_job(self):
        try:
            report = self.store.measures_report(details=False)
-            if self.queues:
-                block_size = max(16, min(
-                    256, report['summary']['metrics'] // len(self.queues)))
-                for queue in self.queues:
-                    queue.put(block_size)
-            LOG.info("Metricd reporting: %d measurements bundles across %d "
+            LOG.info("%d measurements bundles across %d "
                     "metrics wait to be processed.",
                     report['summary']['measures'],
                     report['summary']['metrics'])
@@ -121,70 +135,171 @@ class MetricReporting(MetricProcessBase):
                      exc_info=True)


class MetricProcessor(MetricProcessBase):
    def __init__(self, conf, worker_id=0, interval_delay=0, queue=None):
        super(MetricProcessor, self).__init__(conf, worker_id, interval_delay)
class MetricScheduler(MetricProcessBase):
    name = "scheduler"
    MAX_OVERLAP = 0.3
    GROUP_ID = "gnocchi-scheduler"
    SYNC_RATE = 30
    TASKS_PER_WORKER = 16
    BLOCK_SIZE = 4

    def _enable_coordination(self, conf):
        self._coord = coordination.get_coordinator(
            conf.storage.coordination_url, self._my_id)
        self._coord.start(start_heart=True)

    def __init__(self, worker_id, conf, queue):
        super(MetricScheduler, self).__init__(
            worker_id, conf, conf.storage.metric_processing_delay)
        self._my_id = str(uuid.uuid4())
        self._enable_coordination(conf)
        self.queue = queue
        self.block_size = 128
        self.previously_scheduled_metrics = set()
        self.workers = conf.metricd.workers
        self.block_index = 0
        self.block_size_default = self.workers * self.TASKS_PER_WORKER
        self.block_size = self.block_size_default
        self.periodic = None

    def set_block(self, event):
        get_members_req = self._coord.get_members(self.GROUP_ID)
        try:
            members = sorted(get_members_req.get())
            self.block_index = members.index(self._my_id)
            reqs = list(self._coord.get_member_capabilities(self.GROUP_ID, m)
                        for m in members)
            for req in reqs:
                cap = msgpack.loads(req.get(), encoding='utf-8')
                max_workers = max(cap['workers'], self.workers)
            self.block_size = max_workers * self.TASKS_PER_WORKER
            LOG.info('New set of agents detected. Now working on block: %s, '
                     'with up to %s metrics', self.block_index,
                     self.block_size)
        except Exception:
            LOG.warning('Error getting block to work on, defaulting to first')
            self.block_index = 0
            self.block_size = self.block_size_default

    @utils.retry
    def _configure(self):
        super(MetricScheduler, self)._configure()
        try:
            cap = msgpack.dumps({'workers': self.workers})
            join_req = self._coord.join_group(self.GROUP_ID, cap)
            join_req.get()
            LOG.info('Joined coordination group: %s', self.GROUP_ID)
            self.set_block(None)

            @periodics.periodic(spacing=self.SYNC_RATE, run_immediately=True)
            def run_watchers():
                self._coord.run_watchers()

            self.periodic = periodics.PeriodicWorker.create([])
            self.periodic.add(run_watchers)
            t = threading.Thread(target=self.periodic.start)
            t.daemon = True
            t.start()

            self._coord.watch_join_group(self.GROUP_ID, self.set_block)
            self._coord.watch_leave_group(self.GROUP_ID, self.set_block)
        except coordination.GroupNotCreated as e:
            create_group_req = self._coord.create_group(self.GROUP_ID)
            try:
                create_group_req.get()
            except coordination.GroupAlreadyExist:
                pass
            raise utils.Retry(e)
        except tooz.NotImplemented:
            LOG.warning('Configured coordination driver does not support '
                        'required functionality. Coordination is disabled.')
        except Exception as e:
            LOG.error('Failed to configure coordination. Coordination is '
                      'disabled: %s', e)

    def _run_job(self):
        try:
            if self.queue:
                while not self.queue.empty():
                    self.block_size = self.queue.get()
                    LOG.debug("Re-configuring worker to handle up to %s "
                              "metrics", self.block_size)
            self.store.process_background_tasks(self.index, self.block_size)
            metrics = set(self.store.list_metric_with_measures_to_process(
                self.block_size, self.block_index))
            if metrics and not self.queue.empty():
                # NOTE(gordc): drop metrics we previously processed to avoid
                # handling them twice
                number_of_scheduled_metrics = len(metrics)
                metrics = metrics - self.previously_scheduled_metrics
                if (float(number_of_scheduled_metrics - len(metrics)) /
                        self.block_size > self.MAX_OVERLAP):
                    LOG.warning('Metric processing lagging scheduling rate. '
                                'It is recommended to increase the number of '
                                'workers or to lengthen the processing '
                                'interval.')
            metrics = list(metrics)
            for i in six.moves.range(0, len(metrics), self.BLOCK_SIZE):
                self.queue.put(metrics[i:i + self.BLOCK_SIZE])
            self.previously_scheduled_metrics = set(metrics)
            LOG.debug("%d metrics scheduled for processing.", len(metrics))
        except Exception:
            LOG.error("Unexpected error scheduling metrics for processing",
                      exc_info=True)

    def close_services(self):
        if self.periodic:
            self.periodic.stop()
            self.periodic.wait()
        self._coord.leave_group(self.GROUP_ID)
        self._coord.stop()
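The scheduler's group-membership logic amounts to static partitioning: sorting the member list gives every agent the same ordering, each agent takes the block at its own index, and any join or leave re-triggers set_block so the partitions shift together. A rough standalone sketch of the idea (the hash-based ordering here is an illustrative assumption; the real code delegates ordering to the storage driver):

import hashlib
import uuid


def my_block(metric_ids, member_ids, my_id, block_size):
    """Return the slice of metrics this member should schedule.

    Members agree on their ordering by sorting the member list; each
    one takes the block matching its own position.
    """
    block_index = sorted(member_ids).index(my_id)
    # Hash so the assignment is stable regardless of insertion order.
    ordered = sorted(metric_ids,
                     key=lambda m: hashlib.md5(str(m).encode()).hexdigest())
    start = block_index * block_size
    return ordered[start:start + block_size]


members = [str(uuid.uuid4()) for _ in range(3)]
metrics = [uuid.uuid4() for _ in range(10)]
for m in members:
    print(m, my_block(metrics, members, m, block_size=4))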
class MetricJanitor(MetricProcessBase):
    name = "janitor"

    def __init__(self, worker_id, conf):
        super(MetricJanitor, self).__init__(
            worker_id, conf, conf.storage.metric_cleanup_delay)

    def _run_job(self):
        try:
            self.store.expunge_metrics(self.index)
            LOG.debug("Metrics marked for deletion removed from backend")
        except Exception:
            LOG.error("Unexpected error during metric cleanup", exc_info=True)


class MetricProcessor(MetricProcessBase):
    name = "processing"

    def __init__(self, worker_id, conf, queue):
        super(MetricProcessor, self).__init__(worker_id, conf, 0)
        self.queue = queue

    def _run_job(self):
        try:
            try:
                metrics = self.queue.get(block=True, timeout=10)
            except six.moves.queue.Empty:
                # NOTE(sileht): Allow the process to exit gracefully every
                # 10 seconds
                return
            self.store.process_background_tasks(self.index, metrics)
        except Exception:
            LOG.error("Unexpected error during measures processing",
                      exc_info=True)
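Blocking on queue.get with a timeout is what keeps the processor responsive to shutdown: a worker parked forever on an empty queue could never notice a stop request. A minimal sketch of that consumer loop on its own (the handle callback is hypothetical):

import queue
import threading


def consumer(work_queue, shutdown, handle):
    """Drain items until shutdown; wake up every 10s to re-check."""
    while not shutdown.is_set():
        try:
            item = work_queue.get(block=True, timeout=10)
        except queue.Empty:
            continue  # no work yet; loop back and re-check shutdown
        handle(item)


q = queue.Queue()
stop = threading.Event()
t = threading.Thread(target=consumer, args=(q, stop, print))
t.start()
q.put("measure-batch-1")
stop.set()
t.join(timeout=15)  # exits within one timeout period at most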
class MetricdServiceManager(cotyledon.ServiceManager):
    def __init__(self, conf):
        super(MetricdServiceManager, self).__init__()
        self.conf = conf
        self.queue = multiprocessing.Manager().Queue()

        self.add(MetricScheduler, args=(self.conf, self.queue))
        self.add(MetricProcessor, args=(self.conf, self.queue),
                 workers=conf.metricd.workers)
        self.add(MetricReporting, args=(self.conf,))
        self.add(MetricJanitor, args=(self.conf,))

    def run(self):
        super(MetricdServiceManager, self).run()
        self.queue.close()
def metricd():
    conf = service.prepare_service()

    if (conf.storage.metric_reporting_delay <
            conf.storage.metric_processing_delay):
        LOG.error("Metric reporting must run less frequently than processing")
        sys.exit(0)

    signal.signal(signal.SIGTERM, _metricd_terminate)

    try:
        queues = []
        workers = []
        for worker in range(conf.metricd.workers):
            queue = multiprocessing.Queue()
            metric_worker = MetricProcessor(
                conf, worker, conf.storage.metric_processing_delay, queue)
            metric_worker.start()
            queues.append(queue)
            workers.append(metric_worker)

        metric_report = MetricReporting(
            conf, 0, conf.storage.metric_reporting_delay, queues)
        metric_report.start()
        workers.append(metric_report)

        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        _metricd_cleanup(workers)
        sys.exit(0)
    except Exception:
        LOG.warning("exiting", exc_info=True)
        _metricd_cleanup(workers)
        sys.exit(1)


def _metricd_cleanup(workers):
    for worker in workers:
        if hasattr(worker, 'queue'):
            worker.queue.close()
        worker.terminate()
    for worker in workers:
        worker.join()


def _metricd_terminate(signum, frame):
    _metricd_cleanup(multiprocessing.active_children())
    sys.exit(0)
    MetricdServiceManager(conf).run()
@@ -2,8 +2,6 @@
#
# Copyright © 2014 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at


@@ -19,6 +19,6 @@ def prehook(cmd):
    try:
        from oslo_config import generator
        generator.main(['--config-file',
                        'gnocchi-config-generator.conf'])
                        'etc/gnocchi/gnocchi-config-generator.conf'])
    except Exception as e:
        print("Unable to build sample configuration file: %s" % e)
@@ -31,8 +31,8 @@ _RUN = False

def _setup_test_app():
    t = test_rest.RestTest()
    t.skip_archive_policies_creation = True
    t.auth = True
    t.setUpClass()
    t.setUp()
    return t.app


@@ -116,6 +116,16 @@ def setup(app):
        fake_file.write(template.render(scenarios=scenarios).encode('utf-8'))
        fake_file.seek(0)
        request = webapp.RequestClass.from_file(fake_file)

        # TODO(jd) Fix this lame bug in webob
        if request.method in ("DELETE",):
            # WebOb has a bug: it does not read the body for DELETE
            clen = request.content_length
            if clen is None:
                request.body = fake_file.read()
            else:
                request.body = fake_file.read(clen)

        app.info("Doing request %s: %s" % (entry['name'],
                                           six.text_type(request)))
        with webapp.use_admin_user():
@@ -13,12 +13,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import hashlib
import os

import iso8601
from oslo_config import cfg
from oslo_utils import fnmatch
from oslo_utils import netutils
import six
from stevedore import driver
@@ -87,7 +87,7 @@ class NoSuchResourceType(IndexerException):
    """Error raised when the resource type is unknown."""
    def __init__(self, type):
        super(NoSuchResourceType, self).__init__(
            "Resource type %s does not exist" % str(type))
            "Resource type %s does not exist" % type)
        self.type = type


@@ -95,7 +95,7 @@ class NoSuchMetric(IndexerException):
    """Error raised when a metric does not exist."""
    def __init__(self, metric):
        super(NoSuchMetric, self).__init__("Metric %s does not exist" %
                                           str(metric))
                                           metric)
        self.metric = metric


@@ -103,7 +103,7 @@ class NoSuchResource(IndexerException):
    """Error raised when a resource does not exist."""
    def __init__(self, resource):
        super(NoSuchResource, self).__init__("Resource %s does not exist" %
                                             str(resource))
                                             resource)
        self.resource = resource
@@ -111,11 +111,20 @@ class NoSuchArchivePolicy(IndexerException):
    """Error raised when an archive policy does not exist."""
    def __init__(self, archive_policy):
        super(NoSuchArchivePolicy, self).__init__(
            "Archive policy %s does not exist" %
            str(archive_policy))
            "Archive policy %s does not exist" % archive_policy)
        self.archive_policy = archive_policy


class UnsupportedArchivePolicyChange(IndexerException):
    """Error raised when an archive policy change is not supported."""
    def __init__(self, archive_policy, message):
        super(UnsupportedArchivePolicyChange, self).__init__(
            "Archive policy %s does not support change: %s" %
            (archive_policy, message))
        self.archive_policy = archive_policy
        self.message = message


class ArchivePolicyInUse(IndexerException):
    """Error raised when an archive policy is still being used."""
    def __init__(self, archive_policy):
@@ -124,12 +133,31 @@ class ArchivePolicyInUse(IndexerException):
        self.archive_policy = archive_policy


class ResourceTypeInUse(IndexerException):
    """Error raised when a resource type is still being used."""
    def __init__(self, resource_type):
        super(ResourceTypeInUse, self).__init__(
            "Resource type %s is still in use" % resource_type)
        self.resource_type = resource_type


class UnexpectedResourceTypeState(IndexerException):
    """Error raised when a resource type state is not the expected one."""
    def __init__(self, resource_type, expected_state, state):
        super(UnexpectedResourceTypeState, self).__init__(
            "Resource type %s state is %s (expected: %s)" % (
                resource_type, state, expected_state))
        self.resource_type = resource_type
        self.expected_state = expected_state
        self.state = state


class NoSuchArchivePolicyRule(IndexerException):
    """Error raised when an archive policy rule does not exist."""
    def __init__(self, archive_policy_rule):
        super(NoSuchArchivePolicyRule, self).__init__(
            "Archive policy rule %s does not exist" %
            str(archive_policy_rule))
            archive_policy_rule)
        self.archive_policy_rule = archive_policy_rule


@@ -138,7 +166,7 @@ class NoArchivePolicyRuleMatch(IndexerException):
    def __init__(self, metric_name):
        super(NoArchivePolicyRuleMatch, self).__init__(
            "No archive policy rule found for metric %s" %
            str(metric_name))
            metric_name)
        self.metric_name = metric_name
@@ -158,6 +186,14 @@ class ResourceAlreadyExists(IndexerException):
        self.resource = resource


class ResourceTypeAlreadyExists(IndexerException):
    """Error raised when a resource type already exists."""
    def __init__(self, resource_type):
        super(ResourceTypeAlreadyExists, self).__init__(
            "Resource type %s already exists" % resource_type)
        self.resource_type = resource_type


class ResourceAttributeError(IndexerException, AttributeError):
    """Error raised when an attribute does not exist for a resource type."""
    def __init__(self, resource, attribute):
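Each of these exceptions carries the offending identifier as a structured attribute, so callers can react without parsing the message string. A small, hypothetical caller-side sketch (the abort helper stands in for whatever error reporting the caller uses; it is not part of this API):

# Hypothetical usage; `index` is an indexer driver instance and
# `abort` stands in for the caller's error helper.
def create_resource_type_safely(index, resource_type, abort):
    try:
        return index.create_resource_type(resource_type)
    except ResourceTypeAlreadyExists as e:
        # e.resource_type holds the conflicting name as data.
        abort(409, "resource type %s already exists" % e.resource_type)
    except UnexpectedResourceTypeState as e:
        abort(400, "resource type %s is in state %s" % (e.resource_type,
                                                        e.state))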
@@ -238,7 +274,7 @@ class IndexerDriver(object):
        pass

    @staticmethod
    def upgrade(nocreate=False):
    def upgrade(nocreate=False, create_legacy_resource_types=False):
        pass

    @staticmethod

@@ -269,6 +305,10 @@ class IndexerDriver(object):
    def get_archive_policy(name):
        raise exceptions.NotImplementedError

    @staticmethod
    def update_archive_policy(name, ap_items):
        raise exceptions.NotImplementedError

    @staticmethod
    def delete_archive_policy(name):
        raise exceptions.NotImplementedError

@@ -291,12 +331,13 @@ class IndexerDriver(object):

    @staticmethod
    def create_metric(id, created_by_user_id, created_by_project_id,
                      archive_policy_name, name=None, resource_id=None):
                      archive_policy_name, name=None, unit=None,
                      resource_id=None):
        raise exceptions.NotImplementedError

    @staticmethod
    def list_metrics(names=None, ids=None, details=False, status='active',
                     **kwargs):
                     limit=None, marker=None, sorts=None, **kwargs):
        raise exceptions.NotImplementedError

    @staticmethod

@@ -321,6 +362,11 @@ class IndexerDriver(object):
    def delete_resource(uuid):
        raise exceptions.NotImplementedError

    @staticmethod
    def delete_resources(resource_type='generic',
                         attribute_filter=None):
        raise exceptions.NotImplementedError

    @staticmethod
    def delete_metric(id):
        raise exceptions.NotImplementedError

@@ -336,3 +382,30 @@ class IndexerDriver(object):
        if fnmatch.fnmatch(metric_name or "", rule.metric_pattern):
            return self.get_archive_policy(rule.archive_policy_name)
        raise NoArchivePolicyRuleMatch(metric_name)

    @staticmethod
    def create_resource_type(resource_type):
        raise exceptions.NotImplementedError

    @staticmethod
    def get_resource_type(name):
        """Get a resource type from the indexer.

        :param name: name of the resource type
        """
        raise exceptions.NotImplementedError

    @staticmethod
    def list_resource_types(attribute_filter=None,
                            limit=None,
                            marker=None,
                            sorts=None):
        raise exceptions.NotImplementedError

    @staticmethod
    def get_resource_attributes_schemas():
        raise exceptions.NotImplementedError

    @staticmethod
    def get_resource_type_schema():
        raise exceptions.NotImplementedError
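list_metrics and list_resource_types now take limit, marker and sorts, which enables keyset pagination over large catalogs. A sketch of how a driver consumer might walk every metric page by page — assuming, as an illustration only, that the marker is the last item's id, that an empty page ends the scan, and that sorts takes "field:direction" strings:

def iter_all_metrics(index, page_size=100):
    """Yield every metric, fetching `page_size` rows at a time."""
    marker = None
    while True:
        page = index.list_metrics(limit=page_size, marker=marker,
                                  sorts=["id:asc"])
        if not page:
            return
        for metric in page:
            yield metric
        # Keyset pagination: resume strictly after the last row seen.
        marker = str(page[-1].id)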
@@ -25,7 +25,6 @@ from alembic import op
import sqlalchemy as sa
${imports if imports else ""}


# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
@@ -0,0 +1,54 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Add tablename to resource_type

Revision ID: 0718ed97e5b3
Revises: 828c16f70cce
Create Date: 2016-01-20 08:14:04.893783

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '0718ed97e5b3'
down_revision = '828c16f70cce'
branch_labels = None
depends_on = None


def upgrade():
    op.add_column("resource_type", sa.Column('tablename', sa.String(18),
                                             nullable=True))

    resource_type = sa.Table(
        'resource_type', sa.MetaData(),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('tablename', sa.String(18), nullable=True)
    )
    op.execute(resource_type.update().where(
        resource_type.c.name == "instance_network_interface"
    ).values({'tablename': op.inline_literal("'instance_net_int'")}))
    op.execute(resource_type.update().where(
        resource_type.c.name != "instance_network_interface"
    ).values({'tablename': resource_type.c.name}))

    op.alter_column("resource_type", "tablename", type_=sa.String(18),
                    nullable=False)
    op.create_unique_constraint("uniq_resource_type0tablename",
                                "resource_type", ["tablename"])
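Note the quoting bug in the first update above: op.inline_literal("'instance_net_int'") renders the literal together with its embedded quote characters, so the stored tablename ends up as 'instance_net_int' (quotes included) rather than instance_net_int. The later ffc7bbeec0b0 migration in this series repairs the row. A plain bound value is all that was needed:

# What the migration intended: store the bare string and let
# SQLAlchemy handle quoting when the statement is compiled.
op.execute(resource_type.update().where(
    resource_type.c.name == "instance_network_interface"
).values({'tablename': 'instance_net_int'}))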
@@ -0,0 +1,89 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Add updating resource type states

Revision ID: 27d2a1d205ff
Revises: 7e6f9d542f8b
Create Date: 2016-08-31 14:05:34.316496

"""

from alembic import op
import sqlalchemy as sa

from gnocchi.indexer import sqlalchemy_base
from gnocchi import utils

# revision identifiers, used by Alembic.
revision = '27d2a1d205ff'
down_revision = '7e6f9d542f8b'
branch_labels = None
depends_on = None


resource_type = sa.sql.table(
    'resource_type',
    sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp()))

state_enum = sa.Enum("active", "creating",
                     "creation_error", "deleting",
                     "deletion_error", "updating",
                     "updating_error",
                     name="resource_type_state_enum")


def upgrade():

    op.alter_column('resource_type', 'state',
                    type_=state_enum,
                    nullable=False,
                    server_default=None)

    # NOTE(sileht): PostgreSQL has a builtin ENUM type, so just
    # altering the column won't work.
    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
    # Does it break offline migration because we use get_bind()?

    # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in a transaction,
    # we split the 'ALTER TYPE' operation into several steps.
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        op.execute("ALTER TYPE resource_type_state_enum RENAME TO \
                   old_resource_type_state_enum")
        op.execute("CREATE TYPE resource_type_state_enum AS ENUM \
                   ('active', 'creating', 'creation_error', \
                   'deleting', 'deletion_error', 'updating', \
                   'updating_error')")
        op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \
                   resource_type_state_enum USING \
                   state::text::resource_type_state_enum")
        op.execute("DROP TYPE old_resource_type_state_enum")

    # NOTE(sileht): we can't alter the type with server_default set on
    # PostgreSQL...
    op.alter_column('resource_type', 'state',
                    type_=state_enum,
                    nullable=False,
                    server_default="creating")
    op.add_column("resource_type",
                  sa.Column("updated_at",
                            sqlalchemy_base.PreciseTimestamp(),
                            nullable=True))

    op.execute(resource_type.update().values({'updated_at': utils.utcnow()}))
    op.alter_column("resource_type", "updated_at",
                    type_=sqlalchemy_base.PreciseTimestamp(),
                    nullable=False)
@@ -0,0 +1,39 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""drop_useless_enum

Revision ID: 2e0b912062d1
Revises: 34c517bcc2dd
Create Date: 2016-04-15 07:29:38.492237

"""

from alembic import op


# revision identifiers, used by Alembic.
revision = '2e0b912062d1'
down_revision = '34c517bcc2dd'
branch_labels = None
depends_on = None


def upgrade():
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        # NOTE(sileht): we use IF EXISTS because if the database has
        # been created from scratch with 2.1 the enum doesn't exist
        op.execute("DROP TYPE IF EXISTS resource_type_enum")
@@ -0,0 +1,91 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""shorter_foreign_key

Revision ID: 34c517bcc2dd
Revises: ed9c6ddc5c35
Create Date: 2016-04-13 16:58:42.536431

"""

from alembic import op
import sqlalchemy

# revision identifiers, used by Alembic.
revision = '34c517bcc2dd'
down_revision = 'ed9c6ddc5c35'
branch_labels = None
depends_on = None


resource_type_helper = sqlalchemy.Table(
    'resource_type',
    sqlalchemy.MetaData(),
    sqlalchemy.Column('tablename', sqlalchemy.String(18), nullable=False)
)

to_rename = [
    ('fk_metric_archive_policy_name_archive_policy_name',
     'fk_metric_ap_name_ap_name',
     'archive_policy', 'name',
     'metric', 'archive_policy_name',
     "RESTRICT"),
    ('fk_resource_history_resource_type_name',
     'fk_rh_resource_type_name',
     'resource_type', 'name', 'resource_history', 'type',
     "RESTRICT"),
    ('fk_resource_history_id_resource_id',
     'fk_rh_id_resource_id',
     'resource', 'id', 'resource_history', 'id',
     "CASCADE"),
    ('fk_archive_policy_rule_archive_policy_name_archive_policy_name',
     'fk_apr_ap_name_ap_name',
     'archive_policy', 'name', 'archive_policy_rule', 'archive_policy_name',
     "RESTRICT")
]


def upgrade():
    connection = op.get_bind()

    insp = sqlalchemy.inspect(connection)

    op.alter_column("resource_type", "tablename",
                    type_=sqlalchemy.String(35),
                    existing_type=sqlalchemy.String(18), nullable=False)

    for rt in connection.execute(resource_type_helper.select()):
        if rt.tablename == "generic":
            continue

        fk_names = [fk['name'] for fk in insp.get_foreign_keys("%s_history" %
                                                               rt.tablename)]
        fk_old = ("fk_%s_history_resource_history_revision" %
                  rt.tablename)
        if fk_old not in fk_names:
            # The table has been created from scratch recently
            fk_old = ("fk_%s_history_revision_resource_history_revision" %
                      rt.tablename)

        fk_new = "fk_%s_h_revision_rh_revision" % rt.tablename
        to_rename.append((fk_old, fk_new, 'resource_history', 'revision',
                          "%s_history" % rt.tablename, 'revision', 'CASCADE'))

    for (fk_old, fk_new, src_table, src_col, dst_table, dst_col, ondelete
         ) in to_rename:
        op.drop_constraint(fk_old, dst_table, type_="foreignkey")
        op.create_foreign_key(fk_new, dst_table, src_table,
                              [dst_col], [src_col], ondelete=ondelete)
@@ -0,0 +1,43 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""resource_type state column

Revision ID: 7e6f9d542f8b
Revises: c62df18bf4ee
Create Date: 2016-05-19 16:52:58.939088

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '7e6f9d542f8b'
down_revision = 'c62df18bf4ee'
branch_labels = None
depends_on = None


def upgrade():
    states = ("active", "creating", "creation_error", "deleting",
              "deletion_error")
    enum = sa.Enum(*states, name="resource_type_state_enum")
    enum.create(op.get_bind(), checkfirst=False)
    op.add_column("resource_type",
                  sa.Column('state', enum, nullable=False,
                            server_default="creating"))
    rt = sa.sql.table('resource_type', sa.sql.column('state', enum))
    op.execute(rt.update().values(state="active"))
@@ -0,0 +1,85 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""create resource_type table

Revision ID: 828c16f70cce
Revises: 9901e5ea4b6e
Create Date: 2016-01-19 12:47:19.384127

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '828c16f70cce'
down_revision = '9901e5ea4b6e'
branch_labels = None
depends_on = None


type_string = sa.String(255)
type_enum = sa.Enum('generic', 'instance',
                    'swift_account', 'volume',
                    'ceph_account', 'network',
                    'identity', 'ipmi', 'stack',
                    'image', 'instance_disk',
                    'instance_network_interface',
                    'host', 'host_disk',
                    'host_network_interface',
                    name="resource_type_enum")


def type_string_col(name, table):
    return sa.Column(
        name, type_string,
        sa.ForeignKey('resource_type.name',
                      ondelete="RESTRICT",
                      name="fk_%s_resource_type_name" % table))


def type_enum_col(name):
    return sa.Column(name, type_enum,
                     nullable=False, default='generic')


def upgrade():
    resource_type = op.create_table(
        'resource_type',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )

    resource = sa.Table('resource', sa.MetaData(),
                        type_string_col("type", "resource"))
    op.execute(resource_type.insert().from_select(
        ['name'], sa.select([resource.c.type]).distinct()))

    for table in ["resource", "resource_history"]:
        op.alter_column(table, "type", new_column_name="old_type",
                        existing_type=type_enum)
        op.add_column(table, type_string_col("type", table))
        sa_table = sa.Table(table, sa.MetaData(),
                            type_string_col("type", table),
                            type_enum_col('old_type'))
        op.execute(sa_table.update().values(
            {sa_table.c.type: sa_table.c.old_type}))
        op.drop_column(table, "old_type")
        op.alter_column(table, "type", nullable=False,
                        existing_type=type_string)
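The enum-to-table conversion above leans on insert().from_select(), which compiles to a single server-side INSERT ... SELECT DISTINCT instead of round-tripping every row through Python. A minimal standalone sketch of the same pattern on a throwaway SQLite database (assuming SQLAlchemy 1.x, the series this code targets):

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
src = sa.Table("resource", meta, sa.Column("type", sa.String(255)))
dst = sa.Table("resource_type", meta,
               sa.Column("name", sa.String(255), primary_key=True))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(src.insert(), [{"type": "generic"}, {"type": "volume"},
                                {"type": "generic"}])
    # One statement executed on the server side; no rows are fetched
    # into Python.
    conn.execute(dst.insert().from_select(
        ["name"], sa.select([src.c.type]).distinct()))
    print(conn.execute(sa.select([dst.c.name])).fetchall())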
@@ -0,0 +1,48 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Migrate legacy resources to DB

Revision ID: 8f376189b9eb
Revises: d24877c22ab0
Create Date: 2016-01-20 15:03:28.115656

"""
import json

from alembic import op
import sqlalchemy as sa

from gnocchi.indexer import sqlalchemy_legacy_resources as legacy

# revision identifiers, used by Alembic.
revision = '8f376189b9eb'
down_revision = 'd24877c22ab0'
branch_labels = None
depends_on = None


def upgrade():
    resource_type = sa.Table(
        'resource_type', sa.MetaData(),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('attributes', sa.Text, nullable=False)
    )

    for name, attributes in legacy.ceilometer_resources.items():
        text_attributes = json.dumps(attributes)
        op.execute(resource_type.update().where(
            resource_type.c.name == name
        ).values({resource_type.c.attributes: text_attributes}))
@@ -0,0 +1,127 @@
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""create host tables

Revision ID: 9901e5ea4b6e
Revises: a54c57ada3f5
Create Date: 2015-12-15 17:20:25.092891

"""

# revision identifiers, used by Alembic.
revision = '9901e5ea4b6e'
down_revision = 'a54c57ada3f5'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils


def upgrade():
    for table in ["resource", "resource_history"]:
        op.alter_column(table, "type",
                        type_=sa.Enum('generic', 'instance', 'swift_account',
                                      'volume', 'ceph_account', 'network',
                                      'identity', 'ipmi', 'stack', 'image',
                                      'instance_network_interface',
                                      'instance_disk',
                                      'host', 'host_disk',
                                      'host_network_interface',
                                      name='resource_type_enum'),
                        nullable=False)

    # NOTE(sileht): PostgreSQL has a builtin ENUM type, so just
    # altering the column won't work.
    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
    # Does it break offline migration because we use get_bind()?

    # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in a transaction,
    # we split the 'ALTER TYPE' operation into several steps.
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        op.execute("ALTER TYPE resource_type_enum RENAME TO \
                   old_resource_type_enum")
        op.execute("CREATE TYPE resource_type_enum AS ENUM \
                   ('generic', 'instance', 'swift_account', \
                   'volume', 'ceph_account', 'network', \
                   'identity', 'ipmi', 'stack', 'image', \
                   'instance_network_interface', 'instance_disk', \
                   'host', 'host_disk', \
                   'host_network_interface')")
        for table in ["resource", "resource_history"]:
            op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \
                       resource_type_enum USING \
                       type::text::resource_type_enum" % table)
        op.execute("DROP TYPE old_resource_type_enum")

    op.create_table(
        'host',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True),
                  nullable=False),
        sa.Column('host_name', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'],
                                name="fk_hypervisor_id_resource_id",
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )

    op.create_table(
        'host_history',
        sa.Column('host_name', sa.String(length=255), nullable=False),
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'],
                                ['resource_history.revision'],
                                name=("fk_hypervisor_history_"
                                      "resource_history_revision"),
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )

    for table in ['host_disk', 'host_net_int']:
        op.create_table(
            table,
            sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True),
                      nullable=False),
            sa.Column('host_name', sa.String(length=255), nullable=False),
            sa.Column('device_name', sa.String(length=255), nullable=True),
            sa.ForeignKeyConstraint(['id'], ['resource.id'],
                                    name="fk_%s_id_resource_id" % table,
                                    ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('id'),
            mysql_charset='utf8',
            mysql_engine='InnoDB'
        )

        op.create_table(
            '%s_history' % table,
            sa.Column('host_name', sa.String(length=255), nullable=False),
            sa.Column('device_name', sa.String(length=255), nullable=True),
            sa.Column('revision', sa.Integer(), nullable=False),
            sa.ForeignKeyConstraint(['revision'],
                                    ['resource_history.revision'],
                                    name=("fk_%s_history_"
                                          "resource_history_revision") % table,
                                    ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('revision'),
            mysql_charset='utf8',
            mysql_engine='InnoDB'
        )
@@ -0,0 +1,38 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""add unit column for metric

Revision ID: c62df18bf4ee
Revises: 2e0b912062d1
Create Date: 2016-05-04 12:31:25.350190

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = 'c62df18bf4ee'
down_revision = '2e0b912062d1'
branch_labels = None
depends_on = None


def upgrade():
    op.add_column('metric', sa.Column('unit',
                                      sa.String(length=31),
                                      nullable=True))
@@ -0,0 +1,38 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Add attributes to resource_type

Revision ID: d24877c22ab0
Revises: 0718ed97e5b3
Create Date: 2016-01-19 22:45:06.431190

"""

from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils as sa_utils


# revision identifiers, used by Alembic.
revision = 'd24877c22ab0'
down_revision = '0718ed97e5b3'
branch_labels = None
depends_on = None


def upgrade():
    op.add_column("resource_type",
                  sa.Column('attributes', sa_utils.JSONType(),))
@@ -0,0 +1,53 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""fix_host_foreign_key

Revision ID: ed9c6ddc5c35
Revises: ffc7bbeec0b0
Create Date: 2016-04-15 06:25:34.649934

"""

from alembic import op
from sqlalchemy import inspect

# revision identifiers, used by Alembic.
revision = 'ed9c6ddc5c35'
down_revision = 'ffc7bbeec0b0'
branch_labels = None
depends_on = None


def upgrade():
    conn = op.get_bind()

    insp = inspect(conn)
    fk_names = [fk['name'] for fk in insp.get_foreign_keys('host')]
    if ("fk_hypervisor_id_resource_id" not in fk_names and
            "fk_host_id_resource_id" in fk_names):
        # NOTE(sileht): we are already good, the DB has been created from
        # scratch after "a54c57ada3f5"
        return

    op.drop_constraint("fk_hypervisor_id_resource_id", "host",
                       type_="foreignkey")
    op.drop_constraint("fk_hypervisor_history_resource_history_revision",
                       "host_history", type_="foreignkey")
    op.create_foreign_key("fk_host_id_resource_id", "host", "resource",
                          ["id"], ["id"], ondelete="CASCADE")
    op.create_foreign_key("fk_host_history_resource_history_revision",
                          "host_history", "resource_history",
                          ["revision"], ["revision"], ondelete="CASCADE")
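Both this migration and 34c517bcc2dd use the SQLAlchemy inspector to check which constraint names actually exist before touching them, which keeps the migration safe across databases with different histories. A short sketch of the technique on its own (the table and constraint names are illustrative; the helper must run inside a migration's upgrade()):

import sqlalchemy as sa
from alembic import op


def rename_fk_if_present(table, old_name, new_name, referred_table,
                         local_cols, remote_cols):
    """Drop and recreate a foreign key only when the old name exists."""
    insp = sa.inspect(op.get_bind())
    existing = {fk['name'] for fk in insp.get_foreign_keys(table)}
    if old_name not in existing:
        return  # already migrated, or created from scratch post-fix
    op.drop_constraint(old_name, table, type_="foreignkey")
    op.create_foreign_key(new_name, table, referred_table,
                          local_cols, remote_cols, ondelete="CASCADE")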
@@ -0,0 +1,65 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""migrate_legacy_resources_to_db2

Revision ID: ffc7bbeec0b0
Revises: 8f376189b9eb
Create Date: 2016-04-14 15:57:13.072128

"""
import json

from alembic import op
import sqlalchemy as sa

from gnocchi.indexer import sqlalchemy_legacy_resources as legacy

# revision identifiers, used by Alembic.
revision = 'ffc7bbeec0b0'
down_revision = '8f376189b9eb'
branch_labels = None
depends_on = None


def upgrade():
    bind = op.get_bind()

    resource_type = sa.Table(
        'resource_type', sa.MetaData(),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('tablename', sa.String(18), nullable=False),
        sa.Column('attributes', sa.Text, nullable=False)
    )

    # NOTE(gordc): fix for the incorrect migration:
    # 0718ed97e5b3_add_tablename_to_resource_type.py#L46
    op.execute(resource_type.update().where(
        resource_type.c.name == "instance_network_interface"
    ).values({'tablename': 'instance_net_int'}))

    resource_type_names = [rt.name for rt in
                           list(bind.execute(resource_type.select()))]

    for name, attributes in legacy.ceilometer_resources.items():
        if name in resource_type_names:
            continue
        tablename = legacy.ceilometer_tablenames.get(name, name)
        text_attributes = json.dumps(attributes)
        op.execute(resource_type.insert().values({
            resource_type.c.attributes: text_attributes,
            resource_type.c.name: name,
            resource_type.c.tablename: tablename,
        }))
@@ -1,22 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gnocchi import indexer


class NullIndexer(indexer.IndexerDriver):
    pass
@@ -20,15 +20,28 @@ import os.path
import threading
import uuid

from alembic import migration
from alembic import operations
import oslo_db.api
from oslo_db import exception
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import utils as oslo_db_utils
from oslo_log import log
try:
    import psycopg2
except ImportError:
    psycopg2 = None
try:
    import pymysql.constants.ER
    import pymysql.err
except ImportError:
    pymysql = None
import six
import sqlalchemy
from sqlalchemy.engine import url as sqlalchemy_url
import sqlalchemy.exc
from sqlalchemy import types
import sqlalchemy_utils
from stevedore import extension

from gnocchi import exceptions
from gnocchi import indexer
@@ -41,27 +54,18 @@ ArchivePolicy = base.ArchivePolicy
ArchivePolicyRule = base.ArchivePolicyRule
Resource = base.Resource
ResourceHistory = base.ResourceHistory
ResourceType = base.ResourceType

_marker = indexer._marker

LOG = log.getLogger(__name__)


def get_resource_mappers(ext):
    if ext.name == "generic":
        resource_ext = ext.plugin
        resource_history_ext = ResourceHistory
    else:
        tablename = getattr(ext.plugin, '__tablename__', ext.name)
        resource_ext = type(str(ext.name),
                            (ext.plugin, base.ResourceExtMixin, Resource),
                            {"__tablename__": tablename})
        resource_history_ext = type(str("%s_history" % ext.name),
                                    (ext.plugin, base.ResourceHistoryExtMixin,
                                     ResourceHistory),
                                    {"__tablename__": (
                                        "%s_history" % tablename)})

    return {'resource': resource_ext,
            'history': resource_history_ext}
def retry_on_deadlock(f):
    return oslo_db.api.wrap_db_retry(retry_on_deadlock=True,
                                     max_retries=20,
                                     retry_interval=0.1,
                                     max_retry_interval=2)(f)


class PerInstanceFacade(object):
@@ -102,14 +106,180 @@ class PerInstanceFacade(object):
        self.trans._factory._writer_engine.dispose()


class SQLAlchemyIndexer(indexer.IndexerDriver):
    resources = extension.ExtensionManager('gnocchi.indexer.resources')
class ResourceClassMapper(object):
    def __init__(self):
        # FIXME(sileht): 3 attributes, perhaps we need a better structure.
        self._cache = {'generic': {'resource': base.Resource,
                                   'history': base.ResourceHistory,
                                   'updated_at': utils.utcnow()}}

    _RESOURCE_CLASS_MAPPER = {ext.name: get_resource_mappers(ext)
                              for ext in resources.extensions}
    @staticmethod
    def _build_class_mappers(resource_type, baseclass=None):
        tablename = resource_type.tablename
        tables_args = {"extend_existing": True}
        tables_args.update(base.COMMON_TABLES_ARGS)
        # TODO(sileht): Add columns
        if not baseclass:
            baseclass = resource_type.to_baseclass()
        resource_ext = type(
            str("%s_resource" % tablename),
            (baseclass, base.ResourceExtMixin, base.Resource),
            {"__tablename__": tablename, "__table_args__": tables_args})
        resource_history_ext = type(
            str("%s_history" % tablename),
            (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory),
            {"__tablename__": ("%s_history" % tablename),
             "__table_args__": tables_args})
        return {'resource': resource_ext,
                'history': resource_history_ext,
                'updated_at': resource_type.updated_at}

    def get_classes(self, resource_type):
        # NOTE(sileht): We don't care about concurrency here because we allow
        # sqlalchemy to override its global object with extend_existing=True;
        # this is safe because the classname and tablename are UUID-based.
        try:
            mappers = self._cache[resource_type.tablename]
            # Cache is outdated
            if (resource_type.name != "generic"
                    and resource_type.updated_at > mappers['updated_at']):
                for table_purpose in ['resource', 'history']:
                    Base.metadata.remove(Base.metadata.tables[
                        mappers[table_purpose].__tablename__])
                del self._cache[resource_type.tablename]
                raise KeyError
            return mappers
        except KeyError:
            mapper = self._build_class_mappers(resource_type)
            self._cache[resource_type.tablename] = mapper
            return mapper

    @retry_on_deadlock
    def map_and_create_tables(self, resource_type, facade):
        if resource_type.state != "creating":
            raise RuntimeError("map_and_create_tables must be called in "
                               "state creating")

        mappers = self.get_classes(resource_type)
        tables = [Base.metadata.tables[mappers["resource"].__tablename__],
                  Base.metadata.tables[mappers["history"].__tablename__]]

        try:
            with facade.writer_connection() as connection:
                Base.metadata.create_all(connection, tables=tables)
        except exception.DBError as e:
            if self._is_current_transaction_aborted(e):
                raise exception.RetryRequest(e)
            raise

        # NOTE(sileht): no need to protect the _cache with a lock;
        # get_classes cannot be called in state creating
        self._cache[resource_type.tablename] = mappers

    @staticmethod
    def _is_current_transaction_aborted(exception):
        # HACK(jd) Sometimes, PostgreSQL raises an error such as "current
        # transaction is aborted, commands ignored until end of transaction
        # block" on its own catalog, so we need to retry, but this is not
        # caught by oslo.db as a deadlock. This is likely because when we use
        # Base.metadata.create_all(), sqlalchemy itself gets an error it does
        # not catch or something. So this is why this function exists: to
        # paper over it, I guess.
        inn_e = exception.inner_exception
        return (psycopg2
                and isinstance(inn_e, sqlalchemy.exc.InternalError)
                and isinstance(inn_e.orig, psycopg2.InternalError)
                # current transaction is aborted
                and inn_e.orig.pgcode == '25P02')

    @retry_on_deadlock
    def unmap_and_delete_tables(self, resource_type, facade):
        if resource_type.state != "deleting":
            raise RuntimeError("unmap_and_delete_tables must be called in "
                               "state deleting")

        mappers = self.get_classes(resource_type)
        del self._cache[resource_type.tablename]

        tables = [Base.metadata.tables[mappers['resource'].__tablename__],
                  Base.metadata.tables[mappers['history'].__tablename__]]

        # NOTE(sileht): Base.metadata.drop_all doesn't
        # issue CASCADE correctly, at least on PostgreSQL.
        # We drop the foreign keys manually so as not to lock the
        # destination table for too long during the table drop.
        # It's safe not to use a transaction since
        # the resource_type table is already cleaned and committed,
        # so this code cannot be triggered anymore for this
        # resource_type.
        with facade.writer_connection() as connection:
            try:
                for table in tables:
                    for fk in table.foreign_key_constraints:
                        try:
                            self._safe_execute(
                                connection,
                                sqlalchemy.schema.DropConstraint(fk))
                        except exception.DBNonExistentConstraint:
                            pass
                for table in tables:
                    try:
                        self._safe_execute(connection,
                                           sqlalchemy.schema.DropTable(table))
                    except exception.DBNonExistentTable:
                        pass
            except exception.DBError as e:
                if self._is_current_transaction_aborted(e):
                    raise exception.RetryRequest(e)
                raise

        # NOTE(sileht): If something goes wrong here, we are currently
        # stuck; that's why we expose the state to the superuser.
        # But we allow him to delete a resource type in error state
        # in case he cleans up the mess manually and wants Gnocchi to
        # control and finish the cleanup.

        # TODO(sileht): Remove this resource on other workers
        # by using expiration on the cache?
        for table in tables:
            Base.metadata.remove(table)

    @retry_on_deadlock
    def _safe_execute(self, connection, works):
        # NOTE(sileht): we create a transaction to ensure mysql
        # creates locks on other transactions...
        trans = connection.begin()
        connection.execute(works)
        trans.commit()
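The mapper builds declarative classes on the fly with type(), which is how a resource type created through the REST API becomes a real mapped table at runtime; extend_existing allows a rebuild to replace the Table object already registered in the metadata. A stripped-down sketch of the trick outside Gnocchi (the Base and the column set here are illustrative):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


def build_resource_class(tablename):
    """Create a mapped class at runtime for an arbitrary table name."""
    return type(
        str("%s_resource" % tablename),
        (Base,),
        {"__tablename__": tablename,
         # extend_existing lets a rebuild replace the Table object that
         # may already be registered in Base.metadata.
         "__table_args__": {"extend_existing": True},
         "id": sa.Column(sa.Integer, primary_key=True),
         "name": sa.Column(sa.String(255))})


engine = sa.create_engine("sqlite://")
cls = build_resource_class("rt_0123456789abcdef")
Base.metadata.create_all(engine, tables=[cls.__table__])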
class SQLAlchemyIndexer(indexer.IndexerDriver):
    _RESOURCE_TYPE_MANAGER = ResourceClassMapper()

    @classmethod
    def _create_new_database(cls, url):
        """Used by testing to create a new database."""
        purl = sqlalchemy_url.make_url(
            cls.dress_url(
                url))
        purl.database = purl.database + str(uuid.uuid4()).replace('-', '')
        new_url = str(purl)
        sqlalchemy_utils.create_database(new_url)
        return new_url

    @staticmethod
    def dress_url(url):
        # If no explicit driver has been set, we default to pymysql
        if url.startswith("mysql://"):
            url = sqlalchemy_url.make_url(url)
            url.drivername = "mysql+pymysql"
            return str(url)
        return url

    def __init__(self, conf):
        conf.set_override("connection", conf.indexer.url, "database")
        conf.set_override("connection",
                          self.dress_url(conf.indexer.url),
                          "database")
        self.conf = conf
        self.facade = PerInstanceFacade(conf)
@@ -128,7 +298,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
    def get_engine(self):
        return self.facade.get_engine()

    def upgrade(self, nocreate=False):
    def upgrade(self, nocreate=False, create_legacy_resource_types=False):
        from alembic import command
        from alembic import migration
@ -146,10 +316,239 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
|
|||
else:
|
||||
command.upgrade(cfg, "head")
|
||||
|
||||
def _resource_type_to_class(self, resource_type, purpose="resource"):
|
||||
if resource_type not in self._RESOURCE_CLASS_MAPPER:
|
||||
raise indexer.NoSuchResourceType(resource_type)
|
||||
return self._RESOURCE_CLASS_MAPPER[resource_type][purpose]
|
||||
# TODO(sileht): generic shouldn't be a particular case
|
||||
# we must create a rt_generic and rt_generic_history table
|
||||
# like other type
|
||||
for rt in base.get_legacy_resource_types():
|
||||
if not (rt.name == "generic" or create_legacy_resource_types):
|
||||
continue
|
||||
|
||||
try:
|
||||
with self.facade.writer() as session:
|
||||
session.add(rt)
|
||||
except exception.DBDuplicateEntry:
|
||||
continue
|
||||
|
||||
if rt.name != "generic":
|
||||
try:
|
||||
self._RESOURCE_TYPE_MANAGER.map_and_create_tables(
|
||||
rt, self.facade)
|
||||
except Exception:
|
||||
self._set_resource_type_state(rt.name, "creation_error")
|
||||
LOG.exception('Fail to create tables for '
|
||||
'resource_type "%s"', rt.name)
|
||||
continue
|
||||
|
||||
self._set_resource_type_state(rt.name, "active")
|
||||
|
||||
# NOTE(jd) We can have deadlock errors either here or later in
|
||||
# map_and_create_tables(). We can't decorate create_resource_type()
|
||||
# directly or each part might retry later on its own and cause a
|
||||
# duplicate. And it seems there's no way to use the same session for
|
||||
# both adding the resource_type in our table and calling
|
||||
# map_and_create_tables() :-(
|
||||
@retry_on_deadlock
|
||||
def _add_resource_type(self, resource_type):
|
||||
try:
|
||||
with self.facade.writer() as session:
|
||||
session.add(resource_type)
|
||||
except exception.DBDuplicateEntry:
|
||||
raise indexer.ResourceTypeAlreadyExists(resource_type.name)
|
||||
|
||||
def create_resource_type(self, resource_type):
|
||||
# NOTE(sileht): mysql have a stupid and small length limitation on the
|
||||
# foreign key and index name, so we can't use the resource type name as
|
||||
# tablename, the limit is 64. The longest name we have is
|
||||
# fk_<tablename>_h_revision_rh_revision,
|
||||
# so 64 - 26 = 38 and 3 chars for rt_, 35 chars, uuid is 32, it's cool.
|
||||
tablename = "rt_%s" % uuid.uuid4().hex
|
||||
resource_type = ResourceType(name=resource_type.name,
|
||||
tablename=tablename,
|
||||
attributes=resource_type.attributes,
|
||||
state="creating")
|
||||
|
||||
# NOTE(sileht): ensure the driver is able to store the request
|
||||
# resource_type
|
||||
resource_type.to_baseclass()
|
||||
|
||||
self._add_resource_type(resource_type)
|
||||
|
||||
try:
|
||||
self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type,
|
||||
self.facade)
|
||||
except Exception:
|
||||
# NOTE(sileht): We fail the DDL, we have no way to automatically
|
||||
# recover, just set a particular state
|
||||
self._set_resource_type_state(resource_type.name, "creation_error")
|
||||
raise
|
||||
|
||||
self._set_resource_type_state(resource_type.name, "active")
|
||||
resource_type.state = "active"
|
||||
return resource_type
|
||||
|
||||
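The identifier arithmetic in the comment above can be checked directly; this stand-alone snippet mirrors the naming scheme (the foreign-key template string is copied from the comment, not from gnocchi code):

    import uuid

    MYSQL_MAX_IDENTIFIER = 64
    FIXED = len("fk_") + len("_h_revision_rh_revision")  # 26 characters

    tablename = "rt_%s" % uuid.uuid4().hex
    fk_name = "fk_%s_h_revision_rh_revision" % tablename
    assert len(tablename) == 35               # "rt_" + 32 hex digits
    assert len(fk_name) == FIXED + 35         # 61, under the 64-char limit
    assert len(fk_name) <= MYSQL_MAX_IDENTIFIER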
    def update_resource_type(self, name, add_attributes=None,
                             del_attributes=None):
        if not add_attributes and not del_attributes:
            return
        self._set_resource_type_state(name, "updating", "active")

        try:
            with self.facade.independent_writer() as session:
                rt = self._get_resource_type(session, name)

                with self.facade.writer_connection() as connection:
                    ctx = migration.MigrationContext.configure(connection)
                    op = operations.Operations(ctx)
                    with op.batch_alter_table(rt.tablename) as batch_op:
                        for attr in del_attributes:
                            batch_op.drop_column(attr)
                        for attr in add_attributes:
                            # TODO(sileht): when attr.required is True we
                            # have to pass a default; the REST layer
                            # currently protects us, required=True is not
                            # yet allowed
                            batch_op.add_column(sqlalchemy.Column(
                                attr.name, attr.satype,
                                nullable=not attr.required))

                rt.state = "active"
                rt.updated_at = utils.utcnow()
                rt.attributes.extend(add_attributes)
                for attr in list(rt.attributes):
                    if attr.name in del_attributes:
                        rt.attributes.remove(attr)
                # FIXME(sileht): yeah, that's weird, but attributes is a
                # custom JSON column and 'extend' doesn't trigger an SQL
                # UPDATE, so this enforces the update. I wonder if
                # sqlalchemy provides something on the column description
                # side.
                sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')

        except Exception:
            # NOTE(sileht): the DDL failed and we have no way to recover
            # automatically, so just record a dedicated state
            # TODO(sileht): create a repair REST endpoint that deletes
            # columns present in the resource type description but not in
            # the database. That would allow moving a type from
            # updating_error back to the active state, which is currently
            # not possible.
            self._set_resource_type_state(name, "updating_error")
            raise

        return rt

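The `flag_modified()` call above is the standard SQLAlchemy answer to in-place mutation of a JSON column. A self-contained demonstration with a hypothetical model (not a gnocchi class) on SQLite:

    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import attributes, sessionmaker

    Base = declarative_base()

    class Document(Base):  # hypothetical model, for illustration only
        __tablename__ = "document"
        id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        data = sqlalchemy.Column(sqlalchemy.JSON, default=dict)

    engine = sqlalchemy.create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    doc = Document(data={"tags": []})
    session.add(doc)
    session.commit()

    # Mutating the dict in place is invisible to the unit of work...
    doc.data["tags"].append("new")
    # ...so the attribute must be flagged dirty to force the UPDATE.
    attributes.flag_modified(doc, "data")
    session.commit()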
    def get_resource_type(self, name):
        with self.facade.independent_reader() as session:
            return self._get_resource_type(session, name)

    def _get_resource_type(self, session, name):
        resource_type = session.query(ResourceType).get(name)
        if not resource_type:
            raise indexer.NoSuchResourceType(name)
        return resource_type

    @retry_on_deadlock
    def _set_resource_type_state(self, name, state,
                                 expected_previous_state=None):
        with self.facade.writer() as session:
            q = session.query(ResourceType)
            q = q.filter(ResourceType.name == name)
            if expected_previous_state is not None:
                q = q.filter(ResourceType.state == expected_previous_state)
            update = q.update({'state': state})
            if update == 0:
                if expected_previous_state is not None:
                    rt = session.query(ResourceType).get(name)
                    if rt:
                        raise indexer.UnexpectedResourceTypeState(
                            name, expected_previous_state, rt.state)
                raise indexer.IndexerException(
                    "Failed to set resource type state of %s to %s" %
                    (name, state))

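`_set_resource_type_state` is an optimistic compare-and-set: the UPDATE is filtered on the expected previous state, and a zero row count means another worker won the transition. The core of the pattern, reduced to a sketch:

    def compare_and_set_state(session, model, name, new_state, expected):
        # Returns True only when the row was still in `expected`;
        # the filtered UPDATE makes the check-and-transition atomic.
        updated = (session.query(model)
                   .filter(model.name == name,
                           model.state == expected)
                   .update({"state": new_state}))
        return updated == 1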
    @staticmethod
    def get_resource_type_schema():
        return base.RESOURCE_TYPE_SCHEMA_MANAGER

    @staticmethod
    def get_resource_attributes_schemas():
        return [ext.plugin.schema() for ext in ResourceType.RESOURCE_SCHEMAS]

    def list_resource_types(self):
        with self.facade.independent_reader() as session:
            return list(session.query(ResourceType).order_by(
                ResourceType.name.asc()).all())

    # NOTE(jd) We can have deadlock errors either here or later in
    # map_and_create_tables(). We can't decorate delete_resource_type()
    # directly or each part might retry later on its own and cause a
    # duplicate. And it seems there's no way to use the same session for
    # both adding the resource_type in our table and calling
    # map_and_create_tables() :-(
    @retry_on_deadlock
    def _mark_as_deleting_resource_type(self, name):
        try:
            with self.facade.writer() as session:
                rt = self._get_resource_type(session, name)
                if rt.state not in ["active", "deletion_error",
                                    "creation_error", "updating_error"]:
                    raise indexer.UnexpectedResourceTypeState(
                        name,
                        "active/deletion_error/creation_error/updating_error",
                        rt.state)
                session.delete(rt)

                # FIXME(sileht): why do I need to flush here?!
                # I want the remove/add in the same transaction!
                session.flush()

                # NOTE(sileht): delete and recreate to:
                # * raise on duplicate constraints
                # * ensure we do not create a new resource type
                #   with the same name while we destroy the tables next
                rt = ResourceType(name=rt.name,
                                  tablename=rt.tablename,
                                  state="deleting",
                                  attributes=rt.attributes)
                session.add(rt)
        except exception.DBReferenceError as e:
            if (e.constraint in [
                    'fk_resource_resource_type_name',
                    'fk_resource_history_resource_type_name',
                    'fk_rh_resource_type_name']):
                raise indexer.ResourceTypeInUse(name)
            raise
        return rt

    @retry_on_deadlock
    def _delete_resource_type(self, name):
        # Really delete the resource type; no resource can be linked to it,
        # because we cannot add a resource to a resource_type that is not
        # in the 'active' state
        with self.facade.writer() as session:
            resource_type = self._get_resource_type(session, name)
            session.delete(resource_type)

    def delete_resource_type(self, name):
        if name == "generic":
            raise indexer.ResourceTypeInUse(name)

        rt = self._mark_as_deleting_resource_type(name)

        try:
            self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(
                rt, self.facade)
        except Exception:
            # NOTE(sileht): the DDL failed and we have no way to recover
            # automatically, so just record a dedicated state
            self._set_resource_type_state(rt.name, "deletion_error")
            raise

        self._delete_resource_type(name)

    def _resource_type_to_mappers(self, session, name):
        resource_type = self._get_resource_type(session, name)
        if resource_type.state != "active":
            raise indexer.UnexpectedResourceTypeState(
                name, "active", resource_type.state)
        return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type)

    def list_archive_policies(self):
        with self.facade.independent_reader() as session:

@@ -159,15 +558,37 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
        with self.facade.independent_reader() as session:
            return session.query(ArchivePolicy).get(name)

    def update_archive_policy(self, name, ap_items):
        with self.facade.independent_writer() as session:
            ap = session.query(ArchivePolicy).get(name)
            if not ap:
                raise indexer.NoSuchArchivePolicy(name)
            current = sorted(ap.definition,
                             key=operator.attrgetter('granularity'))
            new = sorted(ap_items, key=operator.attrgetter('granularity'))
            if len(current) != len(new):
                raise indexer.UnsupportedArchivePolicyChange(
                    name, 'Cannot add or drop granularities')
            for c, n in zip(current, new):
                if c.granularity != n.granularity:
                    raise indexer.UnsupportedArchivePolicyChange(
                        name, '%s granularity interval was changed'
                        % c.granularity)
            # NOTE(gordc): the ORM doesn't update a JSON column unless a
            # new object is assigned
            ap.definition = ap_items
            return ap

    def delete_archive_policy(self, name):
        constraints = [
            "fk_metric_ap_name_ap_name",
            "fk_apr_ap_name_ap_name"]
        with self.facade.writer() as session:
            try:
                if session.query(ArchivePolicy).filter(
                        ArchivePolicy.name == name).delete() == 0:
                    raise indexer.NoSuchArchivePolicy(name)
            except exception.DBReferenceError as e:
-                if (e.constraint ==
-                        'fk_metric_archive_policy_name_archive_policy_name'):
+                if e.constraint in constraints:
                    raise indexer.ArchivePolicyInUse(name)
                raise

@@ -214,32 +635,37 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
            raise indexer.ArchivePolicyRuleAlreadyExists(name)
        return apr

    @retry_on_deadlock
    def create_metric(self, id, created_by_user_id, created_by_project_id,
                      archive_policy_name,
-                      name=None, resource_id=None):
+                      name=None, unit=None, resource_id=None):
        m = Metric(id=id,
                   created_by_user_id=created_by_user_id,
                   created_by_project_id=created_by_project_id,
                   archive_policy_name=archive_policy_name,
                   name=name,
+                   unit=unit,
                   resource_id=resource_id)
        try:
            with self.facade.writer() as session:
                session.add(m)
        except exception.DBReferenceError as e:
            if (e.constraint ==
-                    'fk_metric_archive_policy_name_archive_policy_name'):
+                    'fk_metric_ap_name_ap_name'):
                raise indexer.NoSuchArchivePolicy(archive_policy_name)
            raise
        return m

    @retry_on_deadlock
    def list_metrics(self, names=None, ids=None, details=False,
-                     status='active', **kwargs):
-        if ids == []:
+                     status='active', limit=None, marker=None, sorts=None,
+                     **kwargs):
+        sorts = sorts or []
+        if ids is not None and not ids:
            return []
        with self.facade.independent_reader() as session:
            q = session.query(Metric).filter(
-                Metric.status == status).order_by(Metric.id)
+                Metric.status == status)
            if names is not None:
                q = q.filter(Metric.name.in_(names))
            if ids is not None:

@@ -249,20 +675,44 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
            if details:
                q = q.options(sqlalchemy.orm.joinedload('resource'))

            sort_keys, sort_dirs = self._build_sort_keys(sorts)

            if marker:
                metric_marker = self.list_metrics(ids=[marker])
                if metric_marker:
                    metric_marker = metric_marker[0]
                else:
                    raise indexer.InvalidPagination(
                        "Invalid marker: `%s'" % marker)
            else:
                metric_marker = None

            try:
                q = oslo_db_utils.paginate_query(q, Metric, limit=limit,
                                                 sort_keys=sort_keys,
                                                 marker=metric_marker,
                                                 sort_dirs=sort_dirs)
            except ValueError as e:
                raise indexer.InvalidPagination(e)
            except exception.InvalidSortKey as e:
                raise indexer.InvalidPagination(e)

            return list(q.all())

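Marker-based pagination is delegated to oslo.db: the marker must be handed over as a loaded ORM object, together with parallel sort-key and sort-direction lists. A hedged sketch of the call shape (the `session`, `Metric` model, and `marker_id` are assumed to exist; error handling is omitted):

    from oslo_db.sqlalchemy import utils as oslo_db_utils

    marker = session.query(Metric).get(marker_id)  # None on the first page
    query = session.query(Metric).filter(Metric.status == "active")
    query = oslo_db_utils.paginate_query(
        query, Metric,
        limit=100,
        sort_keys=["name", "id"],  # must end with a unique column
        sort_dirs=["asc", "asc"],
        marker=marker)
    page = query.all()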
    @retry_on_deadlock
    def create_resource(self, resource_type, id,
                        created_by_user_id, created_by_project_id,
                        user_id=None, project_id=None,
                        started_at=None, ended_at=None, metrics=None,
                        **kwargs):
-        resource_cls = self._resource_type_to_class(resource_type)
        if (started_at is not None
                and ended_at is not None
                and started_at > ended_at):
            raise ValueError(
                "Start timestamp cannot be after end timestamp")
        with self.facade.writer() as session:
+            resource_cls = self._resource_type_to_mappers(
+                session, resource_type)['resource']
            r = resource_cls(
                id=id,
                type=resource_type,

@@ -290,16 +740,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):

        return r

-    @oslo_db.api.retry_on_deadlock
+    @retry_on_deadlock
    def update_resource(self, resource_type,
                        resource_id, ended_at=_marker, metrics=_marker,
                        append_metrics=False,
                        create_revision=True,
                        **kwargs):
-        resource_cls = self._resource_type_to_class(resource_type)
-        resource_history_cls = self._resource_type_to_class(resource_type,
-                                                            "history")
        with self.facade.writer() as session:
+            mappers = self._resource_type_to_mappers(session, resource_type)
+            resource_cls = mappers["resource"]
+            resource_history_cls = mappers["history"]

            try:
                # NOTE(sileht): we use FOR UPDATE, which is not
                # galera-friendly, but there is no other way to cleanly
                # patch a resource and

@@ -379,12 +830,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                    if update == 0:
                        raise indexer.NoSuchMetric(value)
                else:
+                    unit = value.get('unit')
                    ap_name = value['archive_policy_name']
                    m = Metric(id=uuid.uuid4(),
                               created_by_user_id=r.created_by_user_id,
                               created_by_project_id=r.created_by_project_id,
                               archive_policy_name=ap_name,
                               name=name,
+                               unit=unit,
                               resource_id=r.id)
                    session.add(m)
                    try:

@@ -393,12 +846,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                        raise indexer.NamedMetricAlreadyExists(name)
                    except exception.DBReferenceError as e:
                        if (e.constraint ==
-                                'fk_metric_archive_policy_name_archive_policy_name'):
+                                'fk_metric_ap_name_ap_name'):
                            raise indexer.NoSuchArchivePolicy(ap_name)
                        raise

            session.expire(r, ['metrics'])

    @retry_on_deadlock
    def delete_resource(self, resource_id):
        with self.facade.writer() as session:
            # We are going to delete the resource; the on delete will set the

@@ -411,9 +865,42 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                    Resource.id == resource_id).delete() == 0:
                raise indexer.NoSuchResource(resource_id)

    @retry_on_deadlock
    def delete_resources(self, resource_type='generic',
                         attribute_filter=None):
        if not attribute_filter:
            raise ValueError("attribute_filter must be set")

        with self.facade.writer() as session:
            target_cls = self._resource_type_to_mappers(
                session, resource_type)["resource"]

            q = session.query(target_cls.id)

            engine = session.connection()
            try:
                f = QueryTransformer.build_filter(engine.dialect.name,
                                                  target_cls,
                                                  attribute_filter)
            except indexer.QueryAttributeError as e:
                # NOTE(jd) The QueryAttributeError does not know about
                # resource_type, so convert it
                raise indexer.ResourceAttributeError(resource_type,
                                                     e.attribute)

            q = q.filter(f)

            session.query(Metric).filter(
                Metric.resource_id.in_(q)
            ).update({"status": "delete"},
                     synchronize_session=False)
            return q.delete(synchronize_session=False)

    @retry_on_deadlock
    def get_resource(self, resource_type, resource_id, with_metrics=False):
-        resource_cls = self._resource_type_to_class(resource_type)
        with self.facade.independent_reader() as session:
+            resource_cls = self._resource_type_to_mappers(
+                session, resource_type)['resource']
            q = session.query(
                resource_cls).filter(
                    resource_cls.id == resource_id)

@@ -421,9 +908,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                q = q.options(sqlalchemy.orm.joinedload('metrics'))
            return q.first()

-    def _get_history_result_mapper(self, resource_type):
-        resource_cls = self._resource_type_to_class(resource_type)
-        history_cls = self._resource_type_to_class(resource_type, 'history')
+    def _get_history_result_mapper(self, session, resource_type):
+        mappers = self._resource_type_to_mappers(session, resource_type)
+        resource_cls = mappers['resource']
+        history_cls = mappers['history']

        resource_cols = {}
        history_cols = {}

@@ -445,7 +933,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):

        class Result(base.ResourceJsonifier, base.GnocchiBase):
            def __iter__(self):
-                return models.ModelIterator(self, iter(stmt.c.keys()))
+                return iter((key, getattr(self, key)) for key in stmt.c.keys())

        sqlalchemy.orm.mapper(
            Result, stmt, primary_key=[stmt.c.id, stmt.c.revision],

@@ -460,6 +948,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):

        return Result

    @retry_on_deadlock
    def list_resources(self, resource_type='generic',
                       attribute_filter=None,
                       details=False,

@@ -469,12 +958,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                       sorts=None):
        sorts = sorts or []

-        if history:
-            target_cls = self._get_history_result_mapper(resource_type)
-        else:
-            target_cls = self._resource_type_to_class(resource_type)

        with self.facade.independent_reader() as session:
+            if history:
+                target_cls = self._get_history_result_mapper(
+                    session, resource_type)
+            else:
+                target_cls = self._resource_type_to_mappers(
+                    session, resource_type)["resource"]

            q = session.query(target_cls)

            if attribute_filter:

@@ -491,18 +982,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):

                q = q.filter(f)

-            # transform the api-wg representation to the oslo.db one
-            sort_keys = []
-            sort_dirs = []
-            for sort in sorts:
-                sort_key, __, sort_dir = sort.partition(":")
-                sort_keys.append(sort_key.strip())
-                sort_dirs.append(sort_dir or 'asc')
-
-            # paginate_query require at list one uniq column
-            if 'id' not in sort_keys:
-                sort_keys.append('id')
-                sort_dirs.append('asc')
+            sort_keys, sort_dirs = self._build_sort_keys(sorts)

            if marker:
                resource_marker = self.get_resource(resource_type, marker)

@@ -520,9 +1000,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
            except ValueError as e:
                raise indexer.InvalidPagination(e)
            except exception.InvalidSortKey as e:
-                # FIXME(jd) Wait for https://review.openstack.org/274868 to be
-                # released so we can return which key
-                raise indexer.InvalidPagination("Invalid sort keys")
+                raise indexer.InvalidPagination(e)

            # Always include metrics
            q = q.options(sqlalchemy.orm.joinedload("metrics"))

@@ -537,19 +1015,43 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
                    # No need for a second query
                    all_resources.extend(resources)
                else:
+                    try:
+                        target_cls = self._resource_type_to_mappers(
+                            session, type)['history' if is_history else
+                                           'resource']
+                    except (indexer.UnexpectedResourceTypeState,
+                            indexer.NoSuchResourceType):
+                        # NOTE(sileht): this resource_type has been
+                        # removed in the meantime.
+                        continue
                    if is_history:
-                        target_cls = self._resource_type_to_class(
-                            type, "history")
-                        f = target_cls.revision.in_(
-                            [r.revision for r in resources])
+                        f = target_cls.revision.in_([r.revision
+                                                     for r in resources])
                    else:
-                        target_cls = self._resource_type_to_class(type)
                        f = target_cls.id.in_([r.id for r in resources])

                    q = session.query(target_cls).filter(f)
                    # Always include metrics
                    q = q.options(sqlalchemy.orm.joinedload('metrics'))
-                    all_resources.extend(q.all())
+                    try:
+                        all_resources.extend(q.all())
+                    except sqlalchemy.exc.ProgrammingError as e:
+                        # NOTE(jd) This exception can happen when the
+                        # resources and their resource type have been
+                        # deleted in the meantime:
+                        # sqlalchemy.exc.ProgrammingError:
+                        # (pymysql.err.ProgrammingError)
+                        # (1146, "Table 'test.rt_f00' doesn't exist")
+                        # In that case, just ignore those resources.
+                        if (not pymysql
+                                or not isinstance(
+                                    e, sqlalchemy.exc.ProgrammingError)
+                                or not isinstance(
+                                    e.orig, pymysql.err.ProgrammingError)
+                                or (e.orig.args[0]
+                                    != pymysql.constants.ER.NO_SUCH_TABLE)):
+                            raise

        return all_resources

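The guard above re-raises everything except MySQL error 1146 ("table doesn't exist"): SQLAlchemy wraps DBAPI errors, keeping the driver exception in `.orig`. The test, extracted as a helper sketch:

    import sqlalchemy.exc
    from pymysql import err as pymysql_err
    from pymysql.constants import ER

    def is_no_such_table(error):
        # True only for MySQL error 1146 as wrapped by SQLAlchemy.
        return (isinstance(error, sqlalchemy.exc.ProgrammingError)
                and isinstance(error.orig, pymysql_err.ProgrammingError)
                and error.orig.args[0] == ER.NO_SUCH_TABLE)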
    def expunge_metric(self, id):

@@ -560,9 +1062,27 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
    def delete_metric(self, id):
        with self.facade.writer() as session:
            if session.query(Metric).filter(
-                    Metric.id == id).update({"status": "delete"}) == 0:
+                    Metric.id == id, Metric.status == 'active').update(
+                        {"status": "delete"}) == 0:
                raise indexer.NoSuchMetric(id)

    @staticmethod
    def _build_sort_keys(sorts):
        # transform the api-wg representation to the oslo.db one
        sort_keys = []
        sort_dirs = []
        for sort in sorts:
            sort_key, __, sort_dir = sort.partition(":")
            sort_keys.append(sort_key.strip())
            sort_dirs.append(sort_dir or 'asc')

        # paginate_query requires at least one unique column
        if 'id' not in sort_keys:
            sort_keys.append('id')
            sort_dirs.append('asc')

        return sort_keys, sort_dirs


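`_build_sort_keys` converts the API-WG style "key:direction" strings into the two parallel lists oslo.db expects, appending "id" as a tie-breaker. For example:

    def build_sort_keys(sorts):
        # Stand-alone copy of the static method above.
        sort_keys, sort_dirs = [], []
        for sort in sorts:
            sort_key, __, sort_dir = sort.partition(":")
            sort_keys.append(sort_key.strip())
            sort_dirs.append(sort_dir or "asc")
        if "id" not in sort_keys:
            sort_keys.append("id")
            sort_dirs.append("asc")
        return sort_keys, sort_dirs

    print(build_sort_keys(["started_at:desc", "name"]))
    # -> (['started_at', 'name', 'id'], ['desc', 'asc', 'asc'])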
class QueryTransformer(object):
    unary_operators = {

@@ -650,10 +1170,20 @@ class QueryTransformer(object):
        elif (isinstance(attr.type, sqlalchemy_utils.UUIDType)
              and not isinstance(value, uuid.UUID)):
            converter = utils.ResourceUUID
        elif isinstance(attr.type, types.String):
            converter = six.text_type
        elif isinstance(attr.type, types.Integer):
            converter = int
        elif isinstance(attr.type, types.Numeric):
            converter = float

        if converter:
            try:
-                value = converter(value)
+                if isinstance(value, list):
+                    # we got a list for the in_ operator
+                    value = [converter(v) for v in value]
+                else:
+                    value = converter(value)
            except Exception:
                raise indexer.QueryValueError(value, field_name)

@@ -2,8 +2,6 @@
#
# Copyright © 2014-2015 eNovance
#
-# Authors: Julien Danjou <julien@danjou.info>
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at

@@ -32,6 +30,8 @@ import sqlalchemy_utils

from gnocchi import archive_policy
from gnocchi import indexer
+from gnocchi.indexer import sqlalchemy_legacy_resources as legacy
+from gnocchi import resource_type
from gnocchi import storage
from gnocchi import utils

@@ -146,7 +146,7 @@ class Metric(Base, GnocchiBase, storage.Metric):
        sqlalchemy.ForeignKey(
            'archive_policy.name',
            ondelete="RESTRICT",
-            name="fk_metric_archive_policy_name_archive_policy_name"),
+            name="fk_metric_ap_name_ap_name"),
        nullable=False)
    archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined")
    created_by_user_id = sqlalchemy.Column(

@@ -159,6 +159,7 @@ class Metric(Base, GnocchiBase, storage.Metric):
            ondelete="SET NULL",
            name="fk_metric_resource_id_resource_id"))
    name = sqlalchemy.Column(sqlalchemy.String(255))
+    unit = sqlalchemy.Column(sqlalchemy.String(31))
    status = sqlalchemy.Column(sqlalchemy.Enum('active', 'delete',
                                               name="metric_status_enum"),
                               nullable=False,

@@ -170,6 +171,7 @@ class Metric(Base, GnocchiBase, storage.Metric):
            "created_by_user_id": self.created_by_user_id,
            "created_by_project_id": self.created_by_project_id,
            "name": self.name,
+            "unit": self.unit,
        }
        unloaded = sqlalchemy.inspect(self).unloaded
        if 'resource' in unloaded:

@@ -193,12 +195,77 @@ class Metric(Base, GnocchiBase, storage.Metric):
                 and self.created_by_user_id == other.created_by_user_id
                 and self.created_by_project_id == other.created_by_project_id
                 and self.name == other.name
+                 and self.unit == other.unit
                 and self.resource_id == other.resource_id)
                or (storage.Metric.__eq__(self, other)))

    __hash__ = storage.Metric.__hash__


RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager(
    "gnocchi.indexer.sqlalchemy.resource_type_attribute")


def get_legacy_resource_types():
    resource_types = []
    for name, attributes in legacy.ceilometer_resources.items():
        tablename = legacy.ceilometer_tablenames.get(name, name)
        attrs = RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(
            attributes)
        resource_types.append(ResourceType(name=name,
                                           tablename=tablename,
                                           state="creating",
                                           attributes=attrs))
    return resource_types


class ResourceTypeAttributes(sqlalchemy_utils.JSONType):
    def process_bind_param(self, attributes, dialect):
        return super(ResourceTypeAttributes, self).process_bind_param(
            attributes.jsonify(), dialect)

    def process_result_value(self, value, dialect):
        attributes = super(ResourceTypeAttributes, self).process_result_value(
            value, dialect)
        return RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(attributes)

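`ResourceTypeAttributes` stores the rich schema objects by serializing their `jsonify()` output on the way in and rebuilding objects on the way out. The same round-trip idea with a generic `TypeDecorator` (illustrative only, not the gnocchi type, and `value.jsonify()` is an assumed interface of the stored objects):

    import json

    from sqlalchemy import types

    class JsonifiedType(types.TypeDecorator):
        # Store value.jsonify() as JSON text; return the raw dict on load
        # (gnocchi additionally rebuilds schema objects from that dict).
        impl = types.Text

        def process_bind_param(self, value, dialect):
            return None if value is None else json.dumps(value.jsonify())

        def process_result_value(self, value, dialect):
            return None if value is None else json.loads(value)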

class ResourceType(Base, GnocchiBase, resource_type.ResourceType):
    __tablename__ = 'resource_type'
    __table_args__ = (
        sqlalchemy.UniqueConstraint("tablename",
                                    name="uniq_resource_type0tablename"),
        COMMON_TABLES_ARGS,
    )

    name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True,
                             nullable=False)
    tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False)
    attributes = sqlalchemy.Column(ResourceTypeAttributes)
    state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating",
                                              "creation_error", "deleting",
                                              "deletion_error", "updating",
                                              "updating_error",
                                              name="resource_type_state_enum"),
                              nullable=False,
                              server_default="creating")
    updated_at = sqlalchemy.Column(PreciseTimestamp, nullable=False,
                                   # NOTE(jd): We would like to use
                                   # sqlalchemy.func.now, but we can't
                                   # because the type of PreciseTimestamp in
                                   # MySQL is not a Timestamp, so it would
                                   # not store a timestamp but a date as an
                                   # integer.
                                   default=lambda: utils.utcnow())

    def to_baseclass(self):
        cols = {}
        for attr in self.attributes:
            cols[attr.name] = sqlalchemy.Column(attr.satype,
                                                nullable=not attr.required)
        return type(str("%s_base" % self.tablename), (object, ), cols)

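`to_baseclass` builds a plain mixin class at runtime with the three-argument `type()` call, one SQLAlchemy column per declared attribute. A toy equivalent:

    import sqlalchemy

    # Hypothetical attribute set, mirroring what to_baseclass() produces.
    cols = {
        "flavor_id": sqlalchemy.Column(sqlalchemy.String(255),
                                       nullable=False),
        "display_name": sqlalchemy.Column(sqlalchemy.String(255),
                                          nullable=True),
    }
    mixin = type(str("rt_example_base"), (object, ), cols)
    assert hasattr(mixin, "flavor_id")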

class ResourceJsonifier(indexer.Resource):
    def jsonify(self):
        d = dict(self)

@@ -216,14 +283,16 @@ class ResourceMixin(ResourceJsonifier):
                            name="ck_started_before_ended"),
        COMMON_TABLES_ARGS)

-    type = sqlalchemy.Column(sqlalchemy.Enum('generic', 'instance',
-                                             'swift_account', 'volume',
-                                             'ceph_account', 'network',
-                                             'identity', 'ipmi', 'stack',
-                                             'image', 'instance_disk',
-                                             'instance_network_interface',
-                                             name="resource_type_enum"),
-                             nullable=False, default='generic')
+    @declarative.declared_attr
+    def type(cls):
+        return sqlalchemy.Column(
+            sqlalchemy.String(255),
+            sqlalchemy.ForeignKey('resource_type.name',
+                                  ondelete="RESTRICT",
+                                  name="fk_%s_resource_type_name" %
+                                  cls.__tablename__),
+            nullable=False)

    created_by_user_id = sqlalchemy.Column(
        sqlalchemy.String(255))
    created_by_project_id = sqlalchemy.Column(

@@ -275,7 +344,7 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase):
        sqlalchemy.ForeignKey(
            'resource.id',
            ondelete="CASCADE",
-            name="fk_resource_history_id_resource_id"),
+            name="fk_rh_id_resource_id"),
        nullable=False)
    revision_end = sqlalchemy.Column(PreciseTimestamp, nullable=False,
                                     default=lambda: utils.utcnow())

@@ -298,13 +367,20 @@ class ResourceExtMixin(object):

    @declarative.declared_attr
    def id(cls):
+        tablename_compact = cls.__tablename__
+        if tablename_compact.endswith("_history"):
+            tablename_compact = tablename_compact[:-6]
        return sqlalchemy.Column(
            sqlalchemy_utils.UUIDType(),
            sqlalchemy.ForeignKey(
                'resource.id',
                ondelete="CASCADE",
-                name="fk_%s_id_resource_id" % cls.__tablename__),
-            primary_key=True)
+                name="fk_%s_id_resource_id" % tablename_compact,
+                # NOTE(sileht): we use use_alter to ensure that postgresql
+                # does not take an AccessExclusiveLock on the destination
+                # table
+                use_alter=True),
+            primary_key=True
+        )


class ResourceHistoryExtMixin(object):

@@ -314,14 +390,31 @@ class ResourceHistoryExtMixin(object):

    @declarative.declared_attr
    def revision(cls):
+        tablename_compact = cls.__tablename__
+        if tablename_compact.endswith("_history"):
+            tablename_compact = tablename_compact[:-6]
        return sqlalchemy.Column(
            sqlalchemy.Integer,
            sqlalchemy.ForeignKey(
                'resource_history.revision',
                ondelete="CASCADE",
-                name="fk_%s_revision_resource_history_revision"
-                % cls.__tablename__),
-            primary_key=True)
+                name="fk_%s_revision_rh_revision"
+                % tablename_compact,
+                # NOTE(sileht): we use use_alter to ensure that postgresql
+                # does not take an AccessExclusiveLock on the destination
+                # table
+                use_alter=True),
+            primary_key=True
+        )


class HistoryModelIterator(models.ModelIterator):
    def __next__(self):
        # NOTE(sileht): our custom resource attribute columns don't have
        # the same name in the database as in the sqlalchemy model, so
        # strip the additional "f_" prefix to get the model attribute name
        n = six.advance_iterator(self.i)
        model_attr = n[2:] if n[:2] == "f_" else n
        return model_attr, getattr(self.model, n)


class ArchivePolicyRule(Base, GnocchiBase):

@@ -333,7 +426,6 @@ class ArchivePolicyRule(Base, GnocchiBase):
        sqlalchemy.ForeignKey(
            'archive_policy.name',
            ondelete="RESTRICT",
-            name="fk_archive_policy_rule_"
-            "archive_policy_name_archive_policy_name"),
+            name="fk_apr_ap_name_ap_name"),
        nullable=False)
    metric_pattern = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)

@@ -1,5 +1,5 @@
# -*- encoding: utf-8 -*-
#

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at

@@ -17,34 +17,22 @@ from __future__ import absolute_import
import sqlalchemy
import sqlalchemy_utils


-class Image(object):
-    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    container_format = sqlalchemy.Column(sqlalchemy.String(255),
-                                         nullable=False)
-    disk_format = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
+from gnocchi import resource_type


-class Instance(object):
-    flavor_id = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    image_ref = sqlalchemy.Column(sqlalchemy.String(255))
-    host = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    server_group = sqlalchemy.Column(sqlalchemy.String(255))
+class StringSchema(resource_type.StringSchema):
+    @property
+    def satype(self):
+        return sqlalchemy.String(self.max_length)


-class InstanceDisk(object):
-    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    instance_id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
-                                    nullable=False)
+class UUIDSchema(resource_type.UUIDSchema):
+    satype = sqlalchemy_utils.UUIDType()


-class InstanceNetworkInterface(object):
-    __tablename__ = 'instance_net_int'
-    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
-    instance_id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
-                                    nullable=False)
+class NumberSchema(resource_type.NumberSchema):
+    satype = sqlalchemy.Float(53)


-class Volume(object):
-    display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True)
+class BoolSchema(resource_type.BoolSchema):
+    satype = sqlalchemy.Boolean

@@ -0,0 +1,78 @@
# -*- encoding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(sileht): this code is also in the alembic migration
ceilometer_tablenames = {
    "instance_network_interface": "instance_net_int",
    "host_network_interface": "host_net_int",
}
ceilometer_resources = {
    "generic": {},
    "image": {
        "name": {"type": "string", "min_length": 0, "max_length": 255,
                 "required": True},
        "container_format": {"type": "string", "min_length": 0,
                             "max_length": 255, "required": True},
        "disk_format": {"type": "string", "min_length": 0, "max_length": 255,
                        "required": True},
    },
    "instance": {
        "flavor_id": {"type": "string", "min_length": 0, "max_length": 255,
                      "required": True},
        "image_ref": {"type": "string", "min_length": 0, "max_length": 255,
                      "required": False},
        "host": {"type": "string", "min_length": 0, "max_length": 255,
                 "required": True},
        "display_name": {"type": "string", "min_length": 0, "max_length": 255,
                         "required": True},
        "server_group": {"type": "string", "min_length": 0, "max_length": 255,
                         "required": False},
    },
    "instance_disk": {
        "name": {"type": "string", "min_length": 0, "max_length": 255,
                 "required": True},
        "instance_id": {"type": "uuid", "required": True},
    },
    "instance_network_interface": {
        "name": {"type": "string", "min_length": 0, "max_length": 255,
                 "required": True},
        "instance_id": {"type": "uuid", "required": True},
    },
    "volume": {
        "display_name": {"type": "string", "min_length": 0, "max_length": 255,
                         "required": False},
    },
    "swift_account": {},
    "ceph_account": {},
    "network": {},
    "identity": {},
    "ipmi": {},
    "stack": {},
    "host": {
        "host_name": {"type": "string", "min_length": 0, "max_length": 255,
                      "required": True},
    },
    "host_network_interface": {
        "host_name": {"type": "string", "min_length": 0, "max_length": 255,
                      "required": True},
        "device_name": {"type": "string", "min_length": 0, "max_length": 255,
                        "required": False},
    },
    "host_disk": {
        "host_name": {"type": "string", "min_length": 0, "max_length": 255,
                      "required": True},
        "device_name": {"type": "string", "min_length": 0, "max_length": 255,
                        "required": False},
    },
}

@@ -14,6 +14,7 @@
import itertools

from oslo_config import cfg
from oslo_middleware import cors
import uuid

import gnocchi.archive_policy

@@ -21,7 +22,6 @@ import gnocchi.indexer
import gnocchi.storage
import gnocchi.storage.ceph
import gnocchi.storage.file
-import gnocchi.storage.influxdb
import gnocchi.storage.swift


@@ -37,18 +37,9 @@ def list_opts():
        cfg.StrOpt('paste_config',
                   default='api-paste.ini',
                   help='Path to API Paste configuration.'),
-        cfg.PortOpt('port',
-                    default=8041,
-                    help='The port for the Gnocchi API server.'),
-        cfg.StrOpt('host',
-                   default='0.0.0.0',
-                   help='The listen IP for the Gnocchi API server.'),
-        cfg.BoolOpt('pecan_debug',
-                    default=False,
-                    help='Toggle Pecan Debug Middleware.'),
        cfg.IntOpt('workers', min=1,
                   help='Number of workers for Gnocchi API server. '
                        'By default the available number of CPU is used.'),
        cfg.IntOpt('max_limit',
                   default=1000,
                   help=('The maximum number of items returned in a '

@@ -58,8 +49,7 @@ def list_opts():
            gnocchi.storage.OPTS,
            gnocchi.storage.ceph.OPTS,
            gnocchi.storage.file.OPTS,
-            gnocchi.storage.swift.OPTS,
-            gnocchi.storage.influxdb.OPTS)),
+            gnocchi.storage.swift.OPTS)),
        ("statsd", (
            cfg.StrOpt('host',
                       default='0.0.0.0',

@@ -87,3 +77,14 @@ def list_opts():
        )),
        ("archive_policy", gnocchi.archive_policy.OPTS),
    ]
+
+
+def set_defaults():
+    cfg.set_defaults(cors.CORS_OPTS,
+                     allow_headers=[
+                         'X-Auth-Token',
+                         'X-Subject-Token',
+                         'X-User-Id',
+                         'X-Domain-Id',
+                         'X-Project-Id',
+                         'X-Roles'])

@@ -0,0 +1,211 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import numbers
import re
import six
import stevedore
import voluptuous

from gnocchi import utils


INVALID_NAMES = [
    "id", "type", "metrics",
    "revision", "revision_start", "revision_end",
    "started_at", "ended_at",
    "user_id", "project_id",
    "created_by_user_id", "created_by_project_id", "get_metric"
]

VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*")


class InvalidResourceAttributeName(Exception):
    """Error raised when the resource attribute name is invalid."""
    def __init__(self, name):
        super(InvalidResourceAttributeName, self).__init__(
            "Resource attribute name %s is invalid" % str(name))
        self.name = name


class InvalidResourceAttributeValue(ValueError):
    """Error raised when the resource attribute min is greater than max."""
    def __init__(self, min, max):
        super(InvalidResourceAttributeValue, self).__init__(
            "Resource attribute value min (or min_length) %s must be less "
            "than or equal to max (or max_length) %s!" % (str(min), str(max)))
        self.min = min
        self.max = max


class CommonAttributeSchema(object):
    meta_schema_ext = {}
    schema_ext = None

    def __init__(self, type, name, required):
        if (len(name) > 63 or name in INVALID_NAMES
                or not VALID_CHARS.match(name)):
            raise InvalidResourceAttributeName(name)

        self.name = name
        self.required = required

    @classmethod
    def meta_schema(cls):
        d = {
            voluptuous.Required('type'): cls.typename,
            voluptuous.Required('required', default=True): bool
        }
        if callable(cls.meta_schema_ext):
            d.update(cls.meta_schema_ext())
        else:
            d.update(cls.meta_schema_ext)
        return d

    def schema(self):
        if self.required:
            return {self.name: self.schema_ext}
        else:
            return {voluptuous.Optional(self.name): self.schema_ext}

    def jsonify(self):
        return {"type": self.typename,
                "required": self.required}


class StringSchema(CommonAttributeSchema):
    typename = "string"

    def __init__(self, min_length, max_length, *args, **kwargs):
        super(StringSchema, self).__init__(*args, **kwargs)
        if min_length > max_length:
            raise InvalidResourceAttributeValue(min_length, max_length)

        self.min_length = min_length
        self.max_length = max_length

    meta_schema_ext = {
        voluptuous.Required('min_length', default=0):
        voluptuous.All(int, voluptuous.Range(min=0, max=255)),
        voluptuous.Required('max_length', default=255):
        voluptuous.All(int, voluptuous.Range(min=1, max=255))
    }

    @property
    def schema_ext(self):
        return voluptuous.All(six.text_type,
                              voluptuous.Length(
                                  min=self.min_length,
                                  max=self.max_length))

    def jsonify(self):
        d = super(StringSchema, self).jsonify()
        d.update({"max_length": self.max_length,
                  "min_length": self.min_length})
        return d


class UUIDSchema(CommonAttributeSchema):
    typename = "uuid"
    schema_ext = staticmethod(utils.UUID)


class NumberSchema(CommonAttributeSchema):
    typename = "number"

    def __init__(self, min, max, *args, **kwargs):
        super(NumberSchema, self).__init__(*args, **kwargs)
        if max is not None and min is not None and min > max:
            raise InvalidResourceAttributeValue(min, max)

        self.min = min
        self.max = max

    meta_schema_ext = {
        voluptuous.Required('min', default=None): voluptuous.Any(
            None, numbers.Real),
        voluptuous.Required('max', default=None): voluptuous.Any(
            None, numbers.Real)
    }

    @property
    def schema_ext(self):
        return voluptuous.All(numbers.Real,
                              voluptuous.Range(min=self.min,
                                               max=self.max))

    def jsonify(self):
        d = super(NumberSchema, self).jsonify()
        d.update({"min": self.min, "max": self.max})
        return d


class BoolSchema(CommonAttributeSchema):
    typename = "bool"
    schema_ext = bool


class ResourceTypeAttributes(list):
    def jsonify(self):
        d = {}
        for attr in self:
            d[attr.name] = attr.jsonify()
        return d


class ResourceTypeSchemaManager(stevedore.ExtensionManager):
    def __init__(self, *args, **kwargs):
        super(ResourceTypeSchemaManager, self).__init__(*args, **kwargs)
        type_schemas = tuple([ext.plugin.meta_schema()
                              for ext in self.extensions])
        self._schema = voluptuous.Schema({
            "name": six.text_type,
            voluptuous.Required("attributes", default={}): {
                six.text_type: voluptuous.Any(*tuple(type_schemas))
            }
        })

    def __call__(self, definition):
        return self._schema(definition)

    def attributes_from_dict(self, attributes):
        return ResourceTypeAttributes(
            self[attr["type"]].plugin(name=name, **attr)
            for name, attr in attributes.items())

    def resource_type_from_dict(self, name, attributes, state):
        return ResourceType(name, self.attributes_from_dict(attributes), state)


class ResourceType(object):
    def __init__(self, name, attributes, state):
        self.name = name
        self.attributes = attributes
        self.state = state

    @property
    def schema(self):
        schema = {}
        for attr in self.attributes:
            schema.update(attr.schema())
        return schema

    def __eq__(self, other):
        return self.name == other.name

    def jsonify(self):
        return {"name": self.name,
                "attributes": self.attributes.jsonify(),
                "state": self.state}

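End to end, the schema manager validates a resource-type definition against the pluggable attribute meta-schemas and then materializes attribute objects. A sketch, assuming gnocchi is installed so the stevedore entry points resolve:

    from gnocchi import resource_type

    mgr = resource_type.ResourceTypeSchemaManager(
        "gnocchi.indexer.sqlalchemy.resource_type_attribute")

    definition = mgr({
        "name": "my_type",
        "attributes": {
            "host": {"type": "string", "required": True,
                     "min_length": 0, "max_length": 255},
            "instance_id": {"type": "uuid"},
        },
    })
    attrs = mgr.attributes_from_dict(definition["attributes"])
    print([(a.name, a.typename, a.required) for a in attrs])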
@@ -17,6 +17,7 @@
import itertools
import uuid

import jsonpatch
from oslo_utils import strutils
import pecan
from pecan import rest

@@ -31,6 +32,7 @@ from gnocchi import aggregates
from gnocchi import archive_policy
from gnocchi import indexer
from gnocchi import json
+from gnocchi import resource_type
from gnocchi import storage
from gnocchi import utils

@@ -81,7 +83,8 @@ def enforce(rule, target):
    creds = {
        'roles': headers.get("X-Roles", "").split(","),
        'user_id': user_id,
-        'project_id': project_id
+        'project_id': project_id,
+        'domain_id': headers.get("X-Domain-Id", ""),
    }

    if not isinstance(target, dict):

@@ -144,10 +147,13 @@ def set_resp_location_hdr(location):
    pecan.response.headers['Location'] = location


-def deserialize():
+def deserialize(expected_content_types=None):
+    if expected_content_types is None:
+        expected_content_types = ("application/json", )
+
    mime_type, options = werkzeug.http.parse_options_header(
        pecan.request.headers.get('Content-Type'))
-    if mime_type != "application/json":
+    if mime_type not in expected_content_types:
        abort(415)
    try:
        params = json.load(pecan.request.body_file_raw,

@@ -157,18 +163,20 @@ def deserialize():
    return params


-def deserialize_and_validate(schema, required=True):
+def deserialize_and_validate(schema, required=True,
+                             expected_content_types=None):
    try:
        return voluptuous.Schema(schema, required=required)(
-            deserialize())
+            deserialize(expected_content_types=expected_content_types))
    except voluptuous.Error as e:
        abort(400, "Invalid input: %s" % e)

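The Content-Type check above leans on werkzeug to strip parameters off the media type before comparing it with the accepted list:

    import werkzeug.http

    mime_type, options = werkzeug.http.parse_options_header(
        "application/json-patch+json; charset=UTF-8")
    assert mime_type == "application/json-patch+json"
    assert options == {"charset": "UTF-8"}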
def Timestamp(v):
    if v is None:
        return v
-    return utils.to_timestamp(v)
+    t = utils.to_timestamp(v)
+    if t < utils.unix_universal_start:
+        raise ValueError("Timestamp must be after Epoch")
+    return t


def PositiveOrNullInt(value):

@@ -215,6 +223,8 @@ def get_details(params):
RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc',
                               'started_at:asc']

+METRIC_DEFAULT_PAGINATION = ['id:asc']


def get_pagination_options(params, default):
    max_limit = pecan.request.conf.api.max_limit

@@ -255,6 +265,33 @@ class ArchivePolicyController(rest.RestController):
            return ap
        abort(404, indexer.NoSuchArchivePolicy(self.archive_policy))

    @pecan.expose('json')
    def patch(self):
        ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
        if not ap:
            abort(404, indexer.NoSuchArchivePolicy(self.archive_policy))
        enforce("update archive policy", ap)

        body = deserialize_and_validate(voluptuous.Schema({
            voluptuous.Required("definition"):
                voluptuous.All([{
                    "granularity": Timespan,
                    "points": PositiveNotNullInt,
                    "timespan": Timespan}], voluptuous.Length(min=1)),
        }))
        # Validate the data
        try:
            ap_items = [archive_policy.ArchivePolicyItem(**item) for item in
                        body['definition']]
        except ValueError as e:
            abort(400, e)

        try:
            return pecan.request.indexer.update_archive_policy(
                self.archive_policy, ap_items)
        except indexer.UnsupportedArchivePolicyChange as e:
            abort(400, e)

    @pecan.expose()
    def delete(self):
        # NOTE(jd) I don't think there's any point in fetching and passing the

@ -369,102 +406,6 @@ class ArchivePolicyRulesController(rest.RestController):
|
|||
abort(400, e)
|
||||
|
||||
|
||||
class AggregatedMetricController(rest.RestController):
|
||||
_custom_actions = {
|
||||
'measures': ['GET']
|
||||
}
|
||||
|
||||
def __init__(self, metric_ids):
|
||||
self.metric_ids = metric_ids
|
||||
|
||||
@pecan.expose('json')
|
||||
def get_measures(self, start=None, stop=None, aggregation='mean',
|
||||
granularity=None, needed_overlap=100.0):
|
||||
return self.get_cross_metric_measures_from_ids(
|
||||
self.metric_ids, start, stop,
|
||||
aggregation, granularity, needed_overlap)
|
||||
|
||||
@classmethod
|
||||
def get_cross_metric_measures_from_ids(cls, metric_ids, start=None,
|
||||
stop=None, aggregation='mean',
|
||||
granularity=None,
|
||||
needed_overlap=100.0):
|
||||
# Check RBAC policy
|
||||
metrics = pecan.request.indexer.list_metrics(ids=metric_ids)
|
||||
missing_metric_ids = (set(metric_ids)
|
||||
- set(six.text_type(m.id) for m in metrics))
|
||||
if missing_metric_ids:
|
||||
# Return one of the missing one in the error
|
||||
abort(404, storage.MetricDoesNotExist(
|
||||
missing_metric_ids.pop()))
|
||||
return cls.get_cross_metric_measures_from_objs(
|
||||
metrics, start, stop, aggregation, granularity, needed_overlap)
|
||||
|
||||
@staticmethod
|
||||
def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
|
||||
aggregation='mean',
|
||||
granularity=None,
|
||||
needed_overlap=100.0):
|
||||
try:
|
||||
needed_overlap = float(needed_overlap)
|
||||
except ValueError:
|
||||
abort(400, 'needed_overlap must be a number')
|
||||
|
||||
if start is not None:
|
||||
try:
|
||||
start = Timestamp(start)
|
||||
except Exception:
|
||||
abort(400, "Invalid value for start")
|
||||
|
||||
if stop is not None:
|
||||
try:
|
||||
stop = Timestamp(stop)
|
||||
except Exception:
|
||||
abort(400, "Invalid value for stop")
|
||||
|
||||
if (aggregation
|
||||
not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS):
|
||||
abort(
|
||||
400,
|
||||
'Invalid aggregation value %s, must be one of %s'
|
||||
% (aggregation,
|
||||
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS))
|
||||
|
||||
for metric in metrics:
|
||||
enforce("get metric", metric)
|
||||
|
||||
number_of_metrics = len(metrics)
|
||||
try:
|
||||
if number_of_metrics == 0:
|
||||
return []
|
||||
if granularity is not None:
|
||||
try:
|
||||
granularity = float(granularity)
|
||||
except ValueError as e:
|
||||
abort(400, "granularity must be a float: %s" % e)
|
||||
if number_of_metrics == 1:
|
||||
# NOTE(sileht): don't do the aggregation if we only have one
|
||||
# metric
|
||||
measures = pecan.request.storage.get_measures(
|
||||
metrics[0], start, stop, aggregation,
|
||||
granularity)
|
||||
else:
|
||||
measures = pecan.request.storage.get_cross_metric_measures(
|
||||
metrics, start, stop, aggregation,
|
||||
granularity,
|
||||
needed_overlap)
|
||||
# Replace timestamp keys by their string versions
|
||||
return [(timestamp.isoformat(), offset, v)
|
||||
for timestamp, offset, v in measures]
|
||||
except storage.MetricUnaggregatable as e:
|
||||
abort(400, ("One of the metrics being aggregated doesn't have "
|
||||
"matching granularity: %s") % str(e))
|
||||
except storage.MetricDoesNotExist as e:
|
||||
abort(404, e)
|
||||
except storage.AggregationDoesNotExist as e:
|
||||
abort(404, e)
|
||||
|
||||
|
||||
def MeasureSchema(m):
|
||||
# NOTE(sileht): don't use voluptuous for performance reasons
|
||||
try:
|
||||
|
@ -473,9 +414,10 @@ def MeasureSchema(m):
|
|||
abort(400, "Invalid input for a value")
|
||||
|
||||
try:
|
||||
timestamp = utils.to_timestamp(m['timestamp'])
|
||||
except Exception:
|
||||
abort(400, "Invalid input for a timestamp")
|
||||
timestamp = Timestamp(m['timestamp'])
|
||||
except Exception as e:
|
||||
abort(400,
|
||||
"Invalid input for timestamp `%s': %s" % (m['timestamp'], e))
|
||||
|
||||
return storage.Measure(timestamp, value)
|
||||
|
||||
|
@ -512,7 +454,7 @@ class MetricController(rest.RestController):
|
|||
|
||||
@pecan.expose('json')
|
||||
def get_measures(self, start=None, stop=None, aggregation='mean',
|
||||
granularity=None, **param):
|
||||
granularity=None, refresh=False, **param):
|
||||
self.enforce_metric("get measures")
|
||||
if not (aggregation
|
||||
in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS
|
||||
|
@ -536,6 +478,10 @@ class MetricController(rest.RestController):
|
|||
except Exception:
|
||||
abort(400, "Invalid value for stop")
|
||||
|
||||
if strutils.bool_from_string(refresh):
|
||||
pecan.request.storage.process_new_measures(
|
||||
pecan.request.indexer, [six.text_type(self.metric.id)], True)
|
||||
|
||||
try:
|
||||
if aggregation in self.custom_agg:
|
||||
measures = self.custom_agg[aggregation].compute(
|
||||
|
@ -564,13 +510,6 @@ class MetricController(rest.RestController):
|
|||
abort(404, e)
|
||||
|
||||
|
||||
def UUID(value):
|
||||
try:
|
||||
return uuid.UUID(value)
|
||||
except Exception as e:
|
||||
raise ValueError(e)
|
||||
|
||||
|
||||
class MetricsController(rest.RestController):
|
||||
|
||||
@pecan.expose()
|
||||
|
@ -590,6 +529,8 @@ class MetricsController(rest.RestController):
|
|||
"project_id": six.text_type,
|
||||
"archive_policy_name": six.text_type,
|
||||
"name": six.text_type,
|
||||
voluptuous.Optional("unit"):
|
||||
voluptuous.All(six.text_type, voluptuous.Length(max=31)),
|
||||
})
|
||||
|
||||
# NOTE(jd) Define this method as it was a voluptuous schema – it's just a
|
||||
|
@ -625,6 +566,7 @@ class MetricsController(rest.RestController):
|
|||
"project_id": definition.get('project_id'),
|
||||
"archive_policy_name": archive_policy_name,
|
||||
"name": name,
|
||||
"unit": definition.get('unit'),
|
||||
})
|
||||
|
||||
return definition
|
||||
|
@ -638,6 +580,7 @@ class MetricsController(rest.RestController):
|
|||
uuid.uuid4(),
|
||||
user, project,
|
||||
name=body.get('name'),
|
||||
unit=body.get('unit'),
|
||||
archive_policy_name=body['archive_policy_name'])
|
||||
except indexer.NoSuchArchivePolicy as e:
|
||||
abort(400, e)
|
||||
|
@ -664,14 +607,19 @@ class MetricsController(rest.RestController):
|
|||
project_id = kwargs.get('project_id')
|
||||
attr_filter = {}
|
||||
if user_id is not None:
|
||||
attr_filter['creater_by_user_id'] = user_id
|
||||
attr_filter['created_by_user_id'] = user_id
|
||||
if project_id is not None:
|
||||
attr_filter['created_by_project_id'] = project_id
|
||||
return pecan.request.indexer.list_metrics(**attr_filter)
|
||||
attr_filter.update(get_pagination_options(
|
||||
kwargs, METRIC_DEFAULT_PAGINATION))
|
||||
try:
|
||||
return pecan.request.indexer.list_metrics(**attr_filter)
|
||||
except indexer.IndexerException as e:
|
||||
abort(400, e)
|
||||
|
_MetricsSchema = voluptuous.Schema({
    six.text_type: voluptuous.Any(UUID,
    six.text_type: voluptuous.Any(utils.UUID,
                                  MetricsController.MetricSchema),
})


@@ -755,7 +703,7 @@ class ResourceHistoryController(rest.RestController):
        resource = pecan.request.indexer.get_resource(
            self.resource_type, self.resource_id)
        if not resource:
            abort(404, "foo")
            abort(404, indexer.NoSuchResource(self.resource_id))

        enforce("get resource", resource)
@@ -804,6 +752,146 @@ def etag_set_headers(obj):
    pecan.response.last_modified = obj.lastmodified


def AttributesPath(value):
    if value.startswith("/attributes"):
        return value
    raise ValueError("Only attributes can be modified")


ResourceTypeJsonPatchSchema = voluptuous.Schema([{
    "op": voluptuous.Any("add", "remove"),
    "path": AttributesPath,
    voluptuous.Optional("value"): dict,
}])


class ResourceTypeController(rest.RestController):
    def __init__(self, name):
        self._name = name

    @pecan.expose('json')
    def get(self):
        try:
            rt = pecan.request.indexer.get_resource_type(self._name)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        enforce("get resource type", rt)
        return rt

    @pecan.expose('json')
    def patch(self):
        # NOTE(sileht): should we check for "application/json-patch+json"
        # Content-Type ?

        try:
            rt = pecan.request.indexer.get_resource_type(self._name)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        enforce("update resource type", rt)

        # Ensure this is a valid jsonpatch dict
        patch = deserialize_and_validate(
            ResourceTypeJsonPatchSchema,
            expected_content_types=["application/json-patch+json"])

        # Add new attributes to the resource type
        rt_json_current = rt.jsonify()
        try:
            rt_json_next = jsonpatch.apply_patch(rt_json_current, patch)
        except jsonpatch.JsonPatchException as e:
            abort(400, e)
        del rt_json_next['state']

        # Validate that the whole new resource_type is valid
        schema = pecan.request.indexer.get_resource_type_schema()
        try:
            rt_json_next = voluptuous.Schema(schema, required=True)(
                rt_json_next)
        except voluptuous.Error as e:
            abort(400, "Invalid input: %s" % e)

        # Get only newly formatted and deleted attributes
        add_attrs = {k: v for k, v in rt_json_next["attributes"].items()
                     if k not in rt_json_current["attributes"]}
        del_attrs = [k for k in rt_json_current["attributes"]
                     if k not in rt_json_next["attributes"]]

        if not add_attrs and not del_attrs:
            # NOTE(sileht): just returns the resource, the asked changes
            # just do nothing
            return rt

        try:
            add_attrs = schema.attributes_from_dict(add_attrs)
        except resource_type.InvalidResourceAttributeName as e:
            abort(400, e)

        # TODO(sileht): Add a default field on an attribute
        # to be able to fill non-nullable column on sql side.
        # And obviously remove this limitation
        for attr in add_attrs:
            if attr.required:
                abort(400, ValueError("Adding required attributes is not yet "
                                      "possible."))

        try:
            return pecan.request.indexer.update_resource_type(
                self._name, add_attributes=add_attrs,
                del_attributes=del_attrs)
        except indexer.NoSuchResourceType as e:
            abort(400, e)
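The PATCH endpoint above consumes an RFC 6902 JSON Patch limited to /attributes paths. A self-contained sketch of the same jsonpatch call it relies on ("server_group" is an illustrative attribute name):

import jsonpatch

resource_type = {"name": "instance",
                 "attributes": {"host": {"type": "string",
                                         "required": True}}}
patch = [{"op": "add",
          "path": "/attributes/server_group",
          "value": {"type": "string", "required": False}}]

# Same library call used by the controller; returns the patched document.
updated = jsonpatch.apply_patch(resource_type, patch)
assert "server_group" in updated["attributes"]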
    @pecan.expose()
    def delete(self):
        try:
            pecan.request.indexer.get_resource_type(self._name)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        enforce("delete resource type", resource_type)
        try:
            pecan.request.indexer.delete_resource_type(self._name)
        except (indexer.NoSuchResourceType,
                indexer.ResourceTypeInUse) as e:
            abort(400, e)


class ResourceTypesController(rest.RestController):

    @pecan.expose()
    def _lookup(self, name, *remainder):
        return ResourceTypeController(name), remainder

    @pecan.expose('json')
    def post(self):
        schema = pecan.request.indexer.get_resource_type_schema()
        body = deserialize_and_validate(schema)
        body["state"] = "creating"

        try:
            rt = schema.resource_type_from_dict(**body)
        except resource_type.InvalidResourceAttributeName as e:
            abort(400, e)
        except resource_type.InvalidResourceAttributeValue as e:
            abort(400, e)

        enforce("create resource type", body)
        try:
            rt = pecan.request.indexer.create_resource_type(rt)
        except indexer.ResourceTypeAlreadyExists as e:
            abort(409, e)
        set_resp_location_hdr("/resource_type/" + rt.name)
        pecan.response.status = 201
        return rt

    @pecan.expose('json')
    def get_all(self, **kwargs):
        enforce("list resource type", {})
        try:
            return pecan.request.indexer.list_resource_types()
        except indexer.IndexerException as e:
            abort(400, e)


def ResourceSchema(schema):
    base_schema = {
        voluptuous.Optional('started_at'): Timestamp,
@@ -896,45 +984,9 @@ class ResourceController(rest.RestController):
            abort(404, e)


GenericSchema = ResourceSchema({})

InstanceDiskSchema = ResourceSchema({
    "name": six.text_type,
    "instance_id": UUID,
})

InstanceNetworkInterfaceSchema = ResourceSchema({
    "name": six.text_type,
    "instance_id": UUID,
})

InstanceSchema = ResourceSchema({
    "flavor_id": six.text_type,
    voluptuous.Optional("image_ref"): six.text_type,
    "host": six.text_type,
    "display_name": six.text_type,
    voluptuous.Optional("server_group"): six.text_type,
})

VolumeSchema = ResourceSchema({
    voluptuous.Optional("display_name"): voluptuous.Any(None,
                                                        six.text_type),
})

ImageSchema = ResourceSchema({
    "name": six.text_type,
    "container_format": six.text_type,
    "disk_format": six.text_type,
})


# NOTE(sileht): Must be loaded after all ResourceSchema
RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager(
    'gnocchi.controller.schemas')


def schema_for(resource_type):
    return RESOURCE_SCHEMA_MANAGER[resource_type].plugin
    resource_type = pecan.request.indexer.get_resource_type(resource_type)
    return ResourceSchema(resource_type.schema)


def ResourceID(value):
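The hunk above is the pivot of this release: the static, stevedore-managed per-type schemas give way to schemas derived per request from the resource type stored in the indexer. A hedged sketch of that shift (attribute layout and type mapping are assumptions):

import voluptuous

def build_schema(resource_type_attributes):
    # Build a validation schema from an indexer-provided definition.
    schema = {}
    for name, desc in resource_type_attributes.items():
        key = (voluptuous.Required(name) if desc.get("required")
               else voluptuous.Optional(name))
        schema[key] = str  # real code would map desc["type"] to a validator
    return voluptuous.Schema(schema, extra=voluptuous.ALLOW_EXTRA)

schema = build_schema({"host": {"type": "string", "required": True}})
schema({"host": "compute-1"})  # validates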
@@ -1007,21 +1059,50 @@ class ResourcesController(rest.RestController):
        except indexer.IndexerException as e:
            abort(400, e)

    @pecan.expose('json')
    def delete(self, **kwargs):
        # NOTE(sileht): Don't allow empty filter, this is going to delete
        # the entire database.
        attr_filter = deserialize_and_validate(
            SearchResourceTypeController.ResourceSearchSchema)

        # the voluptuous checks everything, but it is better to
        # have this here.
        if not attr_filter:
            abort(400, "caution: the query can not be empty, or it will \
delete entire database")

        user, project = get_user_and_project()
        policy_filter = _get_list_resource_policy_filter(
            "delete resources", self._resource_type, user, project)

        if policy_filter:
            attr_filter = {"and": [policy_filter, attr_filter]}

        try:
            delete_num = pecan.request.indexer.delete_resources(
                self._resource_type, attribute_filter=attr_filter)
        except indexer.IndexerException as e:
            abort(400, e)

        return {"deleted": delete_num}
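A hypothetical client-side use of the new bulk delete: a non-empty attribute filter is mandatory, since an empty query would match, and delete, every resource of the type. URL, port and token handling are assumptions:

import json

import requests  # assumption: not part of this diff

query = {"=": {"host": "compute-1"}}
resp = requests.delete(
    "http://localhost:8041/v1/resource/generic",
    data=json.dumps(query),
    headers={"Content-Type": "application/json",
             "X-Auth-Token": token},
)
print(resp.json())  # e.g. {"deleted": 3}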
class ResourcesByTypeController(rest.RestController):
    @pecan.expose('json')
    def get_all(self):
        return dict(
            (ext.name,
             pecan.request.application_url + '/resource/' + ext.name)
            for ext in RESOURCE_SCHEMA_MANAGER)
            (rt.name,
             pecan.request.application_url + '/resource/' + rt.name)
            for rt in pecan.request.indexer.list_resource_types())

    @pecan.expose()
    def _lookup(self, resource_type, *remainder):
        if resource_type in RESOURCE_SCHEMA_MANAGER:
            return ResourcesController(resource_type), remainder
        else:
            abort(404, indexer.NoSuchResourceType(resource_type))
        try:
            pecan.request.indexer.get_resource_type(resource_type)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        return ResourcesController(resource_type), remainder


def _ResourceSearchSchema(v):
@@ -1051,7 +1132,9 @@ class SearchResourceTypeController(rest.RestController):
                    u"and", u"∨",
                    u"or", u"∧",
                    u"not",
                ): [_ResourceSearchSchema],
                ): voluptuous.All(
                    [_ResourceSearchSchema], voluptuous.Length(min=1)
                )
            }
        )
    )

@@ -1097,10 +1180,11 @@ class SearchResourceTypeController(rest.RestController):
class SearchResourceController(rest.RestController):
    @pecan.expose()
    def _lookup(self, resource_type, *remainder):
        if resource_type in RESOURCE_SCHEMA_MANAGER:
            return SearchResourceTypeController(resource_type), remainder
        else:
            abort(404, indexer.NoSuchResourceType(resource_type))
        try:
            pecan.request.indexer.get_resource_type(resource_type)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        return SearchResourceTypeController(resource_type), remainder


def _MetricSearchSchema(v):
@@ -1243,7 +1327,7 @@ class MetricsMeasuresBatchController(rest.RestController):
    # only the last key will be retained by json python module to
    # build the python dict.
    MeasuresBatchSchema = voluptuous.Schema(
        {UUID: [MeasureSchema]}
        {utils.UUID: [MeasureSchema]}
    )

    @pecan.expose()
@@ -1277,8 +1361,8 @@ class AggregationResourceController(rest.RestController):

    @pecan.expose('json')
    def post(self, start=None, stop=None, aggregation='mean',
             granularity=None, needed_overlap=100.0,
             groupby=None):
             reaggregation=None, granularity=None, needed_overlap=100.0,
             groupby=None, refresh=False):
        # First, set groupby in the right format: a sorted list of unique
        # strings.
        groupby = sorted(set(arg_to_list(groupby)))

@@ -1300,8 +1384,9 @@ class AggregationResourceController(rest.RestController):
            metrics = list(filter(None,
                                  (r.get_metric(self.metric_name)
                                   for r in resources)))
            return AggregatedMetricController.get_cross_metric_measures_from_objs(  # noqa
                metrics, start, stop, aggregation, granularity, needed_overlap)
            return AggregationController.get_cross_metric_measures_from_objs(
                metrics, start, stop, aggregation, reaggregation,
                granularity, needed_overlap, refresh)

        def groupper(r):
            return tuple((attr, r[attr]) for attr in groupby)

@@ -1313,9 +1398,9 @@ class AggregationResourceController(rest.RestController):
                                   for r in resources)))
            results.append({
                "group": dict(key),
                "measures": AggregatedMetricController.get_cross_metric_measures_from_objs(  # noqa
                    metrics, start, stop, aggregation,
                    granularity, needed_overlap)
                "measures": AggregationController.get_cross_metric_measures_from_objs(  # noqa
                    metrics, start, stop, aggregation, reaggregation,
                    granularity, needed_overlap, refresh)
            })

        return results
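The groupper helper keys each resource by the sorted groupby attributes, so every group is a stable, hashable tuple of pairs. A minimal, self-contained demonstration of the same grouping step (sample data is illustrative):

import itertools

groupby = sorted({"host", "project_id"})

def groupper(r):
    return tuple((attr, r[attr]) for attr in groupby)

resources = [{"host": "c1", "project_id": "p1", "id": 1},
             {"host": "c1", "project_id": "p1", "id": 2},
             {"host": "c2", "project_id": "p1", "id": 3}]

# groupby() needs its input sorted by the same key to form whole groups.
for key, members in itertools.groupby(sorted(resources, key=groupper),
                                      groupper):
    print(dict(key), [r["id"] for r in members])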
@@ -1333,18 +1418,100 @@ class AggregationController(rest.RestController):
            # NOTE(sileht): we want the raw 404 message here
            # so use directly pecan
            pecan.abort(404)
        elif resource_type not in RESOURCE_SCHEMA_MANAGER:
            abort(404, indexer.NoSuchResourceType(resource_type))
        try:
            pecan.request.indexer.get_resource_type(resource_type)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        return AggregationResourceController(resource_type,
                                             metric_name), remainder

    @staticmethod
    def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
                                            aggregation='mean',
                                            reaggregation=None,
                                            granularity=None,
                                            needed_overlap=100.0,
                                            refresh=False):
        try:
            needed_overlap = float(needed_overlap)
        except ValueError:
            abort(400, 'needed_overlap must be a number')

        if start is not None:
            try:
                start = Timestamp(start)
            except Exception:
                abort(400, "Invalid value for start")

        if stop is not None:
            try:
                stop = Timestamp(stop)
            except Exception:
                abort(400, "Invalid value for stop")

        if (aggregation
           not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS):
            abort(
                400,
                'Invalid aggregation value %s, must be one of %s'
                % (aggregation,
                   archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS))

        for metric in metrics:
            enforce("get metric", metric)

        number_of_metrics = len(metrics)
        if number_of_metrics == 0:
            return []
        if granularity is not None:
            try:
                granularity = float(granularity)
            except ValueError as e:
                abort(400, "granularity must be a float: %s" % e)
        try:
            if strutils.bool_from_string(refresh):
                pecan.request.storage.process_new_measures(
                    pecan.request.indexer,
                    [six.text_type(m.id) for m in metrics], True)
            if number_of_metrics == 1:
                # NOTE(sileht): don't do the aggregation if we only have one
                # metric
                measures = pecan.request.storage.get_measures(
                    metrics[0], start, stop, aggregation,
                    granularity)
            else:
                measures = pecan.request.storage.get_cross_metric_measures(
                    metrics, start, stop, aggregation,
                    reaggregation,
                    granularity,
                    needed_overlap)
            # Replace timestamp keys by their string versions
            return [(timestamp.isoformat(), offset, v)
                    for timestamp, offset, v in measures]
        except storage.MetricUnaggregatable as e:
            abort(400, ("One of the metrics being aggregated doesn't have "
                        "matching granularity: %s") % str(e))
        except storage.MetricDoesNotExist as e:
            abort(404, e)
        except storage.AggregationDoesNotExist as e:
            abort(404, e)
    @pecan.expose('json')
    def get_metric(self, metric=None, start=None,
                   stop=None, aggregation='mean',
                   granularity=None, needed_overlap=100.0):
        return AggregatedMetricController.get_cross_metric_measures_from_ids(
            arg_to_list(metric), start, stop, aggregation,
            granularity, needed_overlap)
    def get_metric(self, metric=None, start=None, stop=None,
                   aggregation='mean', reaggregation=None, granularity=None,
                   needed_overlap=100.0, refresh=False):
        # Check RBAC policy
        metric_ids = arg_to_list(metric)
        metrics = pecan.request.indexer.list_metrics(ids=metric_ids)
        missing_metric_ids = (set(metric_ids)
                              - set(six.text_type(m.id) for m in metrics))
        if missing_metric_ids:
            # Return one of the missing ones in the error
            abort(404, storage.MetricDoesNotExist(
                missing_metric_ids.pop()))
        return self.get_cross_metric_measures_from_objs(
            metrics, start, stop, aggregation, reaggregation,
            granularity, needed_overlap, refresh)


class CapabilityController(rest.RestController):
@@ -1353,10 +1520,11 @@ class CapabilityController(rest.RestController):
    def get():
        aggregation_methods = set(
            archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)
        aggregation_methods.update(
            ext.name for ext in extension.ExtensionManager(
                namespace='gnocchi.aggregates'))
        return dict(aggregation_methods=aggregation_methods)
        return dict(aggregation_methods=aggregation_methods,
                    dynamic_aggregation_methods=[
                        ext.name for ext in extension.ExtensionManager(
                            namespace='gnocchi.aggregates')
                    ])


class StatusController(rest.RestController):
@@ -1364,7 +1532,8 @@ class StatusController(rest.RestController):
    @pecan.expose('json')
    def get(details=True):
        enforce("get status", {})
        report = pecan.request.storage.measures_report(details)
        report = pecan.request.storage.measures_report(
            strutils.bool_from_string(details))
        report_dict = {"storage": {"summary": report['summary']}}
        if 'details' in report:
            report_dict["storage"]["measures_to_process"] = report['details']

@@ -1398,6 +1567,7 @@ class V1Controller(object):
            "metric": MetricsController(),
            "batch": BatchController(),
            "resource": ResourcesByTypeController(),
            "resource_type": ResourceTypesController(),
            "aggregation": AggregationController(),
            "capabilities": CapabilityController(),
            "status": StatusController(),

@@ -18,11 +18,11 @@ import uuid

from oslo_config import cfg
from oslo_log import log
from oslo_middleware import cors
from oslo_policy import policy
from paste import deploy
import pecan
import webob.exc
from werkzeug import serving

from gnocchi import exceptions
from gnocchi import indexer as gnocchi_indexer

@@ -107,21 +107,15 @@ def load_app(conf, appname=None, indexer=None, storage=None,
        APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s" % cfg_path)
    return deploy.loadapp("config:" + cfg_path, name=appname,
                          global_conf={'configkey': configkey})
    app = deploy.loadapp("config:" + cfg_path, name=appname,
                         global_conf={'configkey': configkey})
    return cors.CORS(app, conf=conf)


def _setup_app(root, conf, indexer, storage, not_implemented_middleware):
    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = conf.api.pecan_debug
    if conf.api.workers != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning('pecan_debug cannot be enabled, if workers is > 1, '
                    'the value is overrided with False')

    app = pecan.make_app(
        root,
        debug=pecan_debug,
        debug=conf.api.pecan_debug,
        hooks=(GnocchiHook(storage, indexer, conf),),
        guess_content_type_from_ext=False,
        custom_renderers={'json': OsloJSONRenderer},

@@ -133,30 +127,11 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware):
    return app


class WerkzeugApp(object):
    # NOTE(sileht): The purpose of this class is only to be used
    # with werkzeug to create the app after the werkzeug
    # fork gnocchi-api and avoid creation of connection of the
    # storage/indexer by the main process.

    def __init__(self, conf):
        self.app = None
        self.conf = conf

    def __call__(self, environ, start_response):
        if self.app is None:
            self.app = load_app(conf=self.conf)
        return self.app(environ, start_response)


def build_server():
    conf = service.prepare_service()
    serving.run_simple(conf.api.host, conf.api.port,
                       WerkzeugApp(conf),
                       processes=conf.api.workers)


def app_factory(global_config, **local_conf):
    global APPCONFIGS
    appconfig = APPCONFIGS.get(global_config.get('configkey'))
    return _setup_app(root=local_conf.get('root'), **appconfig)


def build_wsgi_app():
    return load_app(service.prepare_service())

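The WerkzeugApp wrapper defers load_app() until the first request, so worker processes forked by werkzeug's serving module each build their own storage and indexer connections instead of inheriting one from the parent. A generic sketch of that lazy-initialization pattern (names are illustrative):

class LazyWSGIApp(object):
    def __init__(self, factory):
        self.factory = factory
        self.app = None

    def __call__(self, environ, start_response):
        if self.app is None:           # first request in this process
            self.app = self.factory()  # connections created post-fork
        return self.app(environ, start_response)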
@@ -1,8 +1,6 @@
#
# Copyright 2014 eNovance
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at

@@ -19,11 +17,6 @@
See http://pecan.readthedocs.org/en/latest/deployment.html for details.
"""
from gnocchi import service
from gnocchi.rest import app

# Initialize the oslo configuration library and logging
conf = service.prepare_service()
# The pecan debugger cannot be used in wsgi mode
conf.set_default('pecan_debug', False, group='api')
application = app.load_app(conf)
application = app.build_wsgi_app()

@@ -15,13 +15,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import multiprocessing

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log
from oslo_policy import opts as policy_opts
import pbr.version
from six.moves.urllib import parse as urlparse

from gnocchi import archive_policy

@@ -34,6 +34,7 @@ def prepare_service(args=None, conf=None,
                    default_config_files=None):
    if conf is None:
        conf = cfg.ConfigOpts()
    opts.set_defaults()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    log.register_options(conf)
    db_options.set_defaults(conf)

@@ -54,11 +55,11 @@ def prepare_service(args=None, conf=None,
    except NotImplementedError:
        default_workers = 1

    conf.set_default("workers", default_workers, group="api")
    conf.set_default("workers", default_workers, group="metricd")

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files)
         default_config_files=default_config_files,
         version=pbr.version.VersionInfo('gnocchi').version_string())

    # If no coordination URL is provided, default to using the indexer as
    # coordinator

@@ -72,7 +73,9 @@ def prepare_service(args=None, conf=None,
            urlparse.urlunparse(parsed),
            "storage")

    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     ["passlib.utils.compat=INFO"])
    log.setup(conf, 'gnocchi')
    conf.log_opt_values(LOG, logging.DEBUG)
    conf.log_opt_values(LOG, log.DEBUG)

    return conf

@@ -44,8 +44,8 @@ class Stats(object):
                self.conf.statsd.user_id,
                self.conf.statsd.project_id)
        except indexer.ResourceAlreadyExists:
            LOG.info("Resource %s already exists"
                     % self.conf.statsd.resource_id)
            LOG.debug("Resource %s already exists"
                      % self.conf.statsd.resource_id)
        else:
            LOG.info("Created resource %s" % self.conf.statsd.resource_id)
        self.gauges = {}

@@ -186,6 +186,9 @@ def start():
    loop.call_later(conf.statsd.flush_delay, _flush)
    transport, protocol = loop.run_until_complete(listen)

    LOG.info("Started on %s:%d" % (conf.statsd.host, conf.statsd.port))
    LOG.info("Flush delay: %d seconds" % conf.statsd.flush_delay)

    try:
        loop.run_forever()
    except KeyboardInterrupt:

@@ -13,10 +13,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import operator

from oslo_config import cfg
from oslo_utils import timeutils
from oslo_log import log
from stevedore import driver

from gnocchi import exceptions

@@ -28,21 +27,25 @@ OPTS = [
               default='file',
               help='Storage driver to use'),
    cfg.IntOpt('metric_processing_delay',
               default=5,
               default=60,
               help="How many seconds to wait between "
                    "new metric measure processing"),
                    "scheduling new metrics to process"),
    cfg.IntOpt('metric_reporting_delay',
               default=10,
               default=120,
               help="How many seconds to wait between "
                    "metric ingestion reporting"),
    cfg.IntOpt('metric_cleanup_delay',
               default=300,
               help="How many seconds to wait between "
                    "cleaning of expired data"),
]

LOG = logging.getLogger(__name__)
LOG = log.getLogger(__name__)


class Measure(object):
    def __init__(self, timestamp, value):
        self.timestamp = timeutils.normalize_time(timestamp)
        self.timestamp = timestamp
        self.value = value

    def __iter__(self):

@@ -168,11 +171,10 @@ class StorageDriver(object):
    def upgrade(index):
        pass

    def process_background_tasks(self, index, block_size=128, sync=False):
    def process_background_tasks(self, index, metrics, sync=False):
        """Process background tasks for this storage.

        This calls :func:`process_measures` to process new measures and
        :func:`expunge_metrics` to expunge deleted metrics.
        This calls :func:`process_new_measures` to process new measures

        :param index: An indexer to be used for querying metrics
        :param block_size: number of metrics to process

@@ -180,24 +182,24 @@ class StorageDriver(object):
                     on error
        :type sync: bool
        """
        LOG.debug("Processing new and to delete measures")
        LOG.debug("Processing new measures")
        try:
            self.process_measures(index, block_size, sync)
            self.process_new_measures(index, metrics, sync)
        except Exception:
            if sync:
                raise
            LOG.error("Unexpected error during measures processing",
                      exc_info=True)
        LOG.debug("Expunging deleted metrics")
        try:
            self.expunge_metrics(index, sync)
        except Exception:
            if sync:
                raise
            LOG.error("Unexpected error during deleting metrics",
                      exc_info=True)

    def expunge_metrics(self, index, sync=False):
        """Remove deleted metrics

        :param index: An indexer to be used for querying metrics
        :param sync: If True, then delete everything synchronously and raise
                     on error
        :type sync: bool
        """

        metrics_to_expunge = index.list_metrics(status='delete')
        for m in metrics_to_expunge:
            try:

@@ -225,7 +227,7 @@ class StorageDriver(object):
        raise exceptions.NotImplementedError

    @staticmethod
    def process_measures(indexer=None, block_size=None, sync=False):
    def process_new_measures(indexer, metrics, sync=False):
        """Process added measures in background.

        Some drivers might need to have a background task running that process

@@ -264,6 +266,7 @@ class StorageDriver(object):
    @staticmethod
    def get_cross_metric_measures(metrics, from_timestamp=None,
                                  to_timestamp=None, aggregation='mean',
                                  reaggregation=None,
                                  granularity=None,
                                  needed_overlap=None):
        """Get aggregated measures of multiple entities.

@@ -273,6 +276,8 @@ class StorageDriver(object):
        :param to timestamp: The timestamp to get the measure to.
        :param granularity: The granularity to retrieve.
        :param aggregation: The type of aggregation to retrieve.
        :param reaggregation: The type of aggregation to compute
                              on the retrieved measures.
        """
        for metric in metrics:
            if aggregation not in metric.archive_policy.aggregation_methods:
@@ -16,65 +16,73 @@
# under the License.
import collections
import datetime
import logging
import multiprocessing
import threading
import time
import itertools
import operator
import struct
import uuid

from concurrent import futures
import iso8601
import msgpack
from oslo_config import cfg
from oslo_serialization import msgpackutils
from oslo_log import log
from oslo_utils import timeutils
import pandas
import six
import six.moves
from tooz import coordination

from gnocchi import carbonara
from gnocchi import storage
from gnocchi import utils


OPTS = [
    cfg.IntOpt('aggregation_workers_number',
               default=1, min=1,
               help='Number of workers to run during adding new measures for '
                    'pre-aggregation needs.'),
                    'pre-aggregation needs. Due to the Python GIL, '
                    '1 is usually faster, unless you have high latency I/O'),
    cfg.StrOpt('coordination_url',
               secret=True,
               help='Coordination driver URL'),

]

LOG = logging.getLogger(__name__)
LOG = log.getLogger(__name__)


class CorruptionError(ValueError):
    """Data corrupted, damn it."""

    def __init__(self, message):
        super(CorruptionError, self).__init__(message)


class CarbonaraBasedStorage(storage.StorageDriver):
    MEASURE_PREFIX = "measure"
    UPGRADE_BATCH_SIZE = 1000

    def __init__(self, conf):
        super(CarbonaraBasedStorage, self).__init__(conf)
        self.coord = coordination.get_coordinator(
            conf.coordination_url,
            str(uuid.uuid4()).encode('ascii'))
        self.coord.start()
        if conf.aggregation_workers_number is None:
            try:
                self.aggregation_workers_number = multiprocessing.cpu_count()
            except NotImplementedError:
                self.aggregation_workers_number = 2
        self.aggregation_workers_number = conf.aggregation_workers_number
        if self.aggregation_workers_number == 1:
            # NOTE(jd) Avoid using futures at all if we don't want any threads.
            self._map_in_thread = self._map_no_thread
        else:
            self.aggregation_workers_number = conf.aggregation_workers_number
        self.partition = 0
        self.heartbeater = threading.Thread(target=self._heartbeat,
                                            name='heartbeat')
        self.heartbeater.setDaemon(True)
        self.heartbeater.start()
            self._map_in_thread = self._map_in_futures_threads
        self.start()

    def _heartbeat(self):
        while True:
            # FIXME(jd) Why 10? Why not. We should have a way to find out
            # what's the best value here, but it depends on the timeout used by
            # the driver; tooz should help us here!
            time.sleep(10)
            self.coord.heartbeat()
    @utils.retry
    def start(self):
        try:
            self.coord.start(start_heart=True)
        except Exception as e:
            LOG.error("Unable to start coordinator: %s" % e)
            raise utils.Retry(e)

    def stop(self):
        self.coord.stop()
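The release drops the hand-rolled heartbeat thread and lets tooz run it via start_heart=True. A hedged sketch of what the removed pattern did, for comparison (interval and names are assumptions):

import threading
import time

def run_heartbeat(coordinator, interval=10.0):
    """Old-style daemon thread keeping a tooz membership alive."""
    def _beat():
        while True:
            time.sleep(interval)
            coordinator.heartbeat()
    t = threading.Thread(target=_beat, name='heartbeat')
    t.setDaemon(True)
    t.start()
    return t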
@@ -84,25 +92,67 @@ class CarbonaraBasedStorage(storage.StorageDriver):
        return self.coord.get_lock(lock_name)

    @staticmethod
    def _get_measures(metric, timestamp_key, aggregation, granularity):
    def _get_measures(metric, timestamp_key, aggregation, granularity,
                      version=3):
        raise NotImplementedError

    @staticmethod
    def _get_unaggregated_timeserie(metric):
    def _get_unaggregated_timeserie(metric, version=3):
        raise NotImplementedError

    def _get_unaggregated_timeserie_and_unserialize(
            self, metric, block_size, back_window):
        """Retrieve unaggregated timeserie for a metric and unserialize it.

        Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot
        be retrieved, returns None.

        """
        with timeutils.StopWatch() as sw:
            raw_measures = (
                self._get_unaggregated_timeserie(
                    metric)
            )
            LOG.debug(
                "Retrieve unaggregated measures "
                "for %s in %.2fs"
                % (metric.id, sw.elapsed()))
        try:
            return carbonara.BoundTimeSerie.unserialize(
                raw_measures, block_size, back_window)
        except ValueError:
            raise CorruptionError(
                "Data corruption detected for %s "
                "unaggregated timeserie" % metric.id)

    @staticmethod
    def _store_unaggregated_timeserie(metric, data, version=3):
        raise NotImplementedError

    @staticmethod
    def _store_unaggregated_timeserie(metric, data):
    def _store_metric_measures(metric, timestamp_key, aggregation,
                               granularity, data, offset=None, version=3):
        raise NotImplementedError

    @staticmethod
    def _store_metric_measures(metric, aggregation, granularity, data):
    def _delete_unaggregated_timeserie(metric, version=3):
        raise NotImplementedError

    @staticmethod
    def _list_split_keys_for_metric(metric, aggregation, granularity):
    def _list_split_keys_for_metric(metric, aggregation, granularity,
                                    version=None):
        raise NotImplementedError

    @staticmethod
    def _version_check(name, v):
        """Validate object matches expected version.

        Version should be last attribute and start with 'v'
        """
        attrs = name.split("_")
        return not v or (not attrs[-1].startswith('v') if v == 2
                         else attrs[-1] == 'v%s' % v)

    def get_measures(self, metric, from_timestamp=None, to_timestamp=None,
                     aggregation='mean', granularity=None):
        super(CarbonaraBasedStorage, self).get_measures(
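A quick illustration of the _version_check() rule above: from v3 on, object names end in a "_v<N>" suffix, while v2 objects carry no suffix at all (object names here are made up for the demonstration):

def _version_check(name, v):
    attrs = name.split("_")
    return not v or (not attrs[-1].startswith('v') if v == 2
                     else attrs[-1] == 'v%s' % v)

assert _version_check("metric_mean_300.0_v3", 3)
assert not _version_check("metric_mean_300.0", 3)
assert _version_check("metric_mean_300.0", 2)   # bare name counts as v2
assert _version_check("anything", None)         # None disables the check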
@@ -125,12 +175,13 @@ class CarbonaraBasedStorage(storage.StorageDriver):
                                        aggregation, granularity):
        data = self._get_measures(metric, key, aggregation, granularity)
        try:
            return carbonara.TimeSerie.unserialize(data)
            return carbonara.AggregatedTimeSerie.unserialize(
                data, key, aggregation, granularity)
        except ValueError:
            LOG.error("Data corruption detected for %s "
                      "aggregated `%s' timeserie, granularity `%s' "
                      "around time `%s', ignoring."
                      % (metric.id, aggregation, granularity, key))
            LOG.error("Data corruption detected for %s "
                      "aggregated `%s' timeserie, granularity `%s' "
                      "around time `%s', ignoring."
                      % (metric.id, aggregation, granularity, key))

    def _get_measures_timeserie(self, metric,
                                aggregation, granularity,

@@ -149,39 +200,23 @@ class CarbonaraBasedStorage(storage.StorageDriver):
            all_keys = self._list_split_keys_for_metric(
                metric, aggregation, granularity)
        except storage.MetricDoesNotExist:
            # This can happen if it's an old metric with a TimeSerieArchive
            all_keys = None

        if not all_keys:
            # It does not mean we have no data: it can be an old metric with a
            # TimeSerieArchive.
            try:
                data = self._get_metric_archive(metric, aggregation)
            except (storage.MetricDoesNotExist,
                    storage.AggregationDoesNotExist):
                # It really does not exist
                for d in metric.archive_policy.definition:
                    if d.granularity == granularity:
                        return carbonara.AggregatedTimeSerie(
                            aggregation_method=aggregation,
                            sampling=granularity,
                            max_size=d.points)
                raise storage.GranularityDoesNotExist(metric, granularity)
            else:
                archive = carbonara.TimeSerieArchive.unserialize(data)
                # It's an old metric with an TimeSerieArchive!
                for ts in archive.agg_timeseries:
                    if ts.sampling == granularity:
                        return ts
                raise storage.GranularityDoesNotExist(metric, granularity)
            for d in metric.archive_policy.definition:
                if d.granularity == granularity:
                    return carbonara.AggregatedTimeSerie(
                        sampling=granularity,
                        aggregation_method=aggregation,
                        max_size=d.points)
            raise storage.GranularityDoesNotExist(metric, granularity)

        if from_timestamp:
            from_timestamp = carbonara.AggregatedTimeSerie.get_split_key(
                from_timestamp, granularity)
            from_timestamp = str(
                carbonara.SplitKey.from_timestamp_and_sampling(
                    from_timestamp, granularity))

        if to_timestamp:
            to_timestamp = carbonara.AggregatedTimeSerie.get_split_key(
                to_timestamp, granularity)
            to_timestamp = str(
                carbonara.SplitKey.from_timestamp_and_sampling(
                    to_timestamp, granularity))

        timeseries = filter(
            lambda x: x is not None,
@@ -194,48 +229,121 @@ class CarbonaraBasedStorage(storage.StorageDriver):
        )

        return carbonara.AggregatedTimeSerie.from_timeseries(
            timeseries,
            aggregation_method=aggregation,
            sampling=granularity,
            aggregation_method=aggregation,
            timeseries=timeseries,
            max_size=points)

    def _add_measures(self, aggregation, archive_policy_def,
                      metric, timeserie):
        with timeutils.StopWatch() as sw:
            ts = self._get_measures_timeserie(metric, aggregation,
                                              archive_policy_def.granularity,
                                              timeserie.first, timeserie.last)
        LOG.debug("Retrieve measures"
                  "for %s/%s/%s in %.2fs"
                  % (metric.id, aggregation, archive_policy_def.
                     granularity, sw.elapsed()))
        ts.update(timeserie)
        with timeutils.StopWatch() as sw:
            for key, split in ts.split():
                self._store_metric_measures(metric, key, aggregation,
                                            archive_policy_def.granularity,
                                            split.serialize())
        LOG.debug("Store measures for %s/%s/%s in %.2fs"
                  % (metric.id, aggregation,
                     archive_policy_def.granularity, sw.elapsed()))
    def _store_timeserie_split(self, metric, key, split,
                               aggregation, archive_policy_def,
                               oldest_mutable_timestamp):
        # NOTE(jd) We write the full split only if the driver works that way
        # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range.
        write_full = self.WRITE_FULL or oldest_mutable_timestamp >= next(key)
        key_as_str = str(key)
        if write_full:
            try:
                existing = self._get_measures_and_unserialize(
                    metric, key_as_str, aggregation,
                    archive_policy_def.granularity)
            except storage.AggregationDoesNotExist:
                pass
            else:
                if existing is not None:
                    if split is None:
                        split = existing
                    else:
                        split.merge(existing)

        if ts.last and archive_policy_def.timespan:
            with timeutils.StopWatch() as sw:
                oldest_point_to_keep = ts.last - datetime.timedelta(
                    seconds=archive_policy_def.timespan)
                self._delete_metric_measures_before(
                    metric, aggregation, archive_policy_def.granularity,
                    oldest_point_to_keep)
            LOG.debug("Expire measures for %s/%s/%s in %.2fs"
                      % (metric.id, aggregation,
                         archive_policy_def.granularity, sw.elapsed()))
        offset, data = split.serialize(key, compressed=write_full)

        return self._store_metric_measures(
            metric, key_as_str, aggregation, archive_policy_def.granularity,
            data, offset=offset)

    def _add_measures(self, aggregation, archive_policy_def,
                      metric, grouped_serie,
                      previous_oldest_mutable_timestamp,
                      oldest_mutable_timestamp):
        ts = carbonara.AggregatedTimeSerie.from_grouped_serie(
            grouped_serie, archive_policy_def.granularity,
            aggregation, max_size=archive_policy_def.points)

        # Don't do anything if the timeserie is empty
        if not ts:
            return

        # We only need to check for rewrite if driver is not in WRITE_FULL mode
        # and if we already stored splits once
        need_rewrite = (
            not self.WRITE_FULL
            and previous_oldest_mutable_timestamp is not None
        )

        if archive_policy_def.timespan or need_rewrite:
            existing_keys = self._list_split_keys_for_metric(
                metric, aggregation, archive_policy_def.granularity)

        # First delete old splits
        if archive_policy_def.timespan:
            oldest_point_to_keep = ts.last - datetime.timedelta(
                seconds=archive_policy_def.timespan)
            oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep)
            oldest_key_to_keep_s = str(oldest_key_to_keep)
            for key in list(existing_keys):
                # NOTE(jd) Only delete if the key is strictly inferior to
                # the timestamp; we don't delete any timeserie split that
                # contains our timestamp, so we prefer to keep a bit more
                # than deleting too much
                if key < oldest_key_to_keep_s:
                    self._delete_metric_measures(
                        metric, key, aggregation,
                        archive_policy_def.granularity)
                    existing_keys.remove(key)
        else:
            oldest_key_to_keep = carbonara.SplitKey(0)

        # Rewrite all read-only splits just for fun (and compression). This
        # only happens if `previous_oldest_mutable_timestamp' exists, which
        # means we already wrote some splits at some point – so this is not the
        # first time we treat this timeserie.
        if need_rewrite:
            previous_oldest_mutable_key = str(ts.get_split_key(
                previous_oldest_mutable_timestamp))
            oldest_mutable_key = str(ts.get_split_key(
                oldest_mutable_timestamp))

            if previous_oldest_mutable_key != oldest_mutable_key:
                for key in existing_keys:
                    if previous_oldest_mutable_key <= key < oldest_mutable_key:
                        # NOTE(jd) Rewrite it entirely for fun (and later for
                        # compression). For that, we just pass None as split.
                        self._store_timeserie_split(
                            metric, carbonara.SplitKey.from_key_string(
                                key, archive_policy_def.granularity),
                            None, aggregation, archive_policy_def,
                            oldest_mutable_timestamp)

        for key, split in ts.split():
            if key >= oldest_key_to_keep:
                self._store_timeserie_split(
                    metric, key, split, aggregation, archive_policy_def,
                    oldest_mutable_timestamp)

    def add_measures(self, metric, measures):
        self._store_measures(metric, msgpackutils.dumps(
            list(map(tuple, measures))))
        measures = list(measures)
        data = struct.pack(
            "<" + self._MEASURE_SERIAL_FORMAT * len(measures),
            *list(
                itertools.chain(
                    # NOTE(jd) int(10e8) to avoid rounding errors
                    *((int(utils.datetime_to_unix(timestamp) * int(10e8)),
                       value)
                      for timestamp, value in measures))))
        self._store_new_measures(metric, data)

    @staticmethod
    def _store_measures(metric, data):
    def _store_new_measures(metric, data):
        raise NotImplementedError

    @staticmethod
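The new measure encoding replaces the msgpack payload with little-endian pairs of (uint64 nanosecond timestamp, float64 value). A self-contained round-trip of that format, with made-up sample values:

import datetime
import struct

fmt = "Qd"  # one (timestamp, value) pair, as in _MEASURE_SERIAL_FORMAT
measures = [(datetime.datetime(2016, 9, 21, 12, 0, 0), 42.0),
            (datetime.datetime(2016, 9, 21, 12, 0, 1), 43.5)]

epoch = datetime.datetime(1970, 1, 1)
flat = []
for ts, value in measures:
    ns = int((ts - epoch).total_seconds() * int(10e8))  # same 10e8 trick
    flat.extend((ns, value))
data = struct.pack("<" + fmt * len(measures), *flat)

# Decoding mirrors _unserialize_measures() further down in this diff.
n = len(data) // struct.calcsize(fmt)
decoded = struct.unpack("<" + fmt * n, data)
assert decoded[1::2] == (42.0, 43.5)  # every odd slot is a value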
@@ -243,7 +351,7 @@ class CarbonaraBasedStorage(storage.StorageDriver):
        raise NotImplementedError

    @staticmethod
    def _list_metric_with_measures_to_process(full=False):
    def list_metric_with_measures_to_process(size, part, full=False):
        raise NotImplementedError

    @staticmethod

@@ -254,32 +362,23 @@ class CarbonaraBasedStorage(storage.StorageDriver):
        with self._lock(metric.id)(blocking=sync):
            # If the metric has never been upgraded, we need to delete this
            # here too
            self._delete_metric_archives(metric)
            self._delete_metric(metric)

    def _delete_metric_measures_before(self, metric, aggregation_method,
                                       granularity, timestamp):
        """Delete measures for a metric before a timestamp."""
        ts = carbonara.AggregatedTimeSerie.get_split_key(
            timestamp, granularity)
        for key in self._list_split_keys_for_metric(
                metric, aggregation_method, granularity):
            # NOTE(jd) Only delete if the key is strictly inferior to
            # the timestamp; we don't delete any timeserie split that
            # contains our timestamp, so we prefer to keep a bit more
            # than deleting too much
            if key < ts:
                self._delete_metric_measures(
                    metric, key, aggregation_method, granularity)

    @pecan.expose()
    @staticmethod
    def _delete_metric_measures(metric, timestamp_key,
                                aggregation, granularity):
                                aggregation, granularity, version=3):
        raise NotImplementedError

    @staticmethod
    def _unserialize_measures(data):
        return msgpackutils.loads(data)
    _MEASURE_SERIAL_FORMAT = "Qd"
    _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT)

    def _unserialize_measures(self, data):
        nb_measures = len(data) // self._MEASURE_SERIAL_LEN
        measures = struct.unpack(
            "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data)
        return six.moves.zip(
            pandas.to_datetime(measures[::2], unit='ns'),
            itertools.islice(measures, 1, len(measures), 2))

    def measures_report(self, details=True):
        metrics, measures, full_details = self._build_report(details)
@@ -291,42 +390,78 @@ class CarbonaraBasedStorage(storage.StorageDriver):
    def _check_for_metric_upgrade(self, metric):
        lock = self._lock(metric.id)
        with lock:
            for agg_method in metric.archive_policy.aggregation_methods:
            try:
                old_unaggregated = self._get_unaggregated_timeserie_and_unserialize_v2(  # noqa
                    metric)
            except (storage.MetricDoesNotExist, CorruptionError) as e:
                # NOTE(jd) This case is not really possible – you can't
                # have archives with splits and no unaggregated
                # timeserie…
                LOG.error(
                    "Unable to find unaggregated timeserie for "
                    "metric %s, unable to upgrade data: %s",
                    metric.id, e)
                return
            unaggregated = carbonara.BoundTimeSerie(
                ts=old_unaggregated.ts,
                block_size=metric.archive_policy.max_block_size,
                back_window=metric.archive_policy.back_window)
            # Upgrade unaggregated timeserie to v3
            self._store_unaggregated_timeserie(
                metric, unaggregated.serialize())
            oldest_mutable_timestamp = (
                unaggregated.first_block_timestamp()
            )
            for agg_method, d in itertools.product(
                    metric.archive_policy.aggregation_methods,
                    metric.archive_policy.definition):
                LOG.debug(
                    "Checking if the metric %s needs migration for %s"
                    % (metric, agg_method))

                try:
                    data = self._get_metric_archive(metric, agg_method)
                    all_keys = self._list_split_keys_for_metric(
                        metric, agg_method, d.granularity, version=2)
                except storage.MetricDoesNotExist:
                    # Just try the next metric, this one has no measures
                    break
                except storage.AggregationDoesNotExist:
                    # This should not happen, but you never know.
                    LOG.warn(
                        "Metric %s does not have an archive "
                        "for aggregation %s, "
                        "no migration can be done" % (metric, agg_method))
                else:
                    LOG.info("Migrating metric %s to new format" % metric)
                    archive = carbonara.TimeSerieArchive.unserialize(data)
                    for ts in archive.agg_timeseries:
                        # Store each AggregatedTimeSerie independently
                        for key, split in ts.split():
                            self._store_metric_measures(metric, key,
                                                        ts.aggregation_method,
                                                        ts.sampling,
                                                        split.serialize())
            self._delete_metric_archives(metric)
                    timeseries = filter(
                        lambda x: x is not None,
                        self._map_in_thread(
                            self._get_measures_and_unserialize_v2,
                            ((metric, key, agg_method, d.granularity)
                             for key in all_keys))
                    )
                    ts = carbonara.AggregatedTimeSerie.from_timeseries(
                        sampling=d.granularity,
                        aggregation_method=agg_method,
                        timeseries=timeseries, max_size=d.points)
                    for key, split in ts.split():
                        self._store_timeserie_split(
                            metric, key, split,
                            ts.aggregation_method,
                            d, oldest_mutable_timestamp)
                    for key in all_keys:
                        self._delete_metric_measures(
                            metric, key, agg_method,
                            d.granularity, version=None)
            self._delete_unaggregated_timeserie(metric, version=None)
            LOG.info("Migrated metric %s to new format" % metric)

    def upgrade(self, index):
        self._map_in_thread(
            self._check_for_metric_upgrade,
            ((metric,) for metric in index.list_metrics()))
        marker = None
        while True:
            metrics = index.list_metrics(limit=self.UPGRADE_BATCH_SIZE,
                                         marker=marker)
            for m in metrics:
                self._check_for_metric_upgrade(m)
            if len(metrics) == 0:
                break
            marker = metrics[-1].id
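The rewritten upgrade() replaces one unbounded list_metrics() call with marker-paginated batches of UPGRADE_BATCH_SIZE. The generic shape of that keyset-pagination pattern, sketched with assumed names:

def iter_batches(list_fn, batch_size=1000):
    """Yield items page by page using marker (keyset) pagination."""
    marker = None
    while True:
        batch = list_fn(limit=batch_size, marker=marker)
        if not batch:
            break
        for item in batch:
            yield item
        marker = batch[-1].id  # resume after the last seen primary key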
    def process_measures(self, indexer, block_size, sync=False):
        metrics_to_process = self._list_metric_with_measures_to_process(
            block_size, full=sync)
    def process_new_measures(self, indexer, metrics_to_process, sync=False):
        metrics = indexer.list_metrics(ids=metrics_to_process)
        # This builds the list of deleted metrics, i.e. the metrics we have
        # measures to process for but that are not in the indexer anymore.

@@ -336,8 +471,12 @@ class CarbonaraBasedStorage(storage.StorageDriver):
            # NOTE(jd): We need to lock the metric otherwise we might delete
            # measures that another worker might be processing. Deleting
            # measurement files under its feet is not nice!
            with self._lock(metric_id)(blocking=sync):
                self._delete_unprocessed_measures_for_metric_id(metric_id)
            try:
                with self._lock(metric_id)(blocking=sync):
                    self._delete_unprocessed_measures_for_metric_id(metric_id)
            except coordination.LockAcquireFailed:
                LOG.debug("Cannot acquire lock for metric %s, postponing "
                          "unprocessed measures deletion" % metric_id)
        for metric in metrics:
            lock = self._lock(metric.id)
            agg_methods = list(metric.archive_policy.aggregation_methods)

@@ -346,6 +485,7 @@ class CarbonaraBasedStorage(storage.StorageDriver):
            # get back later to it if needed.
            if lock.acquire(blocking=sync):
                try:
                    locksw = timeutils.StopWatch().start()
                    LOG.debug("Processing measures for %s" % metric)
                    with self._process_measure_for_metric(metric) as measures:
                        # NOTE(mnaser): The metric could have been handled by

@@ -355,16 +495,14 @@ class CarbonaraBasedStorage(storage.StorageDriver):
                                      % metric)
                            continue

                        measures = sorted(measures, key=operator.itemgetter(0))

                        block_size = metric.archive_policy.max_block_size
                        try:
                            with timeutils.StopWatch() as sw:
                                raw_measures = (
                                    self._get_unaggregated_timeserie(
                                        metric)
                                )
                                LOG.debug(
                                    "Retrieve unaggregated measures "
                                    "for %s in %.2fs"
                                    % (metric.id, sw.elapsed()))
                            ts = self._get_unaggregated_timeserie_and_unserialize(  # noqa
                                metric,
                                block_size=block_size,
                                back_window=metric.archive_policy.back_window)
                        except storage.MetricDoesNotExist:
                            try:
                                self._create_metric(metric)

@@ -372,46 +510,78 @@ class CarbonaraBasedStorage(storage.StorageDriver):
                                # Created in the mean time, do not worry
                                pass
                            ts = None
                        else:
                            try:
                                ts = carbonara.BoundTimeSerie.unserialize(
                                    raw_measures)
                            except ValueError:
                                ts = None
                                LOG.error(
                                    "Data corruption detected for %s "
                                    "unaggregated timeserie, "
                                    "recreating an empty one."
                                    % metric.id)
                        except CorruptionError as e:
                            LOG.error(e)
                            ts = None

                        if ts is None:
                            # This is the first time we treat measures for this
                            # metric, or data are corrupted, create a new one
                            mbs = metric.archive_policy.max_block_size
                            ts = carbonara.BoundTimeSerie(
                                block_size=mbs,
                                block_size=block_size,
                                back_window=metric.archive_policy.back_window)
                            current_first_block_timestamp = None
                        else:
                            current_first_block_timestamp = (
                                ts.first_block_timestamp()
                            )

                        # NOTE(jd) This is Python where you need such
                        # hack to pass a variable around a closure,
                        # sorry.
                        computed_points = {"number": 0}

                        def _map_add_measures(bound_timeserie):
                            self._map_in_thread(
                                self._add_measures,
                                ((aggregation, d, metric, bound_timeserie)
                                 for aggregation in agg_methods
                                 for d in metric.archive_policy.definition))
                            # NOTE (gordc): bound_timeserie is entire set of
                            # unaggregated measures matching largest
                            # granularity. the following takes only the points
                            # affected by new measures for specific granularity
                            tstamp = max(bound_timeserie.first, measures[0][0])
                            computed_points['number'] = len(bound_timeserie)
                            for d in metric.archive_policy.definition:
                                ts = bound_timeserie.group_serie(
                                    d.granularity, carbonara.round_timestamp(
                                        tstamp, d.granularity * 10e8))
                                self._map_in_thread(
                                    self._add_measures,
                                    ((aggregation, d, metric, ts,
                                      current_first_block_timestamp,
                                      bound_timeserie.first_block_timestamp())
                                     for aggregation in agg_methods))

                        with timeutils.StopWatch() as sw:
                            ts.set_values(
                                measures,
                                before_truncate_callback=_map_add_measures,
                                ignore_too_old_timestamps=True)
                        elapsed = sw.elapsed()
                        number_of_operations = (
                            len(agg_methods)
                            * len(metric.archive_policy.definition)
                        )

                        if elapsed > 0:
                            perf = " (%d points/s, %d measures/s)" % (
                                ((number_of_operations
                                  * computed_points['number']) / elapsed),
                                ((number_of_operations
                                  * len(measures)) / elapsed)
                            )
                        else:
                            perf = ""
                        LOG.debug(
                            "Computed new metric %s with %d new measures "
                            "in %.2f seconds"
                            % (metric.id, len(measures), sw.elapsed()))
                            "in %.2f seconds%s"
                            % (metric.id, len(measures), elapsed, perf))

                        self._store_unaggregated_timeserie(metric,
                                                           ts.serialize())

                    LOG.debug("Metric %s locked during %.2f seconds" %
                              (metric.id, locksw.elapsed()))
                except Exception:
                    LOG.debug("Metric %s locked during %.2f seconds" %
                              (metric.id, locksw.elapsed()))
                    if sync:
                        raise
                    LOG.error("Error processing new measures", exc_info=True)
@@ -420,11 +590,15 @@ class CarbonaraBasedStorage(storage.StorageDriver):

    def get_cross_metric_measures(self, metrics, from_timestamp=None,
                                  to_timestamp=None, aggregation='mean',
                                  reaggregation=None,
                                  granularity=None,
                                  needed_overlap=100.0):
        super(CarbonaraBasedStorage, self).get_cross_metric_measures(
            metrics, from_timestamp, to_timestamp,
            aggregation, granularity, needed_overlap)
            aggregation, reaggregation, granularity, needed_overlap)

        if reaggregation is None:
            reaggregation = aggregation

        if granularity is None:
            granularities = (

@@ -434,9 +608,9 @@ class CarbonaraBasedStorage(storage.StorageDriver):
            )
            granularities_in_common = [
                g
                for g, occurence in six.iteritems(
                for g, occurrence in six.iteritems(
                    collections.Counter(granularities))
                if occurence == len(metrics)
                if occurrence == len(metrics)
            ]

            if not granularities_in_common:

@@ -454,8 +628,8 @@ class CarbonaraBasedStorage(storage.StorageDriver):
            return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v)
                    for timestamp, r, v
                    in carbonara.AggregatedTimeSerie.aggregated(
                        tss, from_timestamp, to_timestamp,
                        aggregation, needed_overlap)]
                        tss, reaggregation, from_timestamp, to_timestamp,
                        needed_overlap)]
        except carbonara.UnAggregableTimeseries as e:
            raise storage.MetricUnaggregatable(metrics, e.reason)
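A hedged toy illustration of aggregation versus the new reaggregation: each metric is first read with `aggregation`, then the per-timestamp combination across metrics uses `reaggregation`, which defaults to the former. Data here is made up:

series_a = {0: 1.0, 60: 3.0}   # metric A, mean at 60s granularity
series_b = {0: 5.0, 60: 7.0}   # metric B, mean at 60s granularity

reaggregation = max  # e.g. ?reaggregation=max in the API
cross = {ts: reaggregation([series_a[ts], series_b[ts]])
         for ts in series_a.keys() & series_b.keys()}
assert cross == {0: 5.0, 60: 7.0}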
@ -495,9 +669,40 @@ class CarbonaraBasedStorage(storage.StorageDriver):
|
|||
|
||||
return result
|
||||
|
||||
def _map_in_thread(self, method, list_of_args):
|
||||
@staticmethod
|
||||
def _map_no_thread(method, list_of_args):
|
||||
return list(itertools.starmap(method, list_of_args))
|
||||
|
||||
def _map_in_futures_threads(self, method, list_of_args):
|
||||
with futures.ThreadPoolExecutor(
|
||||
max_workers=self.aggregation_workers_number) as executor:
|
||||
# We use 'list' to iterate all threads here to raise the first
|
||||
# exception now, not much choice
|
||||
return list(executor.map(lambda args: method(*args), list_of_args))
|
||||
|
||||
@staticmethod
|
||||
def _unserialize_timeserie_v2(data):
|
||||
return carbonara.TimeSerie.from_data(
|
||||
*carbonara.TimeSerie._timestamps_and_values_from_dict(
|
||||
msgpack.loads(data, encoding='utf-8')['values']))
|
||||
|
||||
def _get_unaggregated_timeserie_and_unserialize_v2(self, metric):
|
||||
"""Unserialization method for unaggregated v2 timeseries."""
|
||||
data = self._get_unaggregated_timeserie(metric, version=None)
|
||||
try:
|
||||
return self._unserialize_timeserie_v2(data)
|
||||
except ValueError:
|
||||
LOG.error("Data corruption detected for %s ignoring." % metric.id)
|
||||
|
||||
def _get_measures_and_unserialize_v2(self, metric, key,
|
||||
aggregation, granularity):
|
||||
"""Unserialization method for upgrading v2 objects. Upgrade only."""
|
||||
data = self._get_measures(
|
||||
metric, key, aggregation, granularity, version=None)
|
||||
try:
|
||||
return self._unserialize_timeserie_v2(data)
|
||||
except ValueError:
|
||||
LOG.error("Data corruption detected for %s "
|
||||
"aggregated `%s' timeserie, granularity `%s' "
|
||||
"around time `%s', ignoring."
|
||||
% (metric.id, aggregation, granularity, key))
|
||||
|
|
|
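The split of _map_in_thread into _map_no_thread and _map_in_futures_threads above keeps one calling convention (an iterable of argument tuples) for both a serial and a threaded strategy. A minimal sketch of the two, outside of any storage driver:

import itertools
from concurrent import futures


def add(a, b):
    return a + b


args = [(1, 2), (3, 4), (5, 6)]

# Serial strategy, as in _map_no_thread:
serial = list(itertools.starmap(add, args))

# Threaded strategy, as in _map_in_futures_threads; wrapping the map in
# list() forces iteration, so the first worker exception is raised here.
with futures.ThreadPoolExecutor(max_workers=4) as executor:
    threaded = list(executor.map(lambda a: add(*a), args))

assert serial == threaded == [3, 7, 11]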
@@ -16,18 +16,19 @@
from collections import defaultdict
import contextlib
import datetime
import errno
import itertools
import logging
import uuid

from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils

from gnocchi import storage
from gnocchi.storage import _carbonara


LOG = logging.getLogger(__name__)
LOG = log.getLogger(__name__)

for RADOS_MODULE_NAME in ('cradox', 'rados'):
    rados = importutils.try_import(RADOS_MODULE_NAME)

@@ -47,8 +48,8 @@ OPTS = [
               help='Ceph pool name to use.'),
    cfg.StrOpt('ceph_username',
               help='Ceph username (ie: admin without "client." prefix).'),
    cfg.StrOpt('ceph_keyring',
               help='Ceph keyring path.'),
    cfg.StrOpt('ceph_secret', help='Ceph key', secret=True),
    cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'),
    cfg.StrOpt('ceph_conffile',
               default='/etc/ceph/ceph.conf',
               help='Ceph configuration file.'),

@@ -57,16 +58,25 @@

class CephStorage(_carbonara.CarbonaraBasedStorage):

    WRITE_FULL = False

    def __init__(self, conf):
        super(CephStorage, self).__init__(conf)
        self.pool = conf.ceph_pool
        options = {}
        if conf.ceph_keyring:
            options['keyring'] = conf.ceph_keyring
        if conf.ceph_secret:
            options['key'] = conf.ceph_secret

        if not rados:
            raise ImportError("No module named 'rados' nor 'cradox'")

        if not hasattr(rados, 'OmapIterator'):
            raise ImportError("Your rados python module does not support "
                              "omap feature. Install 'cradox' (recommended) "
                              "or upgrade 'python-rados' >= 9.1.0 ")

        LOG.info("Ceph storage backend use '%s' python library" %
                 RADOS_MODULE_NAME)

@@ -78,32 +88,78 @@ class CephStorage(_carbonara.CarbonaraBasedStorage):
            rados_id=conf.ceph_username,
            conf=options)
        self.rados.connect()
        self.ioctx = self.rados.open_ioctx(self.pool)

    def _store_measures(self, metric, data):
        # NOTE(sileht): constants can't be class attributes because
        # they rely on the presence of the rados module

        # NOTE(sileht): we allow reading the measure object from
        # outdated replicas; that is safe for us, we will
        # get the new data on the next metricd pass.
        self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS |
                                rados.LIBRADOS_OPERATION_SKIPRWLOCKS)

        # NOTE(sileht): it should be safe to manipulate the omap keys
        # from any OSD at the same time; each OSD should replicate the
        # new key to the others, and the same goes for deletion.
        # I wonder how Ceph handles rm_omap and set_omap run at the same time
        # on the same key. I assume the operations are timestamped, so that
        # will be fine. If not, there is still one acceptable race here: a
        # rm_omap can finish before all replicas of set_omap are done, but we
        # don't care; if that occurs, the next metricd run will just remove it
        # again, the objects with the measures having already been deleted by
        # the previous run, so we are safe and good.
        self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS

    def stop(self):
        self.ioctx.aio_flush()
        self.ioctx.close()
        self.rados.shutdown()
        super(CephStorage, self).stop()

    def upgrade(self, index):
        super(CephStorage, self).upgrade(index)

        # Move names stored in xattrs to omap
        try:
            xattrs = tuple(k for k, v in
                           self.ioctx.get_xattrs(self.MEASURE_PREFIX))
        except rados.ObjectNotFound:
            return
        with rados.WriteOpCtx() as op:
            self.ioctx.set_omap(op, xattrs, xattrs)
            self.ioctx.operate_write_op(op, self.MEASURE_PREFIX,
                                        flags=self.OMAP_WRITE_FLAGS)

        for xattr in xattrs:
            self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr)

    def _store_new_measures(self, metric, data):
        # NOTE(sileht): listing all objects in a pool is too slow with
        # many objects (2 min for 20000 objects in a 50-OSD cluster),
        # and forces us to iterate over all objects.
        # So we create an object MEASURE_PREFIX that holds, as
        # xattrs, the list of objects to process
        # omap, the list of objects to process (not xattrs, because
        # they do not allow configuring the locking behavior)
        name = "_".join((
            self.MEASURE_PREFIX,
            str(metric.id),
            str(uuid.uuid4()),
            datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")))
        with self._get_ioctx() as ioctx:
            ioctx.write_full(name, data)
            ioctx.set_xattr(self.MEASURE_PREFIX, name, "")

        self.ioctx.write_full(name, data)

        with rados.WriteOpCtx() as op:
            self.ioctx.set_omap(op, (name,), ("",))
            self.ioctx.operate_write_op(op, self.MEASURE_PREFIX,
                                        flags=self.OMAP_WRITE_FLAGS)

    def _build_report(self, details):
        with self._get_ioctx() as ioctx:
            try:
                xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX)
            except rados.ObjectNotFound:
                return 0, 0, {} if details else None
        names = self._list_object_names_to_process()
        metrics = set()
        count = 0
        metric_details = defaultdict(int)
        for name, __ in xattrs:
        for name in names:
            count += 1
            metric = name.split("_")[1]
            metrics.add(metric)

@@ -111,194 +167,176 @@ class CephStorage(_carbonara.CarbonaraBasedStorage):
            metric_details[metric] += 1
        return len(metrics), count, metric_details if details else None

    def _list_object_names_to_process(self, ioctx, prefix):
        try:
            xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX)
        except rados.ObjectNotFound:
            return ()
        return (name for name, __ in xattrs if name.startswith(prefix))
    def _list_object_names_to_process(self, prefix=""):
        with rados.ReadOpCtx() as op:
            omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1)
            try:
                self.ioctx.operate_read_op(
                    op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS)
            except rados.ObjectNotFound:
                # The API has not written anything yet
                return ()
            # NOTE(sileht): after reading libradospy, I'm
            # not sure that ret will have the correct value:
            # get_omap_vals transforms the C int into a Python int
            # before operate_read_op is called; I don't know if the int
            # contents are copied during this transformation or if
            # this is a pointer to the C int. I think it's copied...
            if ret == errno.ENOENT:
                return ()
            return (k for k, v in omaps)

    def _pending_measures_to_process_count(self, metric_id):
        with self._get_ioctx() as ioctx:
            object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id)
            return len(list(self._list_object_names_to_process(ioctx,
                                                               object_prefix)))
        object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id)
        return len(list(self._list_object_names_to_process(object_prefix)))

    def _list_metric_with_measures_to_process(self, block_size, full=False):
        with self._get_ioctx() as ioctx:
            try:
                xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX)
            except rados.ObjectNotFound:
                return []
            metrics = set()
    def list_metric_with_measures_to_process(self, size, part, full=False):
        names = self._list_object_names_to_process()
        if full:
            objs_it = xattrs
            objs_it = names
        else:
            objs_it = itertools.islice(
                xattrs, block_size * self.partition, None)
        for name, __ in objs_it:
            metrics.add(name.split("_")[1])
            if full is False and len(metrics) >= block_size:
                break
        return metrics
            objs_it = itertools.islice(names, size * part, size * (part + 1))
        return set([name.split("_")[1] for name in objs_it])

    def _delete_unprocessed_measures_for_metric_id(self, metric_id):
        with self._get_ioctx() as ctx:
            object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id)
            object_names = self._list_object_names_to_process(ctx,
                                                              object_prefix)
            for n in object_names:
                try:
                    ctx.rm_xattr(self.MEASURE_PREFIX, n)
                except rados.ObjectNotFound:
                    # Another worker may have removed it, don't worry.
                    pass
                ctx.aio_remove(n)
        object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id)
        object_names = self._list_object_names_to_process(object_prefix)
        # Now clean objects and omap keys
        with rados.WriteOpCtx() as op:
            # NOTE(sileht): come on Ceph, no return code
            # for this operation?!
            self.ioctx.remove_omap_keys(op, tuple(object_names))
            self.ioctx.operate_write_op(op, self.MEASURE_PREFIX,
                                        flags=self.OMAP_WRITE_FLAGS)

        for n in object_names:
            self.ioctx.aio_remove(n)

    @contextlib.contextmanager
    def _process_measure_for_metric(self, metric):
        with self._get_ioctx() as ctx:
            object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id)
            object_names = list(self._list_object_names_to_process(
                ctx, object_prefix))
        object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id)
        object_names = list(self._list_object_names_to_process(object_prefix))

            measures = []
            for n in object_names:
                data = self._get_object_content(ctx, n)
                measures.extend(self._unserialize_measures(data))
        measures = []
        for n in object_names:
            data = self._get_object_content(n)
            measures.extend(self._unserialize_measures(data))

            yield measures
        yield measures

            # Now clean objects and xattrs
            for n in object_names:
                ctx.rm_xattr(self.MEASURE_PREFIX, n)
                ctx.aio_remove(n)
        # Now clean objects and omap keys
        with rados.WriteOpCtx() as op:
            # NOTE(sileht): come on Ceph, no return code
            # for this operation?!
            self.ioctx.remove_omap_keys(op, tuple(object_names))
            self.ioctx.operate_write_op(op, self.MEASURE_PREFIX,
                                        flags=self.OMAP_WRITE_FLAGS)

    def _get_ioctx(self):
        return self.rados.open_ioctx(self.pool)
        for n in object_names:
            self.ioctx.aio_remove(n)

    @staticmethod
    def _get_object_name(metric, timestamp_key, aggregation, granularity):
        return str("gnocchi_%s_%s_%s_%s" % (
    def _get_object_name(metric, timestamp_key, aggregation, granularity,
                         version=3):
        name = str("gnocchi_%s_%s_%s_%s" % (
            metric.id, timestamp_key, aggregation, granularity))
        return name + '_v%s' % version if version else name

    @staticmethod
    def _object_exists(ioctx, name):
    def _object_exists(self, name):
        try:
            ioctx.stat(name)
            self.ioctx.stat(name)
            return True
        except rados.ObjectNotFound:
            return False

    def _create_metric(self, metric):
        name = "gnocchi_%s_container" % metric.id
        with self._get_ioctx() as ioctx:
            if self._object_exists(ioctx, name):
                raise storage.MetricAlreadyExists(metric)
            else:
                ioctx.write_full(name, "metric created")
        if self._object_exists(name):
            raise storage.MetricAlreadyExists(metric)
        else:
            self.ioctx.write_full(name, "metric created")

    def _store_metric_measures(self, metric, timestamp_key,
                               aggregation, granularity, data):
    def _store_metric_measures(self, metric, timestamp_key, aggregation,
                               granularity, data, offset=None, version=3):
        name = self._get_object_name(metric, timestamp_key,
                                     aggregation, granularity)
        with self._get_ioctx() as ioctx:
            ioctx.write_full(name, data)
            ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "")
                                     aggregation, granularity, version)
        if offset is None:
            self.ioctx.write_full(name, data)
        else:
            self.ioctx.write(name, data, offset=offset)
        self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "")

    def _delete_metric_measures(self, metric, timestamp_key, aggregation,
                                granularity):
                                granularity, version=3):
        name = self._get_object_name(metric, timestamp_key,
                                     aggregation, granularity)
        with self._get_ioctx() as ioctx:
            ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name)
            ioctx.remove_object(name)
                                     aggregation, granularity, version)
        self.ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name)
        self.ioctx.aio_remove(name)

    def _delete_metric(self, metric):
        with self._get_ioctx() as ioctx:
            try:
                xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id)
            except rados.ObjectNotFound:
                pass
            else:
                for xattr, _ in xattrs:
                    ioctx.aio_remove(xattr)
            for name in ('container', 'none'):
                ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name))

    def _get_measures(self, metric, timestamp_key, aggregation, granularity):
        try:
            with self._get_ioctx() as ioctx:
                name = self._get_object_name(metric, timestamp_key,
                                             aggregation, granularity)
                return self._get_object_content(ioctx, name)
            xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id)
        except rados.ObjectNotFound:
            with self._get_ioctx() as ioctx:
                if self._object_exists(
                        ioctx, "gnocchi_%s_container" % metric.id):
                    raise storage.AggregationDoesNotExist(metric, aggregation)
                else:
                    raise storage.MetricDoesNotExist(metric)
            pass
        else:
            for xattr, _ in xattrs:
                self.ioctx.aio_remove(xattr)
        for name in ('container', 'none'):
            self.ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name))

    def _list_split_keys_for_metric(self, metric, aggregation, granularity):
        with self._get_ioctx() as ioctx:
            try:
                xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id)
            except rados.ObjectNotFound:
    def _get_measures(self, metric, timestamp_key, aggregation, granularity,
                      version=3):
        try:
            name = self._get_object_name(metric, timestamp_key,
                                         aggregation, granularity, version)
            return self._get_object_content(name)
        except rados.ObjectNotFound:
            if self._object_exists("gnocchi_%s_container" % metric.id):
                raise storage.AggregationDoesNotExist(metric, aggregation)
            else:
                raise storage.MetricDoesNotExist(metric)
        keys = []
        for xattr, value in xattrs:
            _, metric_id, key, agg, g = xattr.split('_', 4)
            if aggregation == agg and granularity == float(g):
                keys.append(key)

    def _list_split_keys_for_metric(self, metric, aggregation, granularity,
                                    version=None):
        try:
            xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id)
        except rados.ObjectNotFound:
            raise storage.MetricDoesNotExist(metric)
        keys = set()
        for xattr, value in xattrs:
            meta = xattr.split('_')
            if (aggregation == meta[3] and granularity == float(meta[4]) and
                    self._version_check(xattr, version)):
                keys.add(meta[2])
        return keys

    def _get_unaggregated_timeserie(self, metric):
    @staticmethod
    def _build_unaggregated_timeserie_path(metric, version):
        return (('gnocchi_%s_none' % metric.id)
                + ("_v%s" % version if version else ""))

    def _get_unaggregated_timeserie(self, metric, version=3):
        try:
            with self._get_ioctx() as ioctx:
                return self._get_object_content(
                    ioctx, "gnocchi_%s_none" % metric.id)
            return self._get_object_content(
                self._build_unaggregated_timeserie_path(metric, version))
        except rados.ObjectNotFound:
            raise storage.MetricDoesNotExist(metric)

    def _store_unaggregated_timeserie(self, metric, data):
        with self._get_ioctx() as ioctx:
            ioctx.write_full("gnocchi_%s_none" % metric.id, data)
    def _store_unaggregated_timeserie(self, metric, data, version=3):
        self.ioctx.write_full(
            self._build_unaggregated_timeserie_path(metric, version), data)

    @staticmethod
    def _get_object_content(ioctx, name):
    def _delete_unaggregated_timeserie(self, metric, version=3):
        self.ioctx.aio_remove(
            self._build_unaggregated_timeserie_path(metric, version))

    def _get_object_content(self, name):
        offset = 0
        content = b''
        while True:
            data = ioctx.read(name, offset=offset)
            data = self.ioctx.read(name, offset=offset)
            if not data:
                break
            content += data
            offset += len(data)
        return content

    # The following methods deal with Gnocchi <= 1.3 archives
    def _get_metric_archive(self, metric, aggregation):
        """Retrieve data in the place we used to store TimeSerieArchive."""
        try:
            with self._get_ioctx() as ioctx:
                return self._get_object_content(
                    ioctx, str("gnocchi_%s_%s" % (metric.id, aggregation)))
        except rados.ObjectNotFound:
            raise storage.AggregationDoesNotExist(metric, aggregation)

    def _store_metric_archive(self, metric, aggregation, data):
        """Stores data in the place we used to store TimeSerieArchive."""
        with self._get_ioctx() as ioctx:
            ioctx.write_full(
                str("gnocchi_%s_%s" % (metric.id, aggregation)), data)

    def _delete_metric_archives(self, metric):
        with self._get_ioctx() as ioctx:
            for aggregation in metric.archive_policy.aggregation_methods:
                try:
                    ioctx.remove_object(
                        str("gnocchi_%s_%s" % (metric.id, aggregation)))
                except rados.ObjectNotFound:
                    pass
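The Ceph driver above never lists the whole pool: each batch of new measures is written to its own object, whose name encodes the metric id, and the name is also registered as an omap key on the single MEASURE_PREFIX index object. A sketch of the naming scheme only (the prefix value is an assumption; the driver takes it from a class attribute):

import datetime
import uuid

MEASURE_PREFIX = "measure"  # assumed; defined on the storage base class


def measure_object_name(metric_id):
    # Mirrors the "_".join(...) in _store_new_measures() above.
    return "_".join((
        MEASURE_PREFIX,
        str(metric_id),
        str(uuid.uuid4()),
        datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")))


name = measure_object_name("8fd2a717-0a42-4b4a-a4b8-e3d053e481d6")
# _build_report() and list_metric_with_measures_to_process() recover the
# metric id from the second "_"-separated field; UUIDs contain no "_".
assert name.split("_")[1] == "8fd2a717-0a42-4b4a-a4b8-e3d053e481d6"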
@@ -40,6 +40,9 @@ OPTS = [


class FileStorage(_carbonara.CarbonaraBasedStorage):

    WRITE_FULL = True

    def __init__(self, conf):
        super(FileStorage, self).__init__(conf)
        self.basepath = conf.file_basepath

@@ -75,17 +78,20 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
    def _build_metric_dir(self, metric):
        return os.path.join(self.basepath, str(metric.id))

    def _build_unaggregated_timeserie_path(self, metric):
        return os.path.join(self._build_metric_dir(metric), 'none')
    def _build_unaggregated_timeserie_path(self, metric, version=3):
        return os.path.join(
            self._build_metric_dir(metric),
            'none' + ("_v%s" % version if version else ""))

    def _build_metric_path(self, metric, aggregation):
        return os.path.join(self._build_metric_dir(metric),
                            "agg_" + aggregation)

    def _build_metric_path_for_split(self, metric, aggregation,
                                     timestamp_key, granularity):
        return os.path.join(self._build_metric_path(metric, aggregation),
                                     timestamp_key, granularity, version=3):
        path = os.path.join(self._build_metric_path(metric, aggregation),
                            timestamp_key + "_" + str(granularity))
        return path + '_v%s' % version if version else path

    def _build_measure_path(self, metric_id, random_id=None):
        path = os.path.join(self.measure_path, six.text_type(metric_id))

@@ -111,7 +117,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
            if e.errno != errno.EEXIST:
                raise

    def _store_measures(self, metric, data):
    def _store_new_measures(self, metric, data):
        tmpfile = self._get_tempfile()
        tmpfile.write(data)
        tmpfile.close()

@@ -140,11 +146,11 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
        return (len(metric_details.keys()), sum(metric_details.values()),
                metric_details if details else None)

    def _list_metric_with_measures_to_process(self, block_size, full=False):
    def list_metric_with_measures_to_process(self, size, part, full=False):
        if full:
            return os.listdir(self.measure_path)
        return os.listdir(self.measure_path)[
            block_size * self.partition:block_size * (self.partition + 1)]
            return set(os.listdir(self.measure_path))
        return set(
            os.listdir(self.measure_path)[size * part:size * (part + 1)])

    def _list_measures_container_for_metric_id(self, metric_id):
        try:

@@ -170,7 +176,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
            # by another process
            # ENOTEMPTY: ok, someone pushed measures in the meantime,
            # we'll delete the measures and the directory later
            if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
            # EEXIST: some systems use this instead of ENOTEMPTY
            if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST):
                raise

    def _delete_unprocessed_measures_for_metric_id(self, metric_id):

@@ -193,13 +200,13 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):

        self._delete_measures_files_for_metric_id(metric.id, files)

    def _store_unaggregated_timeserie(self, metric, data):
    def _store_unaggregated_timeserie(self, metric, data, version=3):
        self._atomic_file_store(
            self._build_unaggregated_timeserie_path(metric),
            self._build_unaggregated_timeserie_path(metric, version),
            data)

    def _get_unaggregated_timeserie(self, metric):
        path = self._build_unaggregated_timeserie_path(metric)
    def _get_unaggregated_timeserie(self, metric, version=3):
        path = self._build_unaggregated_timeserie_path(metric, version)
        try:
            with open(path, 'rb') as f:
                return f.read()

@@ -208,30 +215,41 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
                raise storage.MetricDoesNotExist(metric)
            raise

    def _list_split_keys_for_metric(self, metric, aggregation, granularity):
    def _delete_unaggregated_timeserie(self, metric, version=3):
        path = self._build_unaggregated_timeserie_path(metric, version)
        try:
            os.unlink(path)
        except IOError as e:
            if e.errno == errno.ENOENT:
                raise storage.MetricDoesNotExist(metric)
            raise

    def _list_split_keys_for_metric(self, metric, aggregation, granularity,
                                    version=None):
        try:
            files = os.listdir(self._build_metric_path(metric, aggregation))
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise storage.MetricDoesNotExist(metric)
            raise
        keys = []
        keys = set()
        for f in files:
            key, sep, file_granularity = f.partition("_")
            if file_granularity == str(granularity):
                keys.append(key)
            meta = f.split("_")
            if meta[1] == str(granularity) and self._version_check(f, version):
                keys.add(meta[0])
        return keys

    def _delete_metric_measures(self, metric, timestamp_key, aggregation,
                                granularity):
                                granularity, version=3):
        os.unlink(self._build_metric_path_for_split(
            metric, aggregation, timestamp_key, granularity))
            metric, aggregation, timestamp_key, granularity, version))

    def _store_metric_measures(self, metric, timestamp_key, aggregation,
                               granularity, data):
                               granularity, data, offset=None, version=3):
        self._atomic_file_store(
            self._build_metric_path_for_split(metric, aggregation,
                                              timestamp_key, granularity),
                                              timestamp_key, granularity,
                                              version),
            data)

    def _delete_metric(self, metric):

@@ -244,9 +262,10 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
            # measures)
            raise

    def _get_measures(self, metric, timestamp_key, aggregation, granularity):
        path = self._build_metric_path_for_split(metric, aggregation,
                                                 timestamp_key, granularity)
    def _get_measures(self, metric, timestamp_key, aggregation, granularity,
                      version=3):
        path = self._build_metric_path_for_split(
            metric, aggregation, timestamp_key, granularity, version)
        try:
            with open(path, 'rb') as aggregation_file:
                return aggregation_file.read()

@@ -256,36 +275,3 @@ class FileStorage(_carbonara.CarbonaraBasedStorage):
                raise storage.AggregationDoesNotExist(metric, aggregation)
            raise storage.MetricDoesNotExist(metric)
            raise

    # The following methods deal with Gnocchi <= 1.3 archives
    def _build_metric_archive_path(self, metric, aggregation):
        return os.path.join(self._build_metric_dir(metric), aggregation)

    def _get_metric_archive(self, metric, aggregation):
        """Retrieve data in the place we used to store TimeSerieArchive."""
        path = self._build_metric_archive_path(metric, aggregation)
        try:
            with open(path, 'rb') as aggregation_file:
                return aggregation_file.read()
        except IOError as e:
            if e.errno == errno.ENOENT:
                if os.path.exists(self._build_metric_dir(metric)):
                    raise storage.AggregationDoesNotExist(metric, aggregation)
                raise storage.MetricDoesNotExist(metric)
            raise

    def _store_metric_archive(self, metric, aggregation, data):
        """Stores data in the place we used to store TimeSerieArchive."""
        self._atomic_file_store(
            self._build_metric_archive_path(metric, aggregation),
            data)

    def _delete_metric_archives(self, metric):
        for agg in metric.archive_policy.aggregation_methods:
            try:
                os.unlink(self._build_metric_archive_path(metric, agg))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    # NOTE(jd) Maybe the metric has never been created (no
                    # measures)
                    raise
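All three drivers now append a "_v3" suffix to object and file names, and _list_split_keys_for_metric filters on it through _version_check. A sketch of the convention and of one plausible check (the exact _version_check implementation lives elsewhere in _carbonara and may differ):

def build_split_name(timestamp_key, granularity, version=3):
    # Same idiom as _build_metric_path_for_split() above: "%" binds
    # tighter than the conditional, so the suffix is appended only
    # when a version is given.
    name = "%s_%s" % (timestamp_key, granularity)
    return name + "_v%s" % version if version else name


def version_check(name, version):
    # Assumed behaviour: with a version, the name must end with "_v<n>";
    # without one, it must carry no version suffix at all.
    return name.endswith("_v%s" % version) if version else "_v" not in name


name = build_split_name("1474624800.0", 300.0)
assert name == "1474624800.0_300.0_v3"
assert version_check(name, 3)
assert not version_check("1474624800.0_300.0", 3)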
@@ -1,281 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import datetime
import logging
import operator

try:
    import influxdb
except ImportError:
    influxdb = None
import iso8601
from oslo_config import cfg
from oslo_utils import timeutils
import retrying

from gnocchi import exceptions
from gnocchi import storage
from gnocchi import utils


OPTS = [
    cfg.StrOpt('influxdb_host',
               default='localhost',
               help='InfluxDB host'),
    cfg.PortOpt('influxdb_port',
                default=8086,
                help='InfluxDB port'),
    cfg.StrOpt('influxdb_username',
               default='root',
               help='InfluxDB username'),
    cfg.StrOpt('influxdb_password',
               secret=True,
               help='InfluxDB password'),
    cfg.StrOpt('influxdb_database',
               default='gnocchi',
               help='InfluxDB database'),
    cfg.BoolOpt('influxdb_block_until_data_ingested',
                default=False,
                help='InfluxDB ingests data in an asynchronous way. '
                'Set to True to wait until data is ingested.'),
]


LOG = logging.getLogger(__name__)
START_EPOCH = datetime.datetime(1, 1, 1, tzinfo=iso8601.iso8601.UTC)


class InfluxDBStorage(storage.StorageDriver):

    def __init__(self, conf):
        if not influxdb:
            raise ImportError("Module influxdb could not be loaded")
        super(InfluxDBStorage, self).__init__(conf)
        self._block_until_data_ingested = (
            conf.influxdb_block_until_data_ingested)
        self.influx = influxdb.InfluxDBClient(conf.influxdb_host,
                                              conf.influxdb_port,
                                              conf.influxdb_username,
                                              conf.influxdb_password,
                                              conf.influxdb_database)
        self.database = conf.influxdb_database

    @staticmethod
    def _get_metric_id(metric):
        return str(metric.id)

    def _metric_exists(self, metric):
        list_series = [s['name'] for s in self.influx.get_list_series()]
        return self._get_metric_id(metric) in list_series

    def _query(self, metric, query):
        try:
            return self.influx.query(query, database=self.database)
        except influxdb.client.InfluxDBClientError as e:
            # NOTE(ityaptin) If a metric exists but doesn't have any measures
            # with a `value` field, the influxdb client may raise an exception
            # for an (aggregate) query. It's not an error in the Gnocchi
            # context and we should return an empty list in this case.
            if ("unknown field or tag name" in e.content
                    or "measurement not found" in e.content):
                return {self._get_metric_id(metric): []}
            raise

    @retrying.retry(stop_max_delay=5000, wait_fixed=500,
                    retry_on_exception=utils.retry_if_retry_raised)
    def _wait_points_exists(self, metric_id, where):
        # NOTE(sileht): an influxdb query returns even if the data has not
        # yet been inserted into the requested series; the work is done in an
        # async fashion, so an immediate get_measures after an add_measures
        # will not return the just-inserted data. Perhaps related:
        # https://github.com/influxdb/influxdb/issues/2450 This is a
        # workaround to wait until the data appears in influxdb...
        if not self._block_until_data_ingested:
            return
        try:
            result = self.influx.query("SELECT * FROM \"%(metric_id)s\" WHERE "
                                       "%(where)s LIMIT 1" %
                                       dict(metric_id=metric_id, where=where),
                                       database=self.database)
        except influxdb.client.InfluxDBClientError as e:
            if "measurement not found" in e.content:
                raise utils.Retry
            raise

        result = list(result[metric_id])
        if not result:
            raise utils.Retry

    def delete_metric(self, metric):
        metric_id = self._get_metric_id(metric)
        self._query(metric, "DROP MEASUREMENT \"%s\"" % metric_id)

    def add_measures(self, metric, measures):
        metric_id = self._get_metric_id(metric)
        points = [dict(measurement=metric_id,
                       time=self._timestamp_to_utc(m.timestamp).isoformat(),
                       fields=dict(value=float(m.value)))
                  for m in measures]
        self.influx.write_points(points=points, time_precision='n',
                                 database=self.database,
                                 retention_policy="default")
        self._wait_points_exists(metric_id, "time = '%(time)s' AND "
                                 "value = %(value)s" %
                                 dict(time=points[-1]['time'],
                                      value=points[-1]["fields"]["value"]))

    def get_measures(self, metric, from_timestamp=None, to_timestamp=None,
                     aggregation='mean', granularity=None):
        super(InfluxDBStorage, self).get_measures(
            metric, from_timestamp, to_timestamp, aggregation)

        if from_timestamp:
            from_timestamp = self._timestamp_to_utc(from_timestamp)
        if to_timestamp:
            to_timestamp = self._timestamp_to_utc(to_timestamp)

        metric_id = self._get_metric_id(metric)

        if from_timestamp:
            first_measure_timestamp = from_timestamp
        else:
            result = self._query(metric, "select * from \"%(metric_id)s\"" %
                                 dict(metric_id=metric_id))
            result = list(result[metric_id])
            if result:
                first_measure_timestamp = self._timestamp_to_utc(
                    timeutils.parse_isotime(result[0]['time']))
            else:
                first_measure_timestamp = None

        query = ("SELECT %(aggregation)s(value) FROM \"%(metric_id)s\""
                 % dict(aggregation=aggregation,
                        metric_id=metric_id))

        # NOTE(jd) So this is totally suboptimal as we CANNOT limit the range
        # on time. InfluxDB is not smart enough yet to limit the result of the
        # time we want based on the GROUP BY result, not based on the time
        # value. If we do from_timestamp < t < to_timestamp, InfluxDB will
        # limit the datapoints to those, and then run the aggregate function.
        # What we want instead, is something like:
        # SELECT mean(value) FROM serie
        # GROUP BY time(5s) as groupedtime
        # WHERE from_timestamp <= groupedtime < to_timestamp
        # Since we cannot do that, we aggregate everything and then limit
        # the returned result.
        # see https://github.com/influxdb/influxdb/issues/1973
        # NOTE(sileht): but we have to set one time boundary to have the
        # request accepted by influxdb.
        # see https://github.com/influxdb/influxdb/issues/2444
        #
        # That's good enough until we support continuous query or the like.

        results = []
        defs = sorted(
            (d
             for d in metric.archive_policy.definition
             if granularity is None or granularity == d.granularity),
            key=operator.attrgetter('granularity'))

        for definition in defs:
            time_query = self._make_time_query(
                first_measure_timestamp,
                to_timestamp,
                definition.granularity)
            subquery = (query +
                        " WHERE %(times)s GROUP BY time(%(granularity)ds) "
                        "fill(none) LIMIT %(points)d" %
                        dict(times=time_query,
                             granularity=definition.granularity,
                             points=definition.points))

            result = self._query(metric, subquery)

            subresults = []
            for point in result[metric_id]:
                timestamp = self._timestamp_to_utc(
                    timeutils.parse_isotime(point['time']))
                if (point[aggregation] is not None and
                        ((from_timestamp is None or timestamp >= from_timestamp)
                         and (to_timestamp is None or timestamp < to_timestamp))):
                    subresults.insert(0, (timestamp,
                                          definition.granularity,
                                          point[aggregation]))
            results.extend(subresults)

        return list(reversed(results))

    def search_value(self, metrics, query, from_timestamp=None,
                     to_timestamp=None,
                     aggregation='mean'):
        results = {}
        predicate = storage.MeasureQuery(query)

        for metric in metrics:
            measures = self.get_measures(metric, from_timestamp, to_timestamp,
                                         aggregation)
            results[metric] = [
                (timestamp, granularity, value)
                for timestamp, granularity, value in measures
                if predicate(value)]
        return results

    @staticmethod
    def _timestamp_to_utc(ts):
        return timeutils.normalize_time(ts).replace(tzinfo=iso8601.iso8601.UTC)

    def _make_time_query(self, from_timestamp, to_timestamp, granularity):
        if from_timestamp:
            from_timestamp = find_nearest_stable_point(from_timestamp,
                                                       granularity)
            left_time = self._timestamp_to_utc(from_timestamp).isoformat()
        else:
            left_time = "now()"

        if to_timestamp and to_timestamp >= from_timestamp:
            right_time = self._timestamp_to_utc(to_timestamp).isoformat()
        else:
            right_time = None

        return ("time >= '%s'" % left_time) + (" and time < '%s'" % right_time
                                               if right_time else "")

    def get_cross_metric_measures(self, metrics, from_timestamp=None,
                                  to_timestamp=None, aggregation='mean',
                                  needed_overlap=None):
        super(InfluxDBStorage, self).get_cross_metric_measures(
            metrics, from_timestamp, to_timestamp, aggregation, needed_overlap)
        raise exceptions.NotImplementedError


def find_nearest_stable_point(timestamp, granularity, next=False):
    """Find the timestamp before another one for a particular granularity.

    e.g. the nearest timestamp for 14:23:45
    with a granularity of 60 is 14:23:00

    :param timestamp: The timestamp to use as a reference point
    :param granularity: Granularity to use to look for the nearest timestamp
    :param next: Whether to return the next timestamp
                 rather than the previous one
    """
    seconds = timeutils.delta_seconds(START_EPOCH, timestamp)
    seconds = int(seconds - seconds % granularity)
    stable_point = START_EPOCH + datetime.timedelta(seconds=seconds)
    if next:
        stable_point += datetime.timedelta(seconds=granularity)
    return stable_point
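Before it was removed, find_nearest_stable_point rounded a timestamp down (or up, with next=True) to a multiple of the granularity since a fixed epoch, as the docstring's 14:23:45 → 14:23:00 example shows. An equivalent standalone sketch, using plain datetime arithmetic in place of oslo's timeutils:

import datetime

import iso8601

START_EPOCH = datetime.datetime(1, 1, 1, tzinfo=iso8601.iso8601.UTC)


def find_nearest_stable_point(timestamp, granularity, next=False):
    seconds = (timestamp - START_EPOCH).total_seconds()
    seconds = int(seconds - seconds % granularity)
    stable_point = START_EPOCH + datetime.timedelta(seconds=seconds)
    if next:
        stable_point += datetime.timedelta(seconds=granularity)
    return stable_point


ts = datetime.datetime(2016, 9, 23, 14, 23, 45, tzinfo=iso8601.iso8601.UTC)
print(find_nearest_stable_point(ts, 60))             # 2016-09-23 14:23:00+00:00
print(find_nearest_stable_point(ts, 60, next=True))  # 2016-09-23 14:24:00+00:00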
@@ -1,22 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gnocchi import storage


class NullStorage(storage.StorageDriver):
    pass
@@ -19,16 +19,19 @@ import datetime
import uuid

from oslo_config import cfg
import retrying
from oslo_log import log
import six
from six.moves.urllib.parse import quote
try:
    from swiftclient import client as swclient
    from swiftclient import utils as swift_utils
except ImportError:
    swclient = None

from gnocchi import storage
from gnocchi.storage import _carbonara

LOG = log.getLogger(__name__)

OPTS = [
    cfg.StrOpt('swift_auth_version',

@@ -45,15 +48,25 @@ OPTS = [
    cfg.StrOpt('swift_user',
               default="admin:admin",
               help='Swift user.'),
    cfg.StrOpt('swift_user_domain_name',
               default='Default',
               help='Swift user domain name.'),
    cfg.StrOpt('swift_key',
               secret=True,
               default="admin",
               help='Swift key/password.'),
    cfg.StrOpt('swift_tenant_name',
               help='Swift tenant name, only used in v2 auth.'),
    cfg.StrOpt('swift_project_name',
               help='Swift project name, only used in v2/v3 auth.',
               deprecated_name="swift_tenant_name"),
    cfg.StrOpt('swift_project_domain_name',
               default='Default',
               help='Swift project domain name.'),
    cfg.StrOpt('swift_container_prefix',
               default='gnocchi',
               help='Prefix to namespace metric containers.'),
    cfg.StrOpt('swift_endpoint_type',
               default='publicURL',
               help='Endpoint type to connect to Swift',),
    cfg.IntOpt('swift_timeout',
               min=0,
               default=300,

@@ -61,11 +74,11 @@ OPTS = [
]


def retry_if_result_empty(result):
    return len(result) == 0


class SwiftStorage(_carbonara.CarbonaraBasedStorage):

    WRITE_FULL = True
    POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'}

    def __init__(self, conf):
        super(SwiftStorage, self).__init__(conf)
        if swclient is None:

@@ -76,8 +89,11 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
            preauthtoken=conf.swift_preauthtoken,
            user=conf.swift_user,
            key=conf.swift_key,
            tenant_name=conf.swift_tenant_name,
            timeout=conf.swift_timeout)
            tenant_name=conf.swift_project_name,
            timeout=conf.swift_timeout,
            os_options={'endpoint_type': conf.swift_endpoint_type,
                        'user_domain_name': conf.swift_user_domain_name},
            retries=0)
        self._container_prefix = conf.swift_container_prefix
        self.swift.put_container(self.MEASURE_PREFIX)

@@ -85,8 +101,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
        return '%s.%s' % (self._container_prefix, str(metric.id))

    @staticmethod
    def _object_name(split_key, aggregation, granularity):
        return '%s_%s_%s' % (split_key, aggregation, granularity)
    def _object_name(split_key, aggregation, granularity, version=3):
        name = '%s_%s_%s' % (split_key, aggregation, granularity)
        return name + '_v%s' % version if version else name

    def _create_metric(self, metric):
        # TODO(jd) A container per user in their account?

@@ -98,7 +115,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
        if resp['status'] == 204:
            raise storage.MetricAlreadyExists(metric)

    def _store_measures(self, metric, data):
    def _store_new_measures(self, metric, data):
        now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
        self.swift.put_object(
            self.MEASURE_PREFIX,

@@ -106,30 +123,34 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
            data)

    def _build_report(self, details):
        headers, files = self.swift.get_container(self.MEASURE_PREFIX,
                                                  delimiter='/',
                                                  full_listing=True)
        metrics = len(files)
        measures = headers.get('x-container-object-count')
        metric_details = defaultdict(int)
        if details:
            headers, files = self.swift.get_container(self.MEASURE_PREFIX,
                                                      full_listing=True)
            metrics = set()
            for f in files:
                metric = f['name'].split('/', 1)[0]
                metric, metric_files = f['name'].split("/", 1)
                metric_details[metric] += 1
        return metrics, measures, metric_details if details else None
                metrics.add(metric)
            nb_metrics = len(metrics)
        else:
            headers, files = self.swift.get_container(self.MEASURE_PREFIX,
                                                      delimiter='/',
                                                      full_listing=True)
            nb_metrics = len(files)
        measures = int(headers.get('x-container-object-count'))
        return nb_metrics, measures, metric_details if details else None

    def _list_metric_with_measures_to_process(self, block_size, full=False):
    def list_metric_with_measures_to_process(self, size, part, full=False):
        limit = None
        if not full:
            limit = block_size * (self.partition + 1)
            limit = size * (part + 1)
        headers, files = self.swift.get_container(self.MEASURE_PREFIX,
                                                  delimiter='/',
                                                  full_listing=full,
                                                  limit=limit)
        if not full:
            files = files[block_size * self.partition:]
            files = files[size * part:]
        return set(f['subdir'][:-1] for f in files if 'subdir' in f)

    def _list_measure_files_for_metric_id(self, metric_id):

@@ -141,16 +162,24 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
    def _pending_measures_to_process_count(self, metric_id):
        return len(self._list_measure_files_for_metric_id(metric_id))

    def _bulk_delete(self, container, objects):
        objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8'))
                   for obj in objects]
        resp = {}
        headers, body = self.swift.post_account(
            headers=self.POST_HEADERS, query_string='bulk-delete',
            data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
            response_dict=resp)
        if resp['status'] != 200:
            raise storage.StorageError(
                "Unable to bulk-delete, is bulk-delete enabled in Swift?")
        resp = swift_utils.parse_api_response(headers, body)
        LOG.debug('# of objects deleted: %s, # of objects skipped: %s',
                  resp['Number Deleted'], resp['Number Not Found'])

    def _delete_unprocessed_measures_for_metric_id(self, metric_id):
        files = self._list_measure_files_for_metric_id(metric_id)
        for f in files:
            try:
                self.swift.delete_object(self.MEASURE_PREFIX, f['name'])
            except swclient.ClientException as e:
                # If the object has already been deleted by another worker, do
                # not worry.
                if e.http_status != 404:
                    raise
        self._bulk_delete(self.MEASURE_PREFIX, files)

    @contextlib.contextmanager
    def _process_measure_for_metric(self, metric):

@@ -165,21 +194,22 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
        yield measures

        # Now clean objects
        for f in files:
            self.swift.delete_object(self.MEASURE_PREFIX, f['name'])
        self._bulk_delete(self.MEASURE_PREFIX, files)

    def _store_metric_measures(self, metric, timestamp_key,
                               aggregation, granularity, data):
    def _store_metric_measures(self, metric, timestamp_key, aggregation,
                               granularity, data, offset=None, version=3):
        self.swift.put_object(
            self._container_name(metric),
            self._object_name(timestamp_key, aggregation, granularity),
            self._object_name(timestamp_key, aggregation, granularity,
                              version),
            data)

    def _delete_metric_measures(self, metric, timestamp_key, aggregation,
                                granularity):
                                granularity, version=3):
        self.swift.delete_object(
            self._container_name(metric),
            self._object_name(timestamp_key, aggregation, granularity))
            self._object_name(timestamp_key, aggregation, granularity,
                              version))

    def _delete_metric(self, metric):
        self._delete_unaggregated_timeserie(metric)

@@ -192,8 +222,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
            # Maybe it has never been created (no measures)
            raise
        else:
            for obj in files:
                self.swift.delete_object(container, obj['name'])
            self._bulk_delete(container, files)
        try:
            self.swift.delete_container(container)
        except swclient.ClientException as e:

@@ -201,14 +230,12 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
                # Deleted in the meantime? Whatever.
                raise

    @retrying.retry(stop_max_attempt_number=4,
                    wait_fixed=500,
                    retry_on_result=retry_if_result_empty)
    def _get_measures(self, metric, timestamp_key, aggregation, granularity):
    def _get_measures(self, metric, timestamp_key, aggregation, granularity,
                      version=3):
        try:
            headers, contents = self.swift.get_object(
                self._container_name(metric), self._object_name(
                    timestamp_key, aggregation, granularity))
                    timestamp_key, aggregation, granularity, version))
        except swclient.ClientException as e:
            if e.http_status == 404:
                try:

@@ -221,7 +248,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
            raise
        return contents

    def _list_split_keys_for_metric(self, metric, aggregation, granularity):
    def _list_split_keys_for_metric(self, metric, aggregation, granularity,
                                    version=None):
        container = self._container_name(metric)
        try:
            headers, files = self.swift.get_container(

@@ -230,61 +258,43 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage):
            if e.http_status == 404:
                raise storage.MetricDoesNotExist(metric)
            raise
        keys = []
        keys = set()
        for f in files:
            try:
                key, agg, g = f['name'].split('_', 2)
            except ValueError:
                meta = f['name'].split('_')
                if (aggregation == meta[1] and granularity == float(meta[2])
                        and self._version_check(f['name'], version)):
                    keys.add(meta[0])
            except (ValueError, IndexError):
                # Might be "none", or any other file. Be resilient.
                continue
            if aggregation == agg and granularity == float(g):
                keys.append(key)
        return keys

    @retrying.retry(stop_max_attempt_number=4,
                    wait_fixed=500,
                    retry_on_result=retry_if_result_empty)
    def _get_unaggregated_timeserie(self, metric):
    @staticmethod
    def _build_unaggregated_timeserie_path(version):
        return 'none' + ("_v%s" % version if version else "")

    def _get_unaggregated_timeserie(self, metric, version=3):
        try:
            headers, contents = self.swift.get_object(
                self._container_name(metric), "none")
                self._container_name(metric),
                self._build_unaggregated_timeserie_path(version))
        except swclient.ClientException as e:
            if e.http_status == 404:
                raise storage.MetricDoesNotExist(metric)
            raise
        return contents

    def _store_unaggregated_timeserie(self, metric, data):
        self.swift.put_object(self._container_name(metric), "none", data)
    def _store_unaggregated_timeserie(self, metric, data, version=3):
        self.swift.put_object(self._container_name(metric),
                              self._build_unaggregated_timeserie_path(version),
                              data)

    def _delete_unaggregated_timeserie(self, metric):
    def _delete_unaggregated_timeserie(self, metric, version=3):
        try:
            self.swift.delete_object(self._container_name(metric), "none")
            self.swift.delete_object(
                self._container_name(metric),
                self._build_unaggregated_timeserie_path(version))
        except swclient.ClientException as e:
            if e.http_status != 404:
                raise

    # The following methods deal with Gnocchi <= 1.3 archives
    def _get_metric_archive(self, metric, aggregation):
        """Retrieve data in the place we used to store TimeSerieArchive."""
        try:
            headers, contents = self.swift.get_object(
                self._container_name(metric), aggregation)
        except swclient.ClientException as e:
            if e.http_status == 404:
                raise storage.AggregationDoesNotExist(metric, aggregation)
            raise
        return contents

    def _store_metric_archive(self, metric, aggregation, data):
        """Stores data in the place we used to store TimeSerieArchive."""
        self.swift.put_object(self._container_name(metric), aggregation, data)

    def _delete_metric_archives(self, metric):
        for aggregation in metric.archive_policy.aggregation_methods:
            try:
                self.swift.delete_object(self._container_name(metric),
                                         aggregation)
            except swclient.ClientException as e:
                if e.http_status != 404:
                    raise
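The new _bulk_delete above replaces one DELETE per object with a single Swift bulk-delete POST: a text/plain body with one URL-quoted /container/object path per line. A sketch of just the body construction (the names are illustrative):

from six.moves.urllib.parse import quote


def bulk_delete_body(container, objects):
    # One quoted "/container/object" path per line, newline-terminated,
    # as built in _bulk_delete() before POSTing with
    # query_string='bulk-delete' and the POST_HEADERS defined above.
    paths = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8'))
             for obj in objects]
    return b''.join(p.encode('utf-8') + b'\n' for p in paths)


body = bulk_delete_body('measure', [{'name': 'abc/1'}, {'name': 'abc/2'}])
assert body == b'/measure/abc/1\n/measure/abc/2\n'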
@@ -0,0 +1,38 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg


service_available_group = cfg.OptGroup(name="service_available",
                                       title="Available OpenStack Services")

service_available_opts = [
    cfg.BoolOpt("gnocchi",
                default=True,
                help="Whether or not Gnocchi is expected to be available"),
]

metric_group = cfg.OptGroup(name='metric',
                            title='Metric Service Options')

metric_opts = [
    cfg.StrOpt('catalog_type',
               default='metric',
               help="Catalog type of the Metric service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the metric service."),
]
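For reference, this is roughly how the groups and options declared above get registered on a configuration object; a sketch against oslo.config, not the way Tempest itself wires them in (Tempest calls register_opts via the plugin shown next):

from oslo_config import cfg

from gnocchi.tempest import config as tempest_config

conf = cfg.ConfigOpts()
conf.register_group(tempest_config.service_available_group)
conf.register_opts(tempest_config.service_available_opts,
                   group=tempest_config.service_available_group)
conf.register_group(tempest_config.metric_group)
conf.register_opts(tempest_config.metric_opts,
                   group=tempest_config.metric_group)
conf([])  # parse an empty command line so defaults become readable

print(conf.service_available.gnocchi)  # True
print(conf.metric.catalog_type)        # 'metric'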
@@ -0,0 +1,45 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

import os

from tempest import config
from tempest.test_discover import plugins

import gnocchi
from gnocchi.tempest import config as tempest_config


class GnocchiTempestPlugin(plugins.TempestPlugin):
    def load_tests(self):
        base_path = os.path.split(os.path.dirname(
            os.path.abspath(gnocchi.__file__)))[0]
        test_dir = "gnocchi/tempest"
        full_test_dir = os.path.join(base_path, test_dir)
        return full_test_dir, base_path

    def register_opts(self, conf):
        config.register_opt_group(conf,
                                  tempest_config.service_available_group,
                                  tempest_config.service_available_opts)
        config.register_opt_group(conf,
                                  tempest_config.metric_group,
                                  tempest_config.metric_opts)

    def get_opt_lists(self):
        return [(tempest_config.metric_group.name,
                 tempest_config.metric_opts),
                ('service_available', tempest_config.service_available_opts)]
@ -0,0 +1,85 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from gabbi import driver
|
||||
import six.moves.urllib.parse as urlparse
|
||||
from tempest import config
|
||||
import tempest.test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class GnocchiGabbiTest(tempest.test.BaseTestCase):
|
||||
credentials = ['admin']
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(GnocchiGabbiTest, cls).skip_checks()
|
||||
if not CONF.service_available.gnocchi:
|
||||
raise cls.skipException("Gnocchi support is required")
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(GnocchiGabbiTest, cls).resource_setup()
|
||||
|
||||
url = cls.os_admin.auth_provider.base_url(
|
||||
{'service': CONF.metric.catalog_type,
|
||||
'endpoint_type': CONF.metric.endpoint_type})
|
||||
token = cls.os_admin.auth_provider.get_token()
|
||||
|
||||
parsed_url = urlparse.urlsplit(url)
|
||||
prefix = parsed_url.path.rstrip('/') # turn it into a prefix
|
||||
if parsed_url.scheme == 'https':
|
||||
port = 443
|
||||
require_ssl = True
|
||||
else:
|
||||
port = 80
|
||||
require_ssl = False
|
||||
host = parsed_url.hostname
|
||||
if parsed_url.port:
|
||||
port = parsed_url.port
|
||||
|
||||
test_dir = os.path.join(os.path.dirname(__file__), '..', '..',
|
||||
'tests', 'gabbi', 'gabbits-live')
|
||||
cls.tests = driver.build_tests(
|
||||
test_dir, unittest.TestLoader(),
|
||||
host=host, port=port, prefix=prefix,
|
||||
test_loader_name='tempest.scenario.gnocchi.test',
|
||||
require_ssl=require_ssl)
|
||||
|
||||
os.environ["GNOCCHI_SERVICE_TOKEN"] = token
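        # The gabbits-live YAML files read this token back through
        # $ENVIRON['GNOCCHI_SERVICE_TOKEN'] in their default request
        # headers, so exporting it here is what authenticates every
        # live test below.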

    @classmethod
    def clear_credentials(cls):
        # FIXME(sileht): We don't want the token to be invalidated, but
        # for some obscure reason, clear_credentials is called before/during
        # run. So, make the one used by tearDownClass a no-op, and call it
        # manually in run()
        pass

    def run(self, result=None):
        self.setUp()
        try:
            self.tests.run(result)
        finally:
            super(GnocchiGabbiTest, self).clear_credentials()
            self.tearDown()

    def test_fake(self):
        # NOTE(sileht): A fake test is needed to have the class loaded
        # by the test runner
        pass

@@ -13,7 +13,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import json
import os
import uuid

@@ -21,7 +23,7 @@ import fixtures
from oslotest import base
from oslotest import mockpatch
import six
from stevedore import extension
from six.moves.urllib.parse import unquote
try:
    from swiftclient import exceptions as swexc
except ImportError:

@@ -57,13 +59,53 @@ def _skip_decorator(func):


class FakeRadosModule(object):
    class OpCtx(object):
        def __enter__(self):
            self.ops = []
            return self

        def __exit__(self, *args, **kwargs):
            pass

    WriteOpCtx = ReadOpCtx = OpCtx

    class OmapIterator(object):
        class OpRetCode(object):
            def __init__(self):
                self.ret = 0

            def __eq__(self, other):
                return self.ret == other

        def __init__(self, start_filter, prefix_filter, number):
            self.start_filter = start_filter
            self.prefix_filter = prefix_filter
            self.number = number
            self.data = {}
            self.op_ret = self.OpRetCode()

        def set_data(self, data):
            if not data:
                self.op_ret.ret = errno.ENOENT
            else:
                self.data = data

        def __iter__(self):
            # NOTE(sileht): we use only the prefix for now
            return ((k, v) for k, v in self.data.items()
                    if k.startswith(self.prefix_filter))

    LIBRADOS_OPERATION_BALANCE_READS = 1
    LIBRADOS_OPERATION_SKIPRWLOCKS = 16

    class ObjectNotFound(Exception):
        pass

    class ioctx(object):
        def __init__(self, kvs, kvs_xattrs):
        def __init__(self, kvs, kvs_xattrs, kvs_omaps):
            self.kvs = kvs
            self.kvs_xattrs = kvs_xattrs
            self.kvs_omaps = kvs_omaps
            self.librados = self
            self.io = self

@@ -78,21 +120,7 @@ class FakeRadosModule(object):
            if key not in self.kvs:
                self.kvs[key] = ""
                self.kvs_xattrs[key] = {}

        def rados_lock_exclusive(self, ctx, name, lock, locker, desc,
                                 timeval, flags):
            # Locking a non-existing object creates an empty one,
            # so do the same in the test
            key = name.value.decode('ascii')
            self._ensure_key_exists(key)
            return 0

        def rados_unlock(self, ctx, name, lock, locker):
            # Locking a non-existing object creates an empty one,
            # so do the same in the test
            key = name.value.decode('ascii')
            self._ensure_key_exists(key)
            return 0
                self.kvs_omaps[key] = {}

        @staticmethod
        def close():

@@ -108,6 +136,18 @@ class FakeRadosModule(object):
            self._ensure_key_exists(key)
            self.kvs[key] = value

        def write(self, key, value, offset):
            self._validate_key(key)
            try:
                current = self.kvs[key]
            except KeyError:
                current = b""
            if len(current) < offset:
                current += b'\x00' * (offset - len(current))
            self.kvs[key] = (
                current[:offset] + value + current[offset + len(value):]
            )

        def stat(self, key):
            self._validate_key(key)
            if key not in self.kvs:

@@ -122,6 +162,33 @@ class FakeRadosModule(object):
            else:
                return self.kvs[key][offset:offset+length]

        def operate_read_op(self, op, key, flag=0):
            for op in op.ops:
                op(key)

        def get_omap_vals(self, op, start_filter, prefix_filter, number):
            oi = FakeRadosModule.OmapIterator(start_filter, prefix_filter,
                                              number)
            op.ops.append(lambda oid: oi.set_data(self.kvs_omaps.get(oid)))
            return oi, oi.op_ret

        def operate_write_op(self, op, key, flags=0):
            for op in op.ops:
                op(key)

        def set_omap(self, op, keys, values):
            def add(oid):
                self._ensure_key_exists(oid)
                omaps = self.kvs_omaps.setdefault(oid, {})
                omaps.update(dict(zip(keys, values)))
            op.ops.append(add)

        def remove_omap_keys(self, op, keys):
            def rm(oid):
                for key in keys:
                    del self.kvs_omaps[oid][key]
            op.ops.append(rm)
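
        # A sketch of how these fakes are exercised, mirroring the real
        # rados binding (illustrative only, not part of the change):
        #
        #     with FakeRadosModule.ReadOpCtx() as op:
        #         vals, ret = ioctx.get_omap_vals(op, "", "prefix", -1)
        #         ioctx.operate_read_op(op, object_name)
        #         # iterating vals now yields the matching omap items
        #
        # Each omap call queues a callback on op.ops; operate_read_op and
        # operate_write_op replay them against the given object name.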

        def get_xattrs(self, key):
            if key not in self.kvs:
                raise FakeRadosModule.ObjectNotFound

@@ -143,16 +210,23 @@ class FakeRadosModule(object):
                raise FakeRadosModule.ObjectNotFound
            del self.kvs[key]
            del self.kvs_xattrs[key]
            del self.kvs_omaps[key]

        def aio_remove(self, key):
            self._validate_key(key)
            self.kvs.pop(key, None)
            self.kvs_xattrs.pop(key, None)
            self.kvs_omaps.pop(key, None)

        @staticmethod
        def aio_flush():
            pass

    class FakeRados(object):
        def __init__(self, kvs, kvs_xattrs):
        def __init__(self, kvs, kvs_xattrs, kvs_omaps):
            self.kvs = kvs
            self.kvs_xattrs = kvs_xattrs
            self.kvs_omaps = kvs_omaps

        @staticmethod
        def connect():

@@ -163,14 +237,17 @@ class FakeRadosModule(object):
            pass

        def open_ioctx(self, pool):
            return FakeRadosModule.ioctx(self.kvs, self.kvs_xattrs)
            return FakeRadosModule.ioctx(self.kvs, self.kvs_xattrs,
                                         self.kvs_omaps)

    def __init__(self):
        self.kvs = {}
        self.kvs_xattrs = {}
        self.kvs_omaps = {}

    def Rados(self, *args, **kwargs):
        return FakeRadosModule.FakeRados(self.kvs, self.kvs_xattrs)
        return FakeRadosModule.FakeRados(self.kvs, self.kvs_xattrs,
                                         self.kvs_omaps)

    @staticmethod
    def run_in_thread(method, args):

@@ -270,56 +347,36 @@ class FakeSwiftClient(object):
            raise swexc.ClientException("No such container",
                                        http_status=404)

    def post_account(self, headers, query_string=None, data=None,
                     response_dict=None):
        if query_string == 'bulk-delete':
            resp = {'Response Status': '200 OK',
                    'Response Body': '',
                    'Number Deleted': 0,
                    'Number Not Found': 0}
            if response_dict is not None:
                response_dict['status'] = 200
            if data:
                for path in data.splitlines():
                    try:
                        __, container, obj = (unquote(path.decode('utf8'))
                                              .split('/', 2))
                        del self.kvs[container][obj]
                        resp['Number Deleted'] += 1
                    except KeyError:
                        resp['Number Not Found'] += 1
            return {}, json.dumps(resp).encode('utf-8')

        if response_dict is not None:
            response_dict['status'] = 204

        return {}, None


@six.add_metaclass(SkipNotImplementedMeta)
class TestCase(base.BaseTestCase):

    ARCHIVE_POLICIES = {
        'low': archive_policy.ArchivePolicy(
            "low",
            0,
            [
                # 5 minutes resolution for an hour
                archive_policy.ArchivePolicyItem(
                    granularity=300, points=12),
                # 1 hour resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=3600, points=24),
                # 1 day resolution for a month
                archive_policy.ArchivePolicyItem(
                    granularity=3600 * 24, points=30),
            ],
        ),
        'medium': archive_policy.ArchivePolicy(
            "medium",
            0,
            [
                # 1 minute resolution for an hour
                archive_policy.ArchivePolicyItem(
                    granularity=60, points=60),
                # 1 hour resolution for a week
                archive_policy.ArchivePolicyItem(
                    granularity=3600, points=7 * 24),
                # 1 day resolution for a year
                archive_policy.ArchivePolicyItem(
                    granularity=3600 * 24, points=365),
            ],
        ),
        'high': archive_policy.ArchivePolicy(
            "high",
            0,
            [
                # 1 second resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=1, points=3600 * 24),
                # 1 minute resolution for a month
                archive_policy.ArchivePolicyItem(
                    granularity=60, points=60 * 24 * 30),
                # 1 hour resolution for a year
                archive_policy.ArchivePolicyItem(
                    granularity=3600, points=365 * 24),
            ],
        ),
        'no_granularity_match': archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,

@@ -342,8 +399,9 @@ class TestCase(base.BaseTestCase):
            return os.path.join(root, project_file)
        return root

    def setUp(self):
        super(TestCase, self).setUp()
    @classmethod
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',

@@ -362,28 +420,29 @@ class TestCase(base.BaseTestCase):
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()
        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.ARCHIVE_POLICIES):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass
        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file"),
            'storage')

    def setUp(self):
        super(TestCase, self).setUp()
        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',

@@ -392,30 +451,11 @@ class TestCase(base.BaseTestCase):
        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail by timeout, but it may be useful in some
            # cases
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the storage

@@ -425,10 +465,6 @@ class TestCase(base.BaseTestCase):
        # life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()

@@ -20,14 +20,13 @@ import tempfile
import threading
import time
from unittest import case
import uuid
import warnings

from gabbi import fixture
import sqlalchemy.engine.url as sqlalchemy_url
import sqlalchemy_utils

from gnocchi import indexer
from gnocchi.indexer import sqlalchemy
from gnocchi.rest import app
from gnocchi import service
from gnocchi import storage

@@ -84,10 +83,7 @@ class ConfigFixture(fixture.GabbiFixture):
        self.conf = conf
        self.tmp_dir = data_tmp_dir

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API return a lot of 501 errors, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore

@@ -102,16 +98,15 @@ class ConfigFixture(fixture.GabbiFixture):

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        url = sqlalchemy_url.make_url(conf.indexer.url)

        url.database = url.database + str(uuid.uuid4()).replace('-', '')
        db_url = str(url)
        conf.set_override('url', db_url, 'indexer')
        sqlalchemy_utils.create_database(db_url)
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()
        index.upgrade(create_legacy_resource_types=True)

        conf.set_override('pecan_debug', False, 'api')

@@ -143,13 +138,13 @@ class ConfigFixture(fixture.GabbiFixture):
        if hasattr(self, 'index'):
            self.index.disconnect()

        if not self.conf.indexer.url.startswith("null://"):
            # Swallow noise from missing tables when dropping
            # database.
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore',
                                        module='sqlalchemy.engine.default')
                sqlalchemy_utils.drop_database(self.conf.indexer.url)
        # Swallow noise from missing tables when dropping
        # database.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    module='sqlalchemy.engine.default')
            sqlalchemy_utils.drop_database(self.conf.indexer.url)

        if self.tmp_dir:
            shutil.rmtree(self.tmp_dir)

@@ -167,7 +162,9 @@ class MetricdThread(threading.Thread):

    def run(self):
        while self.flag:
            self.storage.process_background_tasks(self.index)
            metrics = self.storage.list_metric_with_measures_to_process(
                None, None, full=True)
            self.storage.process_background_tasks(self.index, metrics)
            time.sleep(0.1)

    def stop(self):

@@ -8,33 +8,200 @@ defaults:
    x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN']

tests:

- name: check /
  url: /

- name: check archive policies
# Fail to create archive policy
- name: wrong archive policy content type
  desc: attempt to create archive policy with invalid content-type
  url: /v1/archive_policy
  response_headers:
    content-type: /application/json/
  method: POST
  request_headers:
    content-type: text/plain
  status: 415
  response_strings:
    - '{"definition": [{"points": 86400, "timespan": "1 day, 0:00:00", "granularity": "0:00:01"}, {"points": 43200, "timespan": "30 days, 0:00:00", "granularity": "0:01:00"}, {"points": 8760, "timespan": "365 days, 0:00:00", "granularity": "1:00:00"}], "back_window": 0, "name": "high", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}'
    - '{"definition": [{"points": 12, "timespan": "1:00:00", "granularity": "0:05:00"}, {"points": 24, "timespan": "1 day, 0:00:00", "granularity": "1:00:00"}, {"points": 30, "timespan": "30 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "low", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}'
    - '{"definition": [{"points": 60, "timespan": "1:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", "granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}'
    - Unsupported Media Type

- name: check generic resources with the default one for statsd
  url: /v1/resource/generic
  response_headers:
    content-type: /application/json/
  response_json_paths:
    $[0].type: generic
    $.`len`: 1
- name: wrong method
  desc: attempt to create archive policy with 'PUT' method
  url: /v1/archive_policy
  method: PUT
  request_headers:
    content-type: application/json
  status: 405

- name: post unicode archive policy
- name: invalid authZ
  desc: x-auth-token is invalid
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
    x-auth-token: 'hello'
  data:
    name: medium
    definition:
      - granularity: 1 second
  status: 401

- name: bad archive policy body
  desc: archive policy contains invalid key 'cowsay'
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    cowsay: moo
  status: 400
  response_strings:
    - "Invalid input: extra keys not allowed"

- name: missing definition
  desc: archive policy is missing 'definition' keyword
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: medium
  status: 400
  response_strings:
    - "Invalid input: required key not provided"

- name: empty definition
  desc: empty definition for archive policy
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: medium
    definition: []
  status: 400
  response_strings:
    - "Invalid input: length of value must be at least 1"

- name: wrong value definition
  desc: invalid type of 'definition' key
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: somename
    definition: foobar
  status: 400
  response_strings:
    - "Invalid input: expected a list"

- name: useless definition
  desc: invalid archive policy definition
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: medium
    definition:
      - cowsay: moo
  status: 400
  response_strings:
    - "Invalid input: extra keys not allowed"

#
# Create archive policy
#

- name: create archive policy
  desc: create archive policy 'gabbilive' for live tests
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: gabbilive
    back_window: 0
    definition:
      - granularity: 1 second
        points: 60
      - granularity: 2 second
        timespan: 1 minute
      - points: 5
        timespan: 5 minute
    aggregation_methods:
      - mean
      - min
      - max
  response_headers:
    location: $SCHEME://$NETLOC/v1/archive_policy/gabbilive
  status: 201
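
# Each definition entry names two of granularity/points/timespan and the
# server derives the third (timespan = granularity × points): 1s × 60
# points gives 0:01:00; 2s over 1 minute gives 30 points; 5 points over
# 5 minutes gives a 0:01:00 granularity. The next test asserts exactly
# those derived values.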

# Retrieve it correctly and then poorly

- name: get archive policy
  desc: retrieve archive policy 'gabbilive' and assert its values
  url: $LOCATION
  response_headers:
    content-type: /application/json/
  response_json_paths:
    $.name: gabbilive
    $.back_window: 0
    $.definition[0].granularity: "0:00:01"
    $.definition[0].points: 60
    $.definition[0].timespan: "0:01:00"
    $.definition[1].granularity: "0:00:02"
    $.definition[1].points: 30
    $.definition[1].timespan: "0:01:00"
    $.definition[2].granularity: "0:01:00"
    $.definition[2].points: 5
    $.definition[2].timespan: "0:05:00"
  response_strings:
    - '"aggregation_methods": ["max", "min", "mean"]'

- name: get wrong accept
  desc: invalid 'accept' header
  url: /v1/archive_policy/medium
  request_headers:
    accept: text/plain
  status: 406

# Unexpected methods

- name: post single archive
  desc: unexpected 'POST' request to archive policy
  url: /v1/archive_policy/gabbilive
  method: POST
  status: 405

- name: put single archive
  desc: unexpected 'PUT' request to archive policy
  url: /v1/archive_policy/gabbilive
  method: PUT
  status: 405

# Duplicated archive policy names ain't allowed

- name: create duplicate archive policy
  desc: attempt to create a second archive policy named 'gabbilive'
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: gabbilive
    definition:
      - granularity: 30 second
        points: 60
  status: 409
  response_strings:
    - Archive policy gabbilive already exists

# Create a unicode named policy

- name: post unicode policy name
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
    x-roles: admin
  data:
    name: ✔éñ☃
    definition:

@@ -46,36 +213,390 @@ tests:
  response_json_paths:
    name: ✔éñ☃

- name: get unicode archive policy
  url: $LOCATION
- name: retrieve unicode policy name
  url: $LOCATION
  response_json_paths:
    $.name: ✔éñ☃
    name: ✔éñ☃

- name: delete unicode archive policy
  url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
  method: DELETE
  status: 204

- name: post instance resource
  url: /v1/resource/instance
# It really is gone

- name: confirm delete
  desc: assert deleted unicode policy is not available
  method: GET
  url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
  status: 404

# Fail to delete one that does not exist

- name: delete missing archive
  desc: delete non-existent archive policy
  url: /v1/archive_policy/grandiose
  method: DELETE
  status: 404
  response_strings:
    - Archive policy grandiose does not exist

# Attempt to create illogical policies

- name: create illogical policy
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: complex
    definition:
      - granularity: 1 second
        points: 60
        timespan: "0:01:01"
  status: 400
  response_strings:
    - timespan ≠ granularity × points
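
# 1 second × 60 points works out to 0:01:00, so the declared timespan of
# 0:01:01 cannot match and the policy is rejected.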

- name: create identical granularities policy
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: complex
    definition:
      - granularity: 1 second
        points: 60
      - granularity: 1 second
        points: 120
  status: 400
  response_strings:
    - "More than one archive policy uses granularity `1.0'"

- name: policy invalid unit
  desc: invalid unit for archive policy 'timespan' key
  url: /v1/archive_policy
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: 227d0e1f-4295-4e4b-8515-c296c47d71d3
    definition:
      - granularity: 1 second
        timespan: "1 shenanigan"
  status: 400

#
# Archive policy rules
#

- name: create archive policy rule1
  url: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: gabbilive_rule
    metric_pattern: "live.*"
    archive_policy_name: gabbilive
  status: 201
  response_json_paths:
    $.metric_pattern: "live.*"
    $.archive_policy_name: gabbilive
    $.name: gabbilive_rule

- name: create invalid archive policy rule
  url: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json
  data:
    name: test_rule
    metric_pattern: "disk.foo.*"
  status: 400

- name: missing auth archive policy rule
  url: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json
    x-auth-token: 'hello'
  data:
    name: test_rule
    metric_pattern: "disk.foo.*"
    archive_policy_name: low
  status: 401

- name: wrong archive policy rule content type
  url: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: text/plain
  status: 415
  response_strings:
    - Unsupported Media Type

- name: bad archive policy rule body
  url: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json
  data:
    whaa: foobar
  status: 400
  response_strings:
    - "Invalid input: extra keys not allowed"

# Get archive policy rules

- name: get all archive policy rules
  url: /v1/archive_policy_rule
  status: 200
  response_strings:
    - '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"'

- name: get unknown archive policy rule
  url: /v1/archive_policy_rule/foo
  status: 404


- name: get archive policy rule
  url: /v1/archive_policy_rule/gabbilive_rule
  status: 200
  response_json_paths:
    $.metric_pattern: "live.*"
    $.archive_policy_name: "gabbilive"
    $.name: "gabbilive_rule"

- name: delete archive policy in use
  desc: fails due to https://bugs.launchpad.net/gnocchi/+bug/1569781
  url: /v1/archive_policy/gabbilive
  method: DELETE
  status: 400

#
# Metrics
#

- name: get all metrics
  url: /v1/metric
  status: 200

- name: create metric with name and rule
  url: /v1/metric
  request_headers:
    content-type: application/json
  method: post
  data:
    name: "live.io.rate"
  status: 201
  response_json_paths:
    $.archive_policy_name: gabbilive
    $.name: live.io.rate

- name: delete metric
  url: $LOCATION
  method: DELETE
  status: 204

- name: create metric with name and policy
  url: /v1/metric
  request_headers:
    content-type: application/json
  method: post
  data:
    name: "aagabbi.live.metric"
    archive_policy_name: "gabbilive"
  status: 201
  response_json_paths:
    $.archive_policy_name: gabbilive
    $.name: "aagabbi.live.metric"

- name: get valid metric id
  url: $LOCATION
  status: 200
  response_json_paths:
    $.archive_policy.name: gabbilive

- name: delete the metric
  url: /v1/metric/$RESPONSE['$.id']
  method: DELETE
  status: 204

- name: create metric bad archive policy
  url: /v1/metric
  method: POST
  request_headers:
    content-type: application/json
  data:
    archive_policy_name: 2e2675aa-105e-4664-a30d-c407e6a0ea7f
  status: 400
  response_strings:
    - Archive policy 2e2675aa-105e-4664-a30d-c407e6a0ea7f does not exist

- name: create metric bad content-type
  url: /v1/metric
  method: POST
  request_headers:
    content-type: plain/text
  data:
    archive_policy_name: cookies
  status: 415

#
# Cleanup
#

- name: delete archive policy rule
  url: /v1/archive_policy_rule/gabbilive_rule
  method: DELETE
  status: 204

- name: confirm delete archive policy rule
  url: /v1/archive_policy_rule/gabbilive_rule
  method: DELETE
  status: 404


#
# Resources section
#

- name: root of resource
  url: /v1/resource
  response_json_paths:
    $.generic: $SCHEME://$NETLOC/v1/resource/generic

- name: typo of resource
  url: /v1/resoue
  status: 404

- name: typo of resource extra
  url: /v1/resource/foobar
  status: 404

- name: generic resource
  url: /v1/resource/generic
  status: 200

- name: post resource type
  url: /v1/resource_type
  method: post
  request_headers:
    content-type: application/json
  data:
    id: 75C44741-CC60-4033-804E-2D3098C7D2E9
    user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
    project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
    flavor_id: "2"
    image_ref: http://image
    host: compute1
    name: myresource
    attributes:
      display_name:
        type: string
        required: true
        max_length: 5
        min_length: 2
  status: 201
  response_headers:
    location: $SCHEME://$NETLOC/v1/resource_type/myresource

- name: myresource resource bad accept
  desc: Expect 406 on bad accept type
  request_headers:
    accept: text/plain
  url: /v1/resource/myresource
  status: 406
  response_strings:
    - 406 Not Acceptable

- name: myresource resource complex accept
  desc: failover accept media type appropriately
  request_headers:
    accept: text/plain, application/json; q=0.8
  url: /v1/resource/myresource
  status: 200

- name: post myresource resource
  url: /v1/resource/myresource
  method: post
  request_headers:
    content-type: application/json
  data:
    id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
    user_id: 126204ef-989a-46fd-999b-ee45c8108f31
    project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
    display_name: myvm
    metrics:
      vcpus:
        archive_policy_name: medium
        archive_policy_name: gabbilive
  status: 201
  response_json_paths:
    $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
    $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31
    $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
    $.display_name: "myvm"

- name: post some measures to the metric on instance
  url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/metric/vcpus/measures
- name: get myresource resource
  url: $LOCATION
  status: 200
  response_json_paths:
    $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
    $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31
    $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
    $.display_name: "myvm"

- name: search for myresource resource via user_id
  #url: /v1/search/resource/generic
  url: /v1/search/resource/myresource
  method: POST
  request_headers:
    content-type: application/json
  data:
    =:
      user_id: "126204ef-989a-46fd-999b-ee45c8108f31"
  response_json_paths:
    $..id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e
    $..user_id: 126204ef-989a-46fd-999b-ee45c8108f31
    $..project_id: 98e785d7-9487-4159-8ab8-8230ec37537a
    $..display_name: myvm

- name: search for myresource resource via user_id and 'generic' type
  url: /v1/search/resource/generic
  method: POST
  request_headers:
    content-type: application/json
  data:
    =:
      id: "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"
  response_strings:
    - '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"'

- name: search for myresource resource via user_id and project_id
  url: /v1/search/resource/generic
  method: POST
  request_headers:
    content-type: application/json
  data:
    and:
      - =:
          user_id: "126204ef-989a-46fd-999b-ee45c8108f31"
      - =:
          project_id: "98e785d7-9487-4159-8ab8-8230ec37537a"
  response_strings:
    - '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"'

- name: patch myresource resource
  url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
  method: patch
  request_headers:
    content-type: application/json
  data:
    display_name: myvm2
  status: 200
  response_json_paths:
    display_name: myvm2

- name: post some measures to the metric on myresource
  url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
  request_headers:
    content-type: application/json
  method: POST

@@ -86,9 +607,9 @@ tests:
      value: 2
  status: 202

- name: get instance measures with poll
  url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/metric/vcpus/measures
# wait up to 60 seconds
- name: get myresource measures with poll
  url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
  # wait up to 60 seconds before policy is deleted
  poll:
    count: 60
    delay: 1

@@ -96,7 +617,81 @@ tests:
    $[0][2]: 2
    $[1][2]: 2

- name: delete the instance resource
  url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
- name: post some more measures to the metric on myresource
  POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures
  request_headers:
    content-type: application/json
  data:
    - timestamp: "2015-03-06T14:34:15"
      value: 5
    - timestamp: "2015-03-06T14:34:20"
      value: 5
  status: 202

- name: get myresource measures with refresh
  GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures?refresh=true
  response_json_paths:
    $[0][2]: 2
    $[1][2]: 4
    $[2][2]: 2
    $[3][2]: 2
    $[4][2]: 5
    $[5][2]: 5
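
# refresh=true asks the server to aggregate any still-pending measures
# before answering, so unlike the poll-based test above no retry loop is
# needed here.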

#
# Search for resources
#

- name: typo of search
  url: /v1/search/notexists
  status: 404

- name: typo of search in resource
  url: /v1/search/resource/foobar
  status: 404

- name: search with invalid uuid
  url: /v1/search/resource/generic
  method: POST
  request_headers:
    content-type: application/json
  data:
    =:
      id: "cd9eef"


- name: delete myresource resource
  url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
  method: DELETE
  status: 204

# assert resource is really deleted
- name: assert resource is deleted
  url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e
  method: GET
  status: 404

- name: post myresource resource no data
  url: /v1/resource/generic
  method: post
  request_headers:
    content-type: application/json
  status: 400

- name: delete single archive policy cleanup
  url: /v1/archive_policy/gabbilive
  method: DELETE
  poll:
    count: 360
    delay: 1
  status: 204

# It really is gone

- name: delete our resource type
  DELETE: /v1/resource_type/myresource
  status: 204

- name: confirm delete of cleanup
  url: /v1/archive_policy/gabbilive
  status: 404

@@ -4,8 +4,7 @@ fixtures:
tests:
- name: create archive policy
  desc: for later use
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -19,19 +18,17 @@ tests:
# Aggregation by metric ids

- name: create metric 1
  url: /v1/metric
  POST: /v1/metric
  request_headers:
    content-type: application/json
  method: post
  data:
    archive_policy_name: low
  status: 201

- name: create metric 2
  url: /v1/metric
  POST: /v1/metric
  request_headers:
    content-type: application/json
  method: post
  data:
    archive_policy_name: low
  status: 201
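
# Gabbi's $RESPONSE substitution always refers to the immediately
# preceding request, which is why each POST/GET on a metric id below is
# primed by its own "get metric list" test just before it.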

@@ -40,10 +37,9 @@ tests:
  url: /v1/metric

- name: push measurements to metric 1
  url: /v1/metric/$RESPONSE['$[0].id']/measures
  POST: /v1/metric/$RESPONSE['$[0].id']/measures
  request_headers:
    content-type: application/json
  method: post
  data:
    - timestamp: "2015-03-06T14:33:57"
      value: 43.1

@@ -52,13 +48,12 @@ tests:
  status: 202

- name: get metric list to push metric 2
  url: /v1/metric
  GET: /v1/metric

- name: push measurements to metric 2
  url: /v1/metric/$RESPONSE['$[1].id']/measures
  POST: /v1/metric/$RESPONSE['$[1].id']/measures
  request_headers:
    content-type: application/json
  method: post
  data:
    - timestamp: "2015-03-06T14:33:57"
      value: 3.1

@@ -67,17 +62,27 @@ tests:
  status: 202

- name: get metric list to get aggregates
  url: /v1/metric
  GET: /v1/metric

- name: get measure aggregates by granularity not float
  url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar
  GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar
  status: 400

- name: get metric list to get aggregates for get with refresh
  GET: /v1/metric

- name: get measure aggregates by granularity with refresh
  GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&refresh=true
  response_json_paths:
    $:
      - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
      - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]

- name: get metric list to get aggregates 2
  url: /v1/metric
  GET: /v1/metric

- name: get measure aggregates by granularity
  url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1
  GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1
  poll:
    count: 10
    delay: 1

@@ -87,10 +92,10 @@ tests:
      - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]

- name: get metric list to push metric 3
  url: /v1/metric
  GET: /v1/metric

- name: get measure aggregates by granularity with timestamps
  url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
  GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
  poll:
    count: 10
    delay: 1

@@ -99,11 +104,24 @@ tests:
      - ['2015-03-06T14:30:00+00:00', 300.0, 15.05]
      - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]

- name: get metric list to push metric 4
  GET: /v1/metric

- name: get measure aggregates and reaggregate
  GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&reaggregation=min
  poll:
    count: 10
    delay: 1
  response_json_paths:
    $:
      - ['2015-03-06T14:30:00+00:00', 300.0, 2.55]
      - ['2015-03-06T14:33:57+00:00', 1.0, 3.1]
      - ['2015-03-06T14:34:12+00:00', 1.0, 2.0]
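
# reaggregation=min recombines the per-metric aggregates with min rather
# than the default mean: at 14:33:57 the two metrics hold 43.1 and 3.1,
# so min yields 3.1 where the plain aggregation above returned the mean,
# 23.1.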

# Aggregation by resource and metric_name

- name: post a resource
  url: /v1/resource/generic
  method: post
  POST: /v1/resource/generic
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -116,8 +134,7 @@ tests:
  status: 201

- name: post another resource
  url: /v1/resource/generic
  method: post
  POST: /v1/resource/generic
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -130,12 +147,11 @@ tests:
  status: 201

- name: push measurements to resource 1
  url: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures
  POST: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
    content-type: application/json
  method: post
  data:
    - timestamp: "2015-03-06T14:33:57"
      value: 43.1

@@ -144,12 +160,11 @@ tests:
  status: 202

- name: push measurements to resource 2
  url: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures
  POST: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
    content-type: application/json
  method: post
  data:
    - timestamp: "2015-03-06T14:33:57"
      value: 3.1

@@ -157,9 +172,19 @@ tests:
      value: 2
  status: 202

- name: get measure aggregates by granularity from resources with refresh
  POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
    content-type: application/json
  response_json_paths:
    $:
      - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
      - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]

- name: get measure aggregates by granularity from resources
  method: POST
  url: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1
  POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -173,8 +198,7 @@ tests:
      - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]

- name: get measure aggregates by granularity with timestamps from resources
  method: POST
  url: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
  POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -187,15 +211,28 @@ tests:
      - ['2015-03-06T14:30:00+00:00', 300.0, 15.05]
      - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]

- name: get measure aggregates by granularity from resources and reaggregate
  POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregate=min
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
    content-type: application/json
  poll:
    count: 10
    delay: 1
  response_json_paths:
    $:
      - ['2015-03-06T14:33:57+00:00', 1.0, 23.1]
      - ['2015-03-06T14:34:12+00:00', 1.0, 7.0]

# Some negative tests

- name: get measure aggregates with wrong GET
  url: /v1/aggregation/resource/generic/metric/agg_meter
  GET: /v1/aggregation/resource/generic/metric/agg_meter
  status: 405

- name: get measure aggregates with wrong metric_name
  method: POST
  url: /v1/aggregation/resource/generic/metric/notexists
  POST: /v1/aggregation/resource/generic/metric/notexists
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -205,8 +242,7 @@ tests:
    $.`len`: 0

- name: get measure aggregates with wrong resource
  method: POST
  url: /v1/aggregation/resource/notexits/metric/agg_meter
  POST: /v1/aggregation/resource/notexits/metric/agg_meter
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -216,8 +252,7 @@ tests:
    - Resource type notexits does not exist

- name: get measure aggregates with wrong path
  method: POST
  url: /v1/aggregation/re/generic/metric/agg_meter
  POST: /v1/aggregation/re/generic/metric/agg_meter
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -225,8 +260,7 @@ tests:
  status: 404

- name: get measure aggregates with wrong path 2
  method: POST
  url: /v1/aggregation/resource/generic/notexists/agg_meter
  POST: /v1/aggregation/resource/generic/notexists/agg_meter
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -234,8 +268,7 @@ tests:
  status: 404

- name: get measure aggregates with no resource name
  method: POST
  url: /v1/aggregation/resource/generic/metric
  POST: /v1/aggregation/resource/generic/metric
  request_headers:
    x-user-id: 0fbb231484614b1a80131fc22f6afc9c
    x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -10,8 +10,7 @@ tests:

# create dependent policy
- name: create archive policy
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -19,14 +18,14 @@ tests:
    name: low
    definition:
      - granularity: 1 hour
  status: 201
  response_headers:
    location: $SCHEME://$NETLOC/v1/archive_policy/low
  status: 201

# Attempt to create an archive policy rule

- name: create archive policy rule1
  url: /v1/archive_policy_rule
  POST: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json

@@ -42,7 +41,7 @@ tests:
    $.name: test_rule1

- name: create archive policy rule 2
  url: /v1/archive_policy_rule
  POST: /v1/archive_policy_rule
  method: POST
  request_headers:
    content-type: application/json

@@ -58,8 +57,7 @@ tests:
    $.name: test_rule2

- name: create archive policy rule 3
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -77,8 +75,7 @@ tests:

# Attempt to create an invalid policy rule

- name: create invalid archive policy rule
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -88,8 +85,7 @@ tests:
  status: 400

- name: missing auth archive policy rule
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
  data:

@@ -99,8 +95,7 @@ tests:
  status: 403

- name: wrong content type
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: text/plain
    x-roles: admin

@@ -109,8 +104,7 @@ tests:
    - Unsupported Media Type

- name: wrong auth create rule
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
    x-roles: foo

@@ -121,8 +115,7 @@ tests:
  status: 403

- name: missing auth create rule
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
  data:

@@ -132,8 +125,7 @@ tests:
  status: 403

- name: bad request body
  url: /v1/archive_policy_rule
  method: POST
  POST: /v1/archive_policy_rule
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -146,7 +138,7 @@ tests:
# Get archive policy rules

- name: get archive policy rule
  url: /v1/archive_policy_rule
  GET: /v1/archive_policy_rule
  status: 200
  response_json_paths:
    $.[0].metric_pattern: disk.foo.*

@@ -154,36 +146,38 @@ tests:
    $.[2].metric_pattern: "*"

- name: get unknown archive policy rule
  url: /v1/archive_policy_rule/foo
  GET: /v1/archive_policy_rule/foo
  status: 404

- name: delete used archive policy
  DELETE: /v1/archive_policy/low
  request_headers:
    x-roles: admin
  status: 400
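
# Deleting 'low' is refused here, presumably because the rules created
# above still reference it.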

# delete rule as non admin

- name: delete archive policy rule non admin
  url: /v1/archive_policy_rule/test_rule1
  method: DELETE
  DELETE: /v1/archive_policy_rule/test_rule1
  status: 403

# delete rule

- name: delete archive policy rule1
  url: /v1/archive_policy_rule/test_rule1
  method: DELETE
  DELETE: /v1/archive_policy_rule/test_rule1
  request_headers:
    x-roles: admin
  status: 204

- name: delete archive policy rule2
  url: /v1/archive_policy_rule/test_rule2
  method: DELETE
  DELETE: /v1/archive_policy_rule/test_rule2
  request_headers:
    x-roles: admin
  status: 204


- name: delete archive policy rule3
  url: /v1/archive_policy_rule/test_rule3
  method: DELETE
  DELETE: /v1/archive_policy_rule/test_rule3
  request_headers:
    x-roles: admin
  status: 204

@@ -191,8 +185,15 @@ tests:

# delete again

- name: confirm delete archive policy rule
  url: /v1/archive_policy_rule/test_rule1
  method: DELETE
  DELETE: /v1/archive_policy_rule/test_rule1
  request_headers:
    x-roles: admin
  status: 404

- name: delete missing archive policy rule utf8
  DELETE: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83
  request_headers:
    x-roles: admin
  status: 404
  response_strings:
    - Archive policy rule ✔éñ☃ does not exist

@@ -25,20 +25,20 @@ tests:
# Do we care?

- name: empty archive policy list
  url: /v1/archive_policy
  GET: /v1/archive_policy
  response_headers:
    content-type: /application/json/
  response_strings:
    - "[]"

- name: empty list text
  url: /v1/archive_policy
  GET: /v1/archive_policy
  request_headers:
    accept: text/plain
  status: 406

- name: empty list html
  url: /v1/archive_policy
  GET: /v1/archive_policy
  request_headers:
    accept: text/html
  status: 406

@@ -46,8 +46,7 @@ tests:

# Fail to create an archive policy for various reasons.

- name: wrong content type
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: text/plain
    x-roles: admin

@@ -56,16 +55,14 @@ tests:
    - Unsupported Media Type

- name: wrong method
  url: /v1/archive_policy
  method: PUT
  PUT: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin
  status: 405

- name: wrong authZ
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: clancy

@@ -76,8 +73,7 @@ tests:
  status: 403

- name: missing authZ
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
  data:

@@ -87,8 +83,7 @@ tests:
  status: 403

- name: bad request body
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -99,8 +94,7 @@ tests:
    - "Invalid input: extra keys not allowed"

- name: missing definition
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -111,8 +105,7 @@ tests:
    - "Invalid input: required key not provided"

- name: empty definition
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -124,8 +117,7 @@ tests:
    - "Invalid input: length of value must be at least 1"

- name: wrong value definition
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -137,8 +129,7 @@ tests:
    - "Invalid input: expected a list"

- name: useless definition
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -153,8 +144,7 @@ tests:

# Create a valid archive policy.

- name: create archive policy
  url: /v1/archive_policy
  method: POST
  POST: /v1/archive_policy
  request_headers:
    content-type: application/json
    x-roles: admin

@@ -171,7 +161,7 @@ tests:

# Retrieve it correctly and then poorly

- name: get archive policy
  url: $LOCATION
  GET: $LOCATION
  response_headers:
    content-type: /application/json/
  response_json_paths:
@ -184,28 +174,96 @@ tests:
|
|||
$.definition[1].timespan: null
|
||||
|
||||
- name: get wrong accept
|
||||
url: /v1/archive_policy/medium
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
accept: text/plain
|
||||
status: 406
|
||||
|
||||
# Update archive policy
|
||||
|
||||
- name: patch archive policy with bad definition
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
data:
|
||||
definition:
|
||||
- granularity: 1 second
|
||||
points: 50
|
||||
timespan: 1 hour
|
||||
- granularity: 2 second
|
||||
status: 400
|
||||
response_strings:
|
||||
- timespan ≠ granularity × points
|
||||
|
||||
- name: patch archive policy with missing granularity
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
data:
|
||||
definition:
|
||||
- granularity: 1 second
|
||||
points: 50
|
||||
status: 400
|
||||
response_strings:
|
||||
- "Archive policy medium does not support change: Cannot add or drop granularities"
|
||||
|
||||
- name: patch archive policy with non-matching granularity
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
data:
|
||||
definition:
|
||||
- granularity: 5 second
|
||||
points: 20
|
||||
- granularity: 2 second
|
||||
status: 400
|
||||
response_strings:
|
||||
- "Archive policy medium does not support change: 1.0 granularity interval was changed"
|
||||
|
||||
- name: patch archive policy
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
data:
|
||||
definition:
|
||||
- granularity: 1 second
|
||||
points: 50
|
||||
- granularity: 2 second
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.name: medium
|
||||
$.definition[0].granularity: "0:00:01"
|
||||
$.definition[0].points: 50
|
||||
$.definition[0].timespan: "0:00:50"
|
||||
|
||||
- name: get patched archive policy
|
||||
GET: $LAST_URL
|
||||
response_headers:
|
||||
content-type: /application/json/
|
||||
response_json_paths:
|
||||
$.name: medium
|
||||
$.definition[0].granularity: "0:00:01"
|
||||
$.definition[0].points: 50
|
||||
$.definition[0].timespan: "0:00:50"
|
||||
|
||||
# Unexpected methods
|
||||
|
||||
- name: post single archive
|
||||
url: /v1/archive_policy/medium
|
||||
method: POST
|
||||
POST: $LAST_URL
|
||||
status: 405
|
||||
|
||||
- name: put single archive
|
||||
url: /v1/archive_policy/medium
|
||||
method: PUT
|
||||
PUT: $LAST_URL
|
||||
status: 405
|
||||
|
||||
# Create another one and then test duplication
|
||||
|
||||
- name: create second policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -218,8 +276,7 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: create duplicate policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -234,8 +291,7 @@ tests:
|
|||
# Create a unicode named policy
|
||||
|
||||
- name: post unicode policy name
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -251,13 +307,12 @@ tests:
|
|||
name: ✔éñ☃
|
||||
|
||||
- name: retrieve unicode policy name
|
||||
url: $LOCATION
|
||||
GET: $LOCATION
|
||||
response_json_paths:
|
||||
name: ✔éñ☃
|
||||
|
||||
- name: post small unicode policy name
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -273,31 +328,31 @@ tests:
|
|||
name: æ
|
||||
|
||||
- name: retrieve small unicode policy name
|
||||
url: $LOCATION
|
||||
GET: $LOCATION
|
||||
response_json_paths:
|
||||
name: æ
|
||||
|
||||
# List the collection
|
||||
|
||||
- name: get archive policy list
|
||||
url: /v1/archive_policy
|
||||
# TODO(chdent): We do not know the order, should we?
|
||||
GET: /v1/archive_policy
|
||||
response_strings:
|
||||
- '"name": "medium"'
|
||||
- '"name": "large"'
|
||||
response_json_paths:
|
||||
$[?name = "large"].definition[?granularity = "1:00:00"].points: null
|
||||
$[?name = "medium"].definition[?granularity = "0:00:02"].points: null
|
||||
|
||||
# Delete one as non-admin
|
||||
|
||||
- name: delete single archive non admin
|
||||
url: /v1/archive_policy/medium
|
||||
method: DELETE
|
||||
DELETE: /v1/archive_policy/medium
|
||||
status: 403
|
||||
|
||||
# Delete one
|
||||
|
||||
- name: delete single archive
|
||||
url: /v1/archive_policy/medium
|
||||
method: DELETE
|
||||
DELETE: /v1/archive_policy/medium
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
||||
|
@ -305,25 +360,37 @@ tests:
|
|||
# It really is gone
|
||||
|
||||
- name: confirm delete
|
||||
url: /v1/archive_policy/medium
|
||||
GET: $LAST_URL
|
||||
status: 404
|
||||
|
||||
# Fail to delete one that does not exist
|
||||
|
||||
- name: delete missing archive
|
||||
url: /v1/archive_policy/grandiose
|
||||
method: DELETE
|
||||
DELETE: /v1/archive_policy/grandiose
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 404
|
||||
response_strings:
|
||||
- Archive policy grandiose does not exist
|
||||
|
||||
- name: delete archive utf8
|
||||
DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
||||
|
||||
- name: delete missing archive utf8 again
|
||||
DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 404
|
||||
response_strings:
|
||||
- Archive policy ✔éñ☃ does not exist
|
||||
|
||||
# Add metric using the policy and then be unable to delete policy
|
||||
|
||||
- name: create metric
|
||||
url: /v1/metric
|
||||
method: POST
|
||||
POST: /v1/metric
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
|
||||
|
@ -333,8 +400,7 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: delete in use policy
|
||||
url: /v1/archive_policy/large
|
||||
method: DELETE
|
||||
DELETE: /v1/archive_policy/large
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 400
|
||||
|
@ -344,8 +410,7 @@ tests:
|
|||
# Attempt to create illogical policies
|
||||
|
||||
- name: create illogical policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -360,8 +425,7 @@ tests:
|
|||
- timespan ≠ granularity × points
|
||||
|
||||
- name: create invalid points policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -375,8 +439,7 @@ tests:
|
|||
- "Invalid input: not a valid value for dictionary value"
|
||||
|
||||
- name: create invalid granularity policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -390,8 +453,7 @@ tests:
|
|||
- "Invalid input: not a valid value for dictionary value"
|
||||
|
||||
- name: create identical granularities policy
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -407,8 +469,7 @@ tests:
|
|||
- "More than one archive policy uses granularity `1.0'"
|
||||
|
||||
- name: policy invalid unit
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -422,8 +483,7 @@ tests:
|
|||
# Non admin user attempt
|
||||
|
||||
- name: fail to create policy non-admin
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220
|
||||
x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c
|
||||
|
@ -439,8 +499,7 @@ tests:
|
|||
# Back windows
|
||||
|
||||
- name: policy with back window
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -457,8 +516,7 @@ tests:
|
|||
|
||||
- name: policy no back window
|
||||
desc: and default seconds on int granularity
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -477,8 +535,7 @@ tests:
|
|||
# Timespan, points, granularity input tests
|
||||
|
||||
- name: policy float granularity
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -494,8 +551,7 @@ tests:
|
|||
$.definition[0].timespan: "1:06:40"
|
||||
|
||||
- name: policy float timespan
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
|
|
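Editor's note: the recurring change across every gabbit file in this commit is the switch to gabbi's request shorthand, where the old `url:` plus `method:` pair collapses into a single HTTP-verb key. A minimal before/after sketch (the /v1/example path is illustrative, not taken from the suite):

    tests:
        - name: old spelling          # what the removed lines used
          url: /v1/example
          method: POST

        - name: new spelling          # what the added lines use
          POST: /v1/example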
@@ -8,8 +8,7 @@ fixtures:
tests:

- name: create archive policy
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -20,8 +19,7 @@ tests:
status: 201

- name: make a generic resource
url: /v1/resource/generic
method: POST
POST: /v1/resource/generic
request_headers:
x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d

@@ -35,17 +33,16 @@ tests:
status: 201

- name: confirm no metrics yet
GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
request_headers:
x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d
content-type: application/json
url: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
response_json_paths:
$: []

- name: post some measures
url: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
method: post
POST: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
request_headers:
x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147
x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d

@@ -61,7 +58,7 @@ tests:
# aggregated.

- name: get some measures
url: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures
poll:
count: 50
delay: .1
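Editor's note: the `poll` block closing the hunk above is how these tests cope with gnocchi's asynchronous measure processing; gabbi retries the request until the assertions pass or the retry budget is exhausted. A sketch of the shape, with illustrative values:

    - name: wait for aggregated measures
      GET: /v1/resource/generic/<resource-id>/metric/some.counter/measures
      poll:
          count: 50     # retry up to 50 times
          delay: .1     # 0.1 s between tries, so roughly 5 s worst case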
@@ -5,7 +5,7 @@ tests:

- name: get information on APIs
desc: Root URL must return information about API versions
url: /
GET: /
response_headers:
content-type: application/json; charset=UTF-8
response_json_paths:

@@ -13,8 +13,7 @@ tests:
$.versions.[0].status: "CURRENT"

- name: archive policy post success
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -35,8 +34,7 @@ tests:

- name: post archive policy no auth
desc: this confirms that auth handling comes before data validation
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
data:

@@ -46,13 +44,12 @@ tests:
status: 403

- name: post metric with archive policy
url: /v1/metric
POST: /v1/metric
request_headers:
content-type: application/json
x-roles: admin
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
method: POST
data:
archive_policy_name: test1
status: 201

@@ -62,7 +59,7 @@ tests:
$.archive_policy_name: test1

- name: retrieve metric info
url: $LOCATION
GET: $LOCATION
status: 200
request_headers:
content_type: /application\/json/

@@ -73,15 +70,14 @@ tests:
$.created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f

- name: list the one metric
url: /v1/metric
GET: /v1/metric
status: 200
response_json_paths:
$[0].archive_policy.name: test1

- name: post a single measure
desc: post one measure
url: /v1/metric/$RESPONSE['$[0].id']/measures
method: POST
POST: /v1/metric/$RESPONSE['$[0].id']/measures
request_headers:
content-type: application/json
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee

@@ -93,7 +89,7 @@ tests:

- name: Get list of resource type and URL
desc: Resources index page should return list of type associated with a URL
url: /v1/resource/
GET: /v1/resource/
response_headers:
content-type: application/json; charset=UTF-8
status: 200

@@ -101,8 +97,7 @@ tests:
$.generic: $SCHEME://$NETLOC/v1/resource/generic

- name: post generic resource
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
content-type: application/json
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee

@@ -122,8 +117,7 @@ tests:
created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f

- name: post generic resource bad id
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
content-type: application/json
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee

@@ -145,5 +139,25 @@ tests:
original_resource_id: 1.2.3.4

- name: get status denied
url: /v1/status
GET: /v1/status
status: 403

- name: get status
GET: /v1/status
request_headers:
content-type: application/json
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
x-roles: admin
response_json_paths:
$.storage.`len`: 2

- name: get status, no details
GET: /v1/status?details=False
request_headers:
content-type: application/json
x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee
x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f
x-roles: admin
response_json_paths:
$.storage.`len`: 1
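Editor's note: several tests above chain onto the previous response through gabbi substitutions. A compact sketch of the ones used in this file (the test itself is illustrative):

    - name: follow the created object
      GET: $LOCATION                    # location header of the prior response
      response_json_paths:
          $.id: $RESPONSE['$.id']       # JSONPath into the prior response body
    # $SCHEME://$NETLOC expands to the scheme and host under test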
@@ -4,8 +4,7 @@ fixtures:
tests:
- name: create archive policy
desc: for later use
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -16,19 +15,17 @@ tests:
status: 201

- name: create metric
url: /v1/metric
POST: /v1/metric
request_headers:
content-type: application/json
method: post
data:
archive_policy_name: simple
status: 201

- name: push measurements to metric
url: /v1/batch/metrics/measures
POST: /v1/batch/metrics/measures
request_headers:
content-type: application/json
method: post
data:
$RESPONSE['$.id']:
- timestamp: "2015-03-06T14:33:57"

@@ -38,10 +35,9 @@ tests:
status: 202

- name: push measurements to unknown metrics
url: /v1/batch/metrics/measures
POST: /v1/batch/metrics/measures
request_headers:
content-type: application/json
method: post
data:
37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C:
- timestamp: "2015-03-06T14:33:57"

@@ -58,10 +54,9 @@ tests:
- "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d"

- name: push measurements to unknown named metrics
url: /v1/batch/resources/metrics/measures
POST: /v1/batch/resources/metrics/measures
request_headers:
content-type: application/json
method: post
data:
37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D:
cpu_util:

@@ -80,17 +75,15 @@ tests:
- "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d/cpu_util, 46c9418d-d63b-4cdd-be89-8f57ffc5952e/disk.iops"

- name: create second metric
url: /v1/metric
POST: /v1/metric
request_headers:
content-type: application/json
method: post
data:
archive_policy_name: simple
status: 201

- name: post a resource
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
content-type: application/json
data:

@@ -105,8 +98,7 @@ tests:
status: 201

- name: post a second resource
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
content-type: application/json
data:

@@ -121,13 +113,12 @@ tests:
status: 201

- name: list metrics
url: /v1/metric
GET: /v1/metric

- name: push measurements to two metrics
url: /v1/batch/metrics/measures
POST: /v1/batch/metrics/measures
request_headers:
content-type: application/json
method: post
data:
$RESPONSE['$[0].id']:
- timestamp: "2015-03-06T14:33:57"

@@ -142,10 +133,9 @@ tests:
status: 202

- name: push measurements to two named metrics
url: /v1/batch/resources/metrics/measures
POST: /v1/batch/resources/metrics/measures
request_headers:
content-type: application/json
method: post
data:
46c9418d-d63b-4cdd-be89-8f57ffc5952e:
disk.iops:

@@ -169,5 +159,4 @@ tests:
value: 43.1
- timestamp: "2015-03-06T14:34:12"
value: 12

status: 202
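Editor's note: the batch endpoints exercised above accept one JSON document keyed by metric id (or, for the resources variant, by resource id and metric name), so many series can be posted in a single request. A sketch with an illustrative metric id:

    - name: batch measures for one metric
      POST: /v1/batch/metrics/measures
      request_headers:
          content-type: application/json
      data:
          00000000-0000-0000-0000-000000000001:
              - timestamp: "2015-03-06T14:33:57"
                value: 43.1
      status: 202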
@@ -7,8 +7,7 @@ fixtures:

tests:
- name: create archive policy
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -16,15 +15,14 @@ tests:
name: low
definition:
- granularity: 1 hour
status: 201
response_headers:
location: $SCHEME://$NETLOC/v1/archive_policy/low
status: 201

# Try creating a new generic resource

- name: post generic resource
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -45,8 +43,7 @@ tests:

# Update it twice
- name: patch resource user_id
url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
method: patch
PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -59,7 +56,7 @@ tests:
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea

- name: patch resource project_id
url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
PATCH: $LAST_URL
method: patch
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c

@@ -78,7 +75,7 @@ tests:
# List resources

- name: list all resources without history
url: /v1/resource/generic
GET: /v1/resource/generic
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -87,7 +84,7 @@ tests:
$[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907

- name: list all resources with history
url: /v1/resource/generic
GET: $LAST_URL
request_headers:
accept: application/json; details=True; history=True
x-user-id: 0fbb231484614b1a80131fc22f6afc9c

@@ -105,8 +102,7 @@ tests:
$[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907

- name: patch resource metrics
url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
method: patch
PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -118,7 +114,7 @@ tests:
status: 200

- name: list all resources with history no change after metrics update
url: /v1/resource/generic
GET: /v1/resource/generic
request_headers:
accept: application/json; details=True; history=True
x-user-id: 0fbb231484614b1a80131fc22f6afc9c

@@ -136,8 +132,7 @@ tests:
$[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907

- name: create new metrics
url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric
method: post
POST: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -148,7 +143,7 @@ tests:
status: 204

- name: list all resources with history no change after metrics creation
url: /v1/resource/generic
GET: /v1/resource/generic
request_headers:
accept: application/json; details=True; history=True
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
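Editor's note: revision history in the tests above is selected purely through content negotiation; attribute PATCHes grow the history, while metric-only changes do not, which is what the "no change after metrics update/creation" tests assert. The request shape, as used above:

    - name: list resources with history
      GET: /v1/resource/generic
      request_headers:
          accept: application/json; details=True; history=True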
@@ -4,8 +4,7 @@ fixtures:
tests:
- name: create archive policy
desc: for later use
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -16,8 +15,7 @@ tests:
status: 201

- name: create valid metric
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: application/json
data:

@@ -25,10 +23,9 @@ tests:
status: 201

- name: push measurements to metric
url: /v1/metric/$RESPONSE['$.id']/measures
POST: /v1/metric/$RESPONSE['$.id']/measures
request_headers:
content-type: application/json
method: post
data:
- timestamp: "2015-03-06T14:33:57"
value: 43.1

@@ -37,21 +34,21 @@ tests:
status: 202

- name: get metric list for invalid granularity
url: /v1/metric
GET: /v1/metric
status: 200

- name: get measurements invalid granularity
url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42
GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42
status: 404
response_strings:
- Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist

- name: get metric list for granularity
url: /v1/metric
GET: /v1/metric
status: 200

- name: get measurements granularity
url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=1
GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=1
status: 200
poll:
count: 50

@@ -59,4 +56,4 @@ tests:
response_json_paths:
$:
- ["2015-03-06T14:33:57+00:00", 1.0, 43.1]
- ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
- ["2015-03-06T14:34:12+00:00", 1.0, 12.0]
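Editor's note: the archive-policy invariant these files keep probing is timespan = granularity × points. Worked against values that appear above (the second line is arithmetic from the quoted expectations, not from the elided input data):

    # timespan = granularity * points
    #   1 second * 50 points -> "0:00:50"  (the patched "medium" policy)
    #   the float-granularity policy reports "1:06:40", i.e. 4000 seconds;
    #   its input definition falls outside the quoted hunk
    # the "illogical policy" tests send a timespan that breaks this equality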
@@ -4,13 +4,12 @@ fixtures:
tests:
- name: wrong metric
desc: https://bugs.launchpad.net/gnocchi/+bug/1429949
url: /v1/metric/foobar
GET: /v1/metric/foobar
status: 404

- name: create archive policy
desc: for later use
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -21,8 +20,7 @@ tests:
status: 201

- name: create archive policy rule
url: /v1/archive_policy_rule
method: POST
POST: /v1/archive_policy_rule
request_headers:
content-type: application/json
x-roles: admin

@@ -33,8 +31,7 @@ tests:
status: 201

- name: create alt archive policy
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -46,8 +43,7 @@ tests:

- name: create alt archive policy rule
desc: extra rule that won't be matched
url: /v1/archive_policy_rule
method: POST
POST: /v1/archive_policy_rule
request_headers:
content-type: application/json
x-roles: admin

@@ -58,28 +54,47 @@ tests:
status: 201

- name: get metric empty
url: /v1/metric
GET: /v1/metric
status: 200
response_strings:
- "[]"

- name: create metric with name
url: /v1/metric
- name: get metric list with nonexistent sort key
GET: /v1/metric?sort=nonexistent_key:asc
status: 400
response_strings:
- "Sort key supplied is invalid: nonexistent_key"

- name: create metric with name and unit
POST: /v1/metric
request_headers:
content-type: application/json
method: post
data:
name: "disk.io.rate"
unit: "B/s"
status: 201
response_json_paths:
$.archive_policy_name: cookies
$.name: disk.io.rate
$.unit: B/s

- name: create metric with name no rule
url: /v1/metric
- name: create metric with name and over length unit
POST: /v1/metric
request_headers:
content-type: application/json
data:
name: "disk.io.rate"
unit: "over_length_unit_over_length_unit"
status: 400
response_strings:
# split to not match the u' in py2
- "Invalid input: length of value must be at most 31 for dictionary value @ data["
- "'unit']"

- name: create metric with name no rule
POST: /v1/metric
request_headers:
content-type: application/json
method: post
data:
name: "volume.io.rate"
status: 400

@@ -87,8 +102,7 @@ tests:
- No archive policy name specified and no archive policy rule found matching the metric name volume.io.rate

- name: create metric bad archive policy
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: application/json
data:

@@ -98,8 +112,7 @@ tests:
- Archive policy bad-cookie does not exist

- name: create metric bad content-type
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: plain/text
data:

@@ -107,8 +120,7 @@ tests:
status: 415

- name: create valid metric
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: application/json
data:

@@ -118,16 +130,30 @@ tests:
$.archive_policy_name: cookies

- name: get valid metric id
url: /v1/metric/$RESPONSE['$.id']
GET: /v1/metric/$RESPONSE['$.id']
status: 200
response_json_paths:
$.archive_policy.name: cookies

- name: push measurements to metric
url: /v1/metric/$RESPONSE['$.id']/measures
- name: push measurements to metric before epoch
POST: /v1/metric/$RESPONSE['$.id']/measures
request_headers:
content-type: application/json
data:
- timestamp: "1915-03-06T14:33:57"
value: 43.1
status: 400
response_strings:
- Timestamp must be after Epoch

- name: get valid metric id again
GET: /v1/metric
status: 200

- name: push measurements to metric
POST: /v1/metric/$RESPONSE['$[0].id']/measures
request_headers:
content-type: application/json
method: post
data:
- timestamp: "2015-03-06T14:33:57"
value: 43.1

@@ -136,8 +162,7 @@ tests:
status: 202

- name: create valid metric two
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: application/json
data:

@@ -147,10 +172,9 @@ tests:
$.archive_policy_name: cookies

- name: push invalid measurements to metric
url: /v1/metric/$RESPONSE['$.id']/measures
POST: /v1/metric/$RESPONSE['$.id']/measures
request_headers:
content-type: application/json
method: post
data:
- timestamp: "2015-03-06T14:33:57"
value: 12

@@ -159,8 +183,7 @@ tests:
status: 400

- name: create valid metric three
url: /v1/metric
method: POST
POST: /v1/metric
request_headers:
content-type: application/json
data:

@@ -170,77 +193,83 @@ tests:
$.archive_policy_name: cookies

- name: push invalid measurements to metric bis
url: /v1/metric/$RESPONSE['$.id']/measures
POST: /v1/metric/$RESPONSE['$.id']/measures
request_headers:
content-type: application/json
method: post
data: 1
status: 400

- name: add measure unknown metric
url: /v1/metric/fake/measures
POST: /v1/metric/fake/measures
request_headers:
content-type: application/json
method: post
data:
- timestamp: "2015-03-06T14:33:57"
value: 43.1
status: 404

- name: get metric list for authenticated user
request_headers:
x-user-id: foo
x-project-id: bar
GET: /v1/metric

- name: get metric list
url: /v1/metric
GET: /v1/metric
status: 200
response_json_paths:
$[0].archive_policy.name: cookies

- name: get measurements from metric
url: /v1/metric/$RESPONSE['$[0].id']/measures
GET: /v1/metric/$RESPONSE['$[0].id']/measures
status: 200

- name: get metric list for start test
url: /v1/metric
GET: /v1/metric
status: 200
response_json_paths:
$[0].archive_policy.name: cookies

- name: get measurements by start
url: /v1/metric/$RESPONSE['$[0].id']/measures?start=2015-03-06T14:33:57
GET: /v1/metric/$RESPONSE['$[0].id']/measures?start=2015-03-06T14:33:57
status: 200

- name: get measures unknown metric
url: /v1/metric/fake/measures
GET: /v1/metric/fake/measures
status: 404

- name: get metric list for aggregates
url: /v1/metric
GET: /v1/metric
status: 200
response_json_paths:
$[0].archive_policy.name: cookies

- name: get measure unknown aggregates
url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&aggregation=last
GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&aggregation=last
status: 404
response_strings:
- Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist

- name: aggregate measure unknown metric
url: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37
GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37
status: 404
response_strings:
- Metric cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 does not exist

- name: get metric list for delete
url: /v1/metric
GET: /v1/metric
status: 200
response_json_paths:
$[0].archive_policy.name: cookies

- name: delete metric
url: /v1/metric/$RESPONSE['$[0].id']
method: DELETE
DELETE: /v1/metric/$RESPONSE['$[0].id']
status: 204

- name: delete non existent metric
url: /v1/metric/foo
method: DELETE
- name: delete metric again
DELETE: $LAST_URL
status: 404

- name: delete non existent metric
DELETE: /v1/metric/foo
status: 404
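Editor's note: one of the new behaviours pinned above is the rejection of pre-epoch timestamps. An illustrative sketch (the metric id placeholder is not from the suite):

    - name: reject a pre-epoch measure
      POST: /v1/metric/<metric-id>/measures
      request_headers:
          content-type: application/json
      data:
          - timestamp: "1915-03-06T14:33:57"
            value: 43.1
      status: 400
      response_strings:
          - Timestamp must be after Epoch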
@@ -11,8 +11,7 @@ tests:
# Creation resources for this scenarion
#
- name: post resource 1
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -25,8 +24,7 @@ tests:
status: 201

- name: post resource 2
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -39,8 +37,7 @@ tests:
status: 201

- name: post resource 3
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -53,8 +50,7 @@ tests:
status: 201

- name: post resource 4
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -67,8 +63,7 @@ tests:
status: 201

- name: post resource 5
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -84,8 +79,7 @@ tests:
# Basic resource limit/ordering tests
#
- name: list first two items default order
url: /v1/resource/generic?limit=2
method: get
GET: /v1/resource/generic?limit=2
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -96,8 +90,7 @@ tests:
$[1].id: 4facbf7e-a900-406d-a828-82393f7006b3

- name: list next third items default order
url: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3
method: get
GET: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -109,8 +102,7 @@ tests:
$[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150

- name: list first two items order by id witouth direction
url: /v1/resource/generic?limit=2&sort=id
method: get
GET: /v1/resource/generic?limit=2&sort=id
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -122,8 +114,7 @@ tests:
$[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a

- name: list first two items order by id
url: /v1/resource/generic?limit=2&sort=id:asc
method: get
GET: /v1/resource/generic?limit=2&sort=id:asc
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -134,8 +125,7 @@ tests:
$[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a

- name: list next third items order by id
url: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a
method: get
GET: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -147,8 +137,7 @@ tests:
$[2].id: 57a9e836-87b8-4a21-9e30-18a474b98fef

- name: search for some resources with limit, order and marker
url: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab
method: POST
POST: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -165,11 +154,10 @@ tests:
$[1].id: 57a9e836-87b8-4a21-9e30-18a474b98fef

#
# Invalid limit/ordering
# Invalid resource limit/ordering
#
- name: invalid sort_key
url: /v1/resource/generic?sort=invalid:asc
method: get
GET: /v1/resource/generic?sort=invalid:asc
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -177,8 +165,7 @@ tests:
status: 400

- name: invalid sort_dir
url: /v1/resource/generic?sort=id:invalid
method: get
GET: /v1/resource/generic?sort=id:invalid
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -186,8 +173,7 @@ tests:
status: 400

- name: invalid marker
url: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874
method: get
GET: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -195,8 +181,7 @@ tests:
status: 400

- name: invalid negative limit
url: /v1/resource/generic?limit=-2
method: get
GET: /v1/resource/generic?limit=-2
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -204,8 +189,7 @@ tests:
status: 400

- name: invalid limit
url: /v1/resource/generic?limit=invalid
method: get
GET: /v1/resource/generic?limit=invalid
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -217,8 +201,7 @@ tests:
#

- name: post resource 6
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -231,8 +214,7 @@ tests:
status: 201

- name: post resource 7
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -245,8 +227,7 @@ tests:
status: 201

- name: post resource 8
url: /v1/resource/generic
method: post
POST: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -259,8 +240,7 @@ tests:
status: 201

- name: default limit
url: /v1/resource/generic
method: get
GET: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -271,8 +251,7 @@ tests:


- name: update resource 5
url: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
method: patch
PATCH: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -280,9 +259,8 @@ tests:
data:
ended_at: "2014-01-30T02:02:02.000000"

- name: update resource 5 bis
url: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
method: patch
- name: update resource 5 again
PATCH: $LAST_URL
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -291,8 +269,7 @@ tests:
ended_at: "2014-01-31T02:02:02.000000"

- name: default limit with history and multiple sort key
url: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast
method: get
GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea

@@ -305,3 +282,254 @@ tests:
$[1].ended_at: "2014-01-30T02:02:02+00:00"
$[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150
$[2].ended_at: null

#
# Create metrics
#
- name: create archive policy
desc: for later use
url: /v1/archive_policy
method: POST
request_headers:
content-type: application/json
x-roles: admin
data:
name: dummy_policy
definition:
- granularity: 1 second
status: 201

- name: create metric with name1
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
name: "dummy1"
archive_policy_name: dummy_policy
status: 201

- name: create metric with name2
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
name: "dummy2"
archive_policy_name: dummy_policy
status: 201

- name: create metric with name3
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
name: "dummy3"
archive_policy_name: dummy_policy
status: 201

- name: create metric with name4
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
name: "dummy4"
archive_policy_name: dummy_policy
status: 201

- name: create metric with name5
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
name: "dummy5"
archive_policy_name: dummy_policy
status: 201

- name: list all default order
url: /v1/metric
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json

- name: list first two metrics default order
url: /v1/metric?limit=2
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
response_json_paths:
$.`len`: 2
$[0].name: $RESPONSE['$[0].name']
$[1].name: $RESPONSE['$[1].name']

- name: list all default order again
url: /v1/metric
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json

- name: list next three metrics default order
url: /v1/metric?limit=4&marker=$RESPONSE['$[1].id']
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
response_json_paths:
$.`len`: 3
$[0].name: $RESPONSE['$[2].name']
$[1].name: $RESPONSE['$[3].name']
$[2].name: $RESPONSE['$[4].name']

- name: list first two metrics order by user without direction
url: /v1/metric?limit=2&sort=name
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 200
response_json_paths:
$.`len`: 2
$[0].name: dummy1
$[1].name: dummy2

- name: list first two metrics order by user
url: /v1/metric?limit=2&sort=name:asc
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
response_json_paths:
$.`len`: 2
$[0].name: dummy1
$[1].name: dummy2

- name: list next third metrics order by user
url: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id']
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
response_json_paths:
$.`len`: 3
$[0].name: dummy3
$[1].name: dummy4
$[2].name: dummy5

#
# Default metric limit
#

- name: create metric with name6
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
archive_policy_name: dummy_policy
status: 201

- name: create metric with name7
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
archive_policy_name: dummy_policy
status: 201

- name: create metric with name8
url: /v1/metric
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
method: post
data:
archive_policy_name: dummy_policy
status: 201

- name: default metric limit
url: /v1/metric
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
response_json_paths:
$.`len`: 7

#
# Invalid metrics limit/ordering
#

- name: metric invalid sort_key
url: /v1/metric?sort=invalid:asc
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 400

- name: metric invalid sort_dir
url: /v1/metric?sort=id:invalid
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 400

- name: metric invalid marker
url: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 400

- name: metric invalid negative limit
url: /v1/metric?limit=-2
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 400

- name: metric invalid limit
url: /v1/metric?limit=invalid
method: get
request_headers:
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
content-type: application/json
status: 400
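Editor's note: the pagination grammar exercised above applies to both /v1/resource and /v1/metric: limit=N caps the page size, marker=<id> names the last item of the previous page, and sort=key:dir (asc or desc, optionally suffixed -nullslast) may repeat for multi-key ordering. A sketch reusing the previous response, as the tests do:

    - name: second page of metrics by name
      GET: /v1/metric?limit=2&sort=name:asc&marker=$RESPONSE['$[1].id']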
@@ -4,8 +4,7 @@ fixtures:
tests:
- name: create archive policy
desc: for later use
url: /v1/archive_policy
method: POST
POST: /v1/archive_policy
request_headers:
content-type: application/json
x-roles: admin

@@ -17,8 +16,7 @@ tests:
status: 201

- name: create resource 1
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -33,8 +31,7 @@ tests:
status: 201

- name: post cpuutil measures 1
url: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures
method: post
POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -47,8 +44,7 @@ tests:
status: 202

- name: create resource 2
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -63,8 +59,7 @@ tests:
status: 201

- name: post cpuutil measures 2
url: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures
method: post
POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -77,8 +72,7 @@ tests:
status: 202

- name: create resource 3
url: /v1/resource/generic
method: post
POST: /v1/resource/generic
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -93,8 +87,7 @@ tests:
status: 201

- name: post cpuutil measures 3
url: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures
method: post
POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -107,8 +100,7 @@ tests:
status: 202

- name: aggregate metric with groupby on project_id
url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id
method: post
POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -135,8 +127,7 @@ tests:
project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171

- name: aggregate metric with groupby on project_id and invalid group
url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb
method: post
POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8

@@ -149,8 +140,7 @@ tests:
- Invalid groupby attribute

- name: aggregate metric with groupby on project_id and user_id
url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id
method: post
POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id
request_headers:
x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667
x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8
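Editor's note: the dynamic aggregation endpoint above computes one series per group; repeating the groupby parameter nests the grouping, and an unknown attribute is rejected with "Invalid groupby attribute". The request line, exactly as used above:

    POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id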
@ -0,0 +1,609 @@
|
|||
#
|
||||
# Test the resource type API to achieve coverage of just the
|
||||
# ResourceTypesController and ResourceTypeController class code.
|
||||
#
|
||||
|
||||
fixtures:
|
||||
- ConfigFixture
|
||||
|
||||
tests:
|
||||
|
||||
- name: list resource type
|
||||
desc: only legacy resource types are present
|
||||
GET: /v1/resource_type
|
||||
response_json_paths:
|
||||
$.`len`: 15
|
||||
|
||||
# Some bad cases
|
||||
|
||||
- name: post resource type as non-admin
|
||||
POST: $LAST_URL
|
||||
data:
|
||||
name: my_custom_resource
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
status: 403
|
||||
|
||||
- name: post resource type with existing name
|
||||
url: /v1/resource_type
|
||||
method: post
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
attributes:
|
||||
project_id:
|
||||
type: string
|
||||
status: 400
|
||||
|
||||
- name: post resource type bad string
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
attributes:
|
||||
foo:
|
||||
type: string
|
||||
max_length: 32
|
||||
min_length: 5
|
||||
noexist: foo
|
||||
status: 400
|
||||
response_strings:
|
||||
# NOTE(sileht): We would prefer to have a better message but voluptuous seems a bit lost when
|
||||
# an Any have many dict with the same key, here "type"
|
||||
# - "Invalid input: extra keys not allowed @ data[u'attributes'][u'foo'][u'noexist']"
|
||||
# - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']"
|
||||
- "Invalid input:"
|
||||
|
||||
- name: post resource type bad min_length value
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
attributes:
|
||||
name:
|
||||
type: string
|
||||
required: true
|
||||
max_length: 2
|
||||
min_length: 5
|
||||
status: 400
|
||||
|
||||
- name: post resource type bad min value
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
attributes:
|
||||
int:
|
||||
type: number
|
||||
required: false
|
||||
max: 3
|
||||
min: 8
|
||||
status: 400
|
||||
|
||||
# Create a type
|
||||
|
||||
- name: post resource type
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
attributes:
|
||||
name:
|
||||
type: string
|
||||
required: true
|
||||
max_length: 5
|
||||
min_length: 2
|
||||
foobar:
|
||||
type: string
|
||||
required: false
|
||||
uuid:
|
||||
type: uuid
|
||||
int:
|
||||
type: number
|
||||
required: false
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: false
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
status: 201
|
||||
response_json_paths:
|
||||
$.name: my_custom_resource
|
||||
$.state: active
|
||||
$.attributes:
|
||||
name:
|
||||
type: string
|
||||
required: True
|
||||
max_length: 5
|
||||
min_length: 2
|
||||
foobar:
|
||||
type: string
|
||||
required: False
|
||||
max_length: 255
|
||||
min_length: 0
|
||||
uuid:
|
||||
type: uuid
|
||||
required: True
|
||||
int:
|
||||
type: number
|
||||
required: False
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: False
|
||||
min:
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
max:
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
|
||||
response_headers:
|
||||
location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource
|
||||
|
||||
# Control the created type
|
||||
|
||||
- name: relist resource types
|
||||
desc: we have a resource type now
|
||||
GET: $LAST_URL
|
||||
response_json_paths:
|
||||
$.`len`: 16
|
||||
$.[11].name: my_custom_resource
|
||||
$.[11].state: active
|
||||
|
||||
- name: get the custom resource type
|
||||
GET: /v1/resource_type/my_custom_resource
|
||||
response_json_paths:
|
||||
$.name: my_custom_resource
|
||||
$.state: active
|
||||
$.attributes:
|
||||
name:
|
||||
type: string
|
||||
required: True
|
||||
min_length: 2
|
||||
max_length: 5
|
||||
foobar:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
uuid:
|
||||
type: uuid
|
||||
required: True
|
||||
int:
|
||||
type: number
|
||||
required: False
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: False
|
||||
min:
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
max:
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
|
||||
# Some bad cases on the type
|
||||
|
||||
- name: delete as non-admin
|
||||
DELETE: $LAST_URL
|
||||
status: 403
|
||||
|
||||
# Bad resources for this type
|
||||
|
||||
- name: post invalid resource
|
||||
POST: /v1/resource/my_custom_resource
|
||||
request_headers:
|
||||
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
|
||||
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
name: toolong!!!
|
||||
foobar: what
|
||||
uuid: 07eb339e-23c0-4be2-be43-cd8247afae3b
|
||||
status: 400
|
||||
response_strings:
|
||||
# split to not match the u' in py2
|
||||
- "Invalid input: length of value must be at most 5 for dictionary value @ data["
|
||||
- "'name']"
|
||||
|
||||
- name: post invalid resource uuid
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
|
||||
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
name: too
|
||||
foobar: what
|
||||
uuid: really!
|
||||
status: 400
|
||||
response_strings:
|
||||
# split to not match the u' in py2
|
||||
- "Invalid input: not a valid value for dictionary value @ data["
|
||||
- "'uuid']"
|
||||
|
||||
# Good resources for this type
|
||||
|
||||
- name: post custom resource
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
|
||||
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
name: bar
|
||||
foobar: what
|
||||
uuid: e495ebad-be64-46c0-81d6-b079beb48df9
|
||||
int: 1
|
||||
status: 201
|
||||
response_json_paths:
|
||||
$.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
$.name: bar
|
||||
$.foobar: what
|
||||
|
||||
- name: patch custom resource
|
||||
PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
request_headers:
|
||||
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
|
||||
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
name: foo
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
$.name: foo
|
||||
$.foobar: what
|
||||
$.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
|
||||
$.int: 1
|
||||
|
||||
- name: get resource
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
response_json_paths:
|
||||
$.id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
$.name: foo
|
||||
$.foobar: what
|
||||
$.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
|
||||
$.int: 1
|
||||
|
||||
- name: post resource with default
|
||||
POST: /v1/resource/my_custom_resource
|
||||
request_headers:
|
||||
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
|
||||
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
|
||||
name: foo
|
||||
uuid: e495ebad-be64-46c0-81d6-b079beb48df9
|
||||
status: 201
|
||||
response_json_paths:
|
||||
$.id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
|
||||
$.name: foo
|
||||
$.foobar:
|
||||
$.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
|
||||
$.int:
|
||||
|
||||
- name: list resource history
|
||||
GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
response_json_paths:
|
||||
$.`len`: 2
|
||||
$[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
$[0].name: bar
|
||||
$[0].foobar: what
|
||||
$[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
$[1].name: foo
|
||||
$[1].foobar: what
|
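A small sketch of the ordering that sort=revision_end:asc-nullslast requests above: archived revisions carry a revision_end timestamp while the live revision has none, so NULLs-last puts the current state at the end of the history. The data below is made up for illustration.

revisions = [
    {"name": "bar", "revision_end": "2016-01-02T00:00:00"},
    {"name": "foo", "revision_end": None},  # live revision
]
# sort ascending by revision_end, None (the live revision) last
ordered = sorted(revisions,
                 key=lambda r: (r["revision_end"] is None,
                                r["revision_end"] or ""))
print([r["name"] for r in ordered])  # ['bar', 'foo']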
||||
|
||||
# CRUD resource type attributes
|
||||
|
||||
- name: post a new resource attribute
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: add
|
||||
path: /attributes/newstuff
|
||||
value:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
- op: remove
|
||||
path: /attributes/foobar
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.name: my_custom_resource
|
||||
$.attributes:
|
||||
name:
|
||||
type: string
|
||||
required: True
|
||||
min_length: 2
|
||||
max_length: 5
|
||||
uuid:
|
||||
type: uuid
|
||||
required: True
|
||||
int:
|
||||
type: number
|
||||
required: False
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: False
|
||||
min:
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
max:
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
newstuff:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
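The same attribute add/remove can be driven outside gabbi; a hedged requests sketch (the endpoint, port, and admin header are assumptions for illustration), the key point being the application/json-patch+json media type:

import json
import requests

patch = [
    {"op": "add", "path": "/attributes/newstuff",
     "value": {"type": "string", "required": False,
               "min_length": 0, "max_length": 255}},
    {"op": "remove", "path": "/attributes/foobar"},
]
resp = requests.patch(
    "http://localhost:8041/v1/resource_type/my_custom_resource",
    headers={"Content-Type": "application/json-patch+json",
             "X-Roles": "admin"},
    data=json.dumps(patch))
print(resp.status_code)  # 200, with the updated type as the body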
||||
|
||||
- name: get the new custom resource type
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
response_json_paths:
|
||||
$.name: my_custom_resource
|
||||
$.attributes:
|
||||
name:
|
||||
type: string
|
||||
required: True
|
||||
min_length: 2
|
||||
max_length: 5
|
||||
uuid:
|
||||
type: uuid
|
||||
required: True
|
||||
int:
|
||||
type: number
|
||||
required: False
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: False
|
||||
min:
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
max:
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
newstuff:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
|
||||
# Invalid patch
|
||||
|
||||
- name: add/delete the same resource attribute
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: add
|
||||
path: /attributes/what
|
||||
value:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
- op: remove
|
||||
path: /attributes/what
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.name: my_custom_resource
|
||||
$.attributes:
|
||||
name:
|
||||
type: string
|
||||
required: True
|
||||
min_length: 2
|
||||
max_length: 5
|
||||
uuid:
|
||||
type: uuid
|
||||
required: True
|
||||
int:
|
||||
type: number
|
||||
required: False
|
||||
min: -2
|
||||
max: 3
|
||||
intnomin:
|
||||
type: number
|
||||
required: False
|
||||
min:
|
||||
max: 3
|
||||
float:
|
||||
type: number
|
||||
required: false
|
||||
min: -2.3
|
||||
max:
|
||||
bool:
|
||||
type: bool
|
||||
required: false
|
||||
newstuff:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
|
||||
- name: delete/add the same resource attribute
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: remove
|
||||
path: /attributes/what
|
||||
- op: add
|
||||
path: /attributes/what
|
||||
value:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
status: 400
|
||||
response_strings:
|
||||
- "can't remove non-existent object 'what'"
|
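The asymmetry between the two tests above follows from RFC 6902: operations apply sequentially against the current document, so add-then-remove succeeds while remove-then-add fails on the first op and aborts the whole patch. A sketch with the python jsonpatch library:

import jsonpatch

doc = {"attributes": {}}
patch = jsonpatch.JsonPatch([
    {"op": "remove", "path": "/attributes/what"},
    {"op": "add", "path": "/attributes/what",
     "value": {"type": "string"}},
])
try:
    patch.apply(doc)
except jsonpatch.JsonPatchConflict as exc:
    print(exc)  # e.g. "can't remove non-existent object 'what'"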
||||
|
||||
- name: patch a resource attribute replace
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: replace
|
||||
path: /attributes/newstuff
|
||||
value:
|
||||
type: string
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
status: 400
|
||||
response_strings:
|
||||
- "Invalid input: not a valid value for dictionary value @ data[0]["
|
||||
- "'op']"
|
||||
|
||||
- name: patch a resource attribute whose type does not exist
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: add
|
||||
path: /attributes/newstuff
|
||||
value:
|
||||
type: notexist
|
||||
required: False
|
||||
min_length: 0
|
||||
max_length: 255
|
||||
status: 400
|
||||
|
||||
- name: patch a resource attribute type unknown
|
||||
url: /v1/resource_type/my_custom_resource
|
||||
method: patch
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json-patch+json
|
||||
data:
|
||||
- op: remove
|
||||
path: /attributes/unknown
|
||||
status: 400
|
||||
response_strings:
|
||||
- "can't remove non-existent object 'unknown'"
|
||||
|
||||
# Ensure we can't delete the type
|
||||
|
||||
- name: delete in use resource_type
|
||||
DELETE: /v1/resource_type/my_custom_resource
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 400
|
||||
response_strings:
|
||||
- Resource type my_custom_resource is still in use
|
||||
|
||||
# Delete associated resources
|
||||
|
||||
- name: delete the resource
|
||||
DELETE: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
||||
|
||||
- name: delete the second resource
|
||||
DELETE: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
||||
|
||||
# Now we can delete the type
|
||||
|
||||
- name: delete the custom resource type
|
||||
DELETE: /v1/resource_type/my_custom_resource
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
||||
|
||||
- name: delete non-existing custom resource type
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 404
|
||||
|
||||
- name: delete missing custom resource type utf8
|
||||
DELETE: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 404
|
||||
response_strings:
|
||||
- Resource type ✔éñ☃ does not exist
|
||||
|
||||
# Check we can re-add and delete the same resource type again
|
||||
|
||||
- name: post resource type again
|
||||
POST: /v1/resource_type
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
content-type: application/json
|
||||
data:
|
||||
name: my_custom_resource
|
||||
status: 201
|
||||
|
||||
- name: delete the custom resource type again
|
||||
DELETE: /v1/resource_type/my_custom_resource
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
status: 204
|
|
@ -14,8 +14,7 @@ tests:
|
|||
|
||||
- name: create archive policy
|
||||
desc: for later use
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -26,8 +25,7 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: create archive policy rule
|
||||
url: /v1/archive_policy_rule
|
||||
method: POST
|
||||
POST: /v1/archive_policy_rule
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -43,32 +41,32 @@ tests:
|
|||
# xfails.
|
||||
|
||||
- name: root of all
|
||||
url: /
|
||||
GET: /
|
||||
response_headers:
|
||||
content-type: /application/json/
|
||||
response_json_paths:
|
||||
$.versions[0].links[0].href: $SCHEME://$NETLOC/v1/
|
||||
|
||||
- name: root of v1
|
||||
url: /v1
|
||||
GET: /v1
|
||||
redirects: true
|
||||
response_json_paths:
|
||||
$.version: "1.0"
|
||||
$.links.`len`: 10
|
||||
$.links.`len`: 11
|
||||
$.links[0].href: $SCHEME://$NETLOC/v1
|
||||
$.links[7].href: $SCHEME://$NETLOC/v1/resource
|
||||
|
||||
- name: root of resource
|
||||
url: /v1/resource
|
||||
GET: /v1/resource
|
||||
response_json_paths:
|
||||
$.volume: $SCHEME://$NETLOC/v1/resource/volume
|
||||
|
||||
- name: typo of resource
|
||||
url: /v1/resoue
|
||||
GET: /v1/resoue
|
||||
status: 404
|
||||
|
||||
- name: typo of resource extra
|
||||
url: /v1/resource/foobar
|
||||
GET: /v1/resource/foobar
|
||||
status: 404
|
||||
|
||||
# Explore that GETting a list of resources demonstrates the expected
|
||||
|
@ -76,24 +74,24 @@ tests:
|
|||
|
||||
- name: instance resource
|
||||
desc: there are no instance resources yet
|
||||
url: /v1/resource/instance
|
||||
GET: /v1/resource/instance
|
||||
response_strings:
|
||||
- "[]"
|
||||
|
||||
- name: instance resource bad accept
|
||||
desc: Expect 406 on bad accept type
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
accept: text/plain
|
||||
url: /v1/resource/instance
|
||||
status: 406
|
||||
response_strings:
|
||||
- 406 Not Acceptable
|
||||
|
||||
- name: instance resource complex accept
|
||||
desc: failover accept media type appropriately
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
accept: text/plain, application/json; q=0.8
|
||||
url: /v1/resource/instance
|
||||
response_strings:
|
||||
- "[]"
|
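A hedged requests sketch of the Accept negotiation the two tests above exercise (the base URL is assumed): a bare text/plain Accept is refused with 406, while offering application/json as a lower-quality alternative falls back to JSON.

import requests

base = "http://localhost:8041/v1/resource/instance"  # assumed endpoint

r1 = requests.get(base, headers={"Accept": "text/plain"})
print(r1.status_code)  # expected: 406 Not Acceptable

r2 = requests.get(
    base, headers={"Accept": "text/plain, application/json; q=0.8"})
print(r2.status_code)  # expected: 200, served as application/json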
||||
|
||||
|
@ -101,14 +99,13 @@ tests:
|
|||
|
||||
- name: generic resource
|
||||
desc: there are no generic resources yet
|
||||
url: /v1/resource/generic
|
||||
GET: /v1/resource/generic
|
||||
response_strings:
|
||||
- "[]"
|
||||
|
||||
- name: post resource no user-id
|
||||
desc: https://bugs.launchpad.net/gnocchi/+bug/1424005
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
# Only provide one of these auth headers
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
|
@ -121,8 +118,7 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: post generic resource
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -143,8 +139,7 @@ tests:
|
|||
|
||||
- name: post same resource refuse
|
||||
desc: We can only post one identified resource once
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -157,8 +152,7 @@ tests:
|
|||
status: 409
|
||||
|
||||
- name: post generic resource bad content type
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -174,8 +168,7 @@ tests:
|
|||
# gets a useful 400 response.
|
||||
|
||||
- name: post instance resource no data
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: /v1/resource/instance
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -183,8 +176,7 @@ tests:
|
|||
status: 400
|
||||
|
||||
- name: post instance resource with missing data
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -198,11 +190,11 @@ tests:
|
|||
host: compute1
|
||||
status: 400
|
||||
response_strings:
|
||||
- "Invalid input: required key not provided @ data['display_name']"
|
||||
- "Invalid input: required key not provided @ data["
|
||||
- "'display_name']"
|
||||
|
||||
- name: post instance resource
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -223,8 +215,7 @@ tests:
|
|||
# associate metrics. If a metric does not exist there should be a
|
||||
# graceful failure.
|
||||
- name: patch instance resource
|
||||
url: $LOCATION
|
||||
method: patch
|
||||
PATCH: $LOCATION
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -237,8 +228,7 @@ tests:
|
|||
|
||||
- name: patch instance resource with same data
|
||||
desc: Ensure no useless revisions have been created
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -251,8 +241,7 @@ tests:
|
|||
revision_start: $RESPONSE['$.revision_start']
|
||||
|
||||
- name: patch instance resource with id
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -265,8 +254,7 @@ tests:
|
|||
- "'id']"
|
||||
|
||||
- name: patch instance with metrics
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -281,7 +269,7 @@ tests:
|
|||
|
||||
- name: get instance history
|
||||
desc: Ensure we can get the history
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast
|
||||
GET: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
|
@ -295,8 +283,7 @@ tests:
|
|||
$[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"]
|
||||
|
||||
- name: patch instance bad metric association
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -309,8 +296,7 @@ tests:
|
|||
- Metric f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea does not exist
|
||||
|
||||
- name: patch instance with bad archive policy
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -324,8 +310,7 @@ tests:
|
|||
- Archive policy noexist does not exist
|
||||
|
||||
- name: patch instance with no archive policy rule
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -338,8 +323,7 @@ tests:
|
|||
- No archive policy name specified and no archive policy rule found matching the metric name disk.iops
|
||||
|
||||
- name: patch instance with archive policy rule
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -351,7 +335,7 @@ tests:
|
|||
|
||||
- name: get patched resource
|
||||
desc: confirm the patched resource is properly patched
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -360,8 +344,7 @@ tests:
|
|||
|
||||
- name: patch resource empty dict
|
||||
desc: an empty dict in patch is an existence check
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: PATCH
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -373,8 +356,7 @@ tests:
|
|||
|
||||
- name: patch resource without change with metrics in response
|
||||
desc: an empty dict in patch is an existence check
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: PATCH
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -388,8 +370,7 @@ tests:
|
|||
|
||||
- name: post instance history
|
||||
desc: should not work
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history
|
||||
method: POST
|
||||
POST: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -398,8 +379,7 @@ tests:
|
|||
|
||||
- name: delete instance history
|
||||
desc: should not work
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history
|
||||
method: DELETE
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -410,8 +390,7 @@ tests:
|
|||
|
||||
- name: patch resource no data
|
||||
desc: providing no data is an error
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: PATCH
|
||||
PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -422,8 +401,7 @@ tests:
|
|||
|
||||
- name: patch resource bad data
|
||||
desc: providing data that is not a dict is an error
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
method: PATCH
|
||||
PATCH: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -436,8 +414,7 @@ tests:
|
|||
|
||||
- name: patch noexist resource
|
||||
desc: "patching something that doesn't exist is a 404"
|
||||
url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9
|
||||
method: patch
|
||||
PATCH: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -448,7 +425,7 @@ tests:
|
|||
|
||||
- name: get noexist resource
|
||||
desc: a resource that does not exist returns 404
|
||||
url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -458,7 +435,7 @@ tests:
|
|||
|
||||
- name: get bad resource id
|
||||
desc: https://bugs.launchpad.net/gnocchi/+bug/1425588
|
||||
url: /v1/resource/instance/noexist
|
||||
GET: /v1/resource/instance/noexist
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -467,7 +444,7 @@ tests:
|
|||
- The resource could not be found.
|
||||
|
||||
- name: get metrics for this not-existing resource
|
||||
url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
|
||||
GET: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -477,12 +454,12 @@ tests:
|
|||
# List resources
|
||||
|
||||
- name: list instance resources no auth
|
||||
url: /v1/resource/instance
|
||||
GET: /v1/resource/instance
|
||||
response_strings:
|
||||
- "[]"
|
||||
|
||||
- name: list instance resources
|
||||
url: /v1/resource/instance
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -491,7 +468,7 @@ tests:
|
|||
$[-1].host: compute2
|
||||
|
||||
- name: list all resources
|
||||
url: /v1/resource/generic
|
||||
GET: /v1/resource/generic
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -501,8 +478,7 @@ tests:
|
|||
# Metric handling when POSTing resources.
|
||||
|
||||
- name: post new instance with non-existent metrics
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: /v1/resource/instance
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -520,8 +496,7 @@ tests:
|
|||
status: 400
|
||||
|
||||
- name: post new instance with metrics bad policy
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -540,8 +515,7 @@ tests:
|
|||
status: 400
|
||||
|
||||
- name: post new instance with metrics no policy rule
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -561,8 +535,7 @@ tests:
|
|||
- No archive policy name specified and no archive policy rule found matching the metric name cpu.util
|
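A rough sketch of the fallback the error message above implies: when no archive_policy_name is given, the metric name is matched against the configured archive policy rules (wildcard patterns), and metric creation fails with the 400 above if nothing matches. The rule data below is illustrative only.

import fnmatch

rules = [{"metric_pattern": "disk.io.*", "archive_policy_name": "low"}]

def find_policy(metric_name):
    for rule in rules:
        if fnmatch.fnmatch(metric_name, rule["metric_pattern"]):
            return rule["archive_policy_name"]
    return None  # no rule found -> the 400 asserted above

print(find_policy("disk.io.rate"))  # low
print(find_policy("cpu.util"))      # None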
||||
|
||||
- name: post new instance with metrics using policy rule
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -580,8 +553,7 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: post new instance with metrics
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -595,7 +567,7 @@ tests:
|
|||
host: compute3
|
||||
display_name: myvm2
|
||||
metrics:
|
||||
cpu.util:
|
||||
disk.util:
|
||||
archive_policy_name: medium
|
||||
status: 201
|
||||
response_json_paths:
|
||||
|
@ -603,8 +575,7 @@ tests:
|
|||
created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
||||
- name: post new instance with metrics and un-normalized user/project id from keystone middleware
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -626,7 +597,7 @@ tests:
|
|||
|
||||
- name: get metrics for this resource
|
||||
desc: with async measure handling this is a null test
|
||||
url: /v1/resource/instance/$RESPONSE['$.id']/metric/cpu.util/measures
|
||||
GET: /v1/resource/instance/$RESPONSE['$.id']/metric/cpu.util/measures
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -637,13 +608,13 @@ tests:
|
|||
# Interrogate the NamedMetricController
|
||||
|
||||
- name: list the instances
|
||||
url: /v1/resource/instance
|
||||
GET: /v1/resource/instance
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
||||
- name: request metrics from one of the instances
|
||||
url: /v1/resource/instance/$RESPONSE['$[-1].id']/metric
|
||||
GET: /v1/resource/instance/$RESPONSE['$[-1].id']/metric
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -654,7 +625,7 @@ tests:
|
|||
|
||||
- name: request metrics from non uuid metrics
|
||||
desc: 404 from GenericResourceController
|
||||
url: /v1/resource/instance/not.a.uuid/metric
|
||||
GET: /v1/resource/instance/not.a.uuid/metric
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -662,7 +633,7 @@ tests:
|
|||
status: 404
|
||||
|
||||
- name: request cpuutil metric from instance
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
|
||||
GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -671,8 +642,7 @@ tests:
|
|||
$.archive_policy.name: medium
|
||||
|
||||
- name: try post cpuutil metric to instance
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -681,7 +651,7 @@ tests:
|
|||
|
||||
- name: request cpuutil measures from instance
|
||||
desc: with async measure handling this is a null test
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures
|
||||
GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -689,8 +659,7 @@ tests:
|
|||
- "[]"
|
||||
|
||||
- name: post cpuutil measures
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -703,7 +672,7 @@ tests:
|
|||
status: 202
|
||||
|
||||
- name: request cpuutil measures again
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures
|
||||
GET: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -716,8 +685,7 @@ tests:
|
|||
$[0][2]: 43.100000000000001
|
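Measure ingestion is asynchronous, which is why the POST above only asserts 202 and the read-back has to wait for processing. A requests sketch of the same flow (endpoint and payload are illustrative):

import time
import requests

url = ("http://localhost:8041/v1/resource/instance/"
       "85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures")

resp = requests.post(url, json=[
    {"timestamp": "2015-03-06T14:33:57", "value": 43.1},
])
assert resp.status_code == 202  # accepted, not yet aggregated

for _ in range(10):  # poll until a metricd worker has processed them
    measures = requests.get(url).json()
    if measures:
        print(measures[0])  # [timestamp, granularity, aggregated value]
        break
    time.sleep(1)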
||||
|
||||
- name: post metric at instance
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
method: post
|
||||
POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -729,8 +697,7 @@ tests:
|
|||
response_headers:
|
||||
|
||||
- name: post metric at instance with empty definition
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -742,8 +709,7 @@ tests:
|
|||
- No archive policy name specified and no archive policy rule found matching the metric name foo.bar
|
||||
|
||||
- name: post metric at instance using archive policy rule
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -753,8 +719,7 @@ tests:
|
|||
disk.io.rate: {}
|
||||
|
||||
- name: duplicate metrics at instance
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -767,8 +732,7 @@ tests:
|
|||
- Named metric electron.spin already exists
|
||||
|
||||
- name: post metrics at instance bad policy
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -783,8 +747,7 @@ tests:
|
|||
# Check bad timestamps
|
||||
|
||||
- name: post new instance with bad timestamp
|
||||
url: /v1/resource/instance
|
||||
method: post
|
||||
POST: /v1/resource/instance
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -810,8 +773,7 @@ tests:
|
|||
|
||||
- name: post to non uuid metrics
|
||||
desc: 404 from GenericResourceController
|
||||
url: /v1/resource/instance/not.a.uuid/metric
|
||||
method: post
|
||||
POST: /v1/resource/instance/not.a.uuid/metric
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -823,8 +785,7 @@ tests:
|
|||
|
||||
- name: post to missing uuid metrics
|
||||
desc: 404 from NamedMetricController
|
||||
url: /v1/resource/instance/d5a5994e-ee90-11e4-88cf-685b35afa334/metric
|
||||
method: post
|
||||
POST: /v1/resource/instance/d5a5994e-ee90-11e4-88cf-685b35afa334/metric
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -838,8 +799,7 @@ tests:
|
|||
|
||||
- name: post measure on unknown metric
|
||||
desc: 404 from NamedMetricController with metric error
|
||||
url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures
|
||||
method: post
|
||||
POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
|
@ -854,17 +814,320 @@ tests:
|
|||
# DELETE-ing instances
|
||||
|
||||
- name: delete instance
|
||||
url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
DELETE: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
method: DELETE
|
||||
status: 204
|
||||
|
||||
- name: delete noexist instance
|
||||
url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9
|
||||
DELETE: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
method: DELETE
|
||||
status: 404
|
||||
|
||||
# Delete a batch of resources by attributes filter
|
||||
|
||||
- name: create resource one
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: /v1/resource/generic
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-aaaa-4d67-9985-02511241e7d1
|
||||
started_at: "2014-01-03T02:02:02.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: create resource two
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-bbbb-4d67-9985-02511241e7d1
|
||||
started_at: "2014-01-03T02:02:02.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: create resource three
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-cccc-4d67-9985-02511241e7d1
|
||||
started_at: "2014-08-04T00:00:00.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: create resource four
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-dddd-4d67-9985-02511241e7d1
|
||||
started_at: "2014-08-04T00:00:00.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: create resource five
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-eeee-4d67-9985-02511241e7d1
|
||||
started_at: "2015-08-14T00:00:00.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: create resource six
|
||||
desc: before testing batch delete, create some resources
|
||||
POST: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
id: f93450f2-ffff-4d67-9985-02511241e7d1
|
||||
started_at: "2015-08-14T00:00:00.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: get resource one
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: get resource two
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-bbbb-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: get resource three
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-cccc-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: get resource four
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-dddd-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: get resource five
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-eeee-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: get resource six
|
||||
desc: ensure the resource exists
|
||||
GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
status: 200
|
||||
|
||||
- name: delete random data structure
|
||||
desc: an invalid delete body must be rejected
|
||||
DELETE: /v1/resource/generic
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
resource_ids:
|
||||
[]
|
||||
attrs:
|
||||
test
|
||||
status: 400
|
||||
|
||||
- name: delete something empty
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data: ""
|
||||
status: 400
|
||||
|
||||
- name: delete something empty a
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
id: []
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.deleted: 0
|
||||
|
||||
- name: delete something empty b
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in: {}
|
||||
status: 400
|
||||
|
||||
- name: delete something empty c
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
and: []
|
||||
status: 400
|
||||
|
||||
- name: delete something empty d
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
and:
|
||||
- or: []
|
||||
- id:
|
||||
=: ""
|
||||
status: 400
|
||||
|
||||
- name: delete something empty e
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
and: []
|
||||
status: 400
|
||||
|
||||
- name: delete something empty f
|
||||
desc: use an empty filter for delete
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
and:
|
||||
- in:
|
||||
id: []
|
||||
- started_at: ""
|
||||
status: 400
|
||||
|
||||
- name: delete batch of resources filter by started_at
|
||||
desc: delete the created resources
|
||||
DELETE: /v1/resource/generic
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
eq:
|
||||
started_at: "2014-08-04"
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.deleted: 2
|
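The DELETE body here is the same filter grammar the search API uses, so "eq", "in", and "and"/"or" compose freely, and the response reports how many resources matched. A hedged requests sketch (endpoint and headers assumed for illustration):

import requests

filters = {
    "in": {"id": ["f93450f2-aaaa-4d67-9985-02511241e7d1",
                  "f93450f2-bbbb-4d67-9985-02511241e7d1"]},
}
resp = requests.delete(
    "http://localhost:8041/v1/resource/generic",  # assumed endpoint
    json=filters,
    headers={"X-User-Id": "0fbb231484614b1a80131fc22f6afc9c",
             "X-Project-Id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"})
print(resp.json())  # e.g. {"deleted": 2}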
||||
|
||||
- name: delete batch of resources filter by multiple ids
|
||||
desc: delete the created resources
|
||||
DELETE: /v1/resource/generic
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
id:
|
||||
- f93450f2-aaaa-4d67-9985-02511241e7d1
|
||||
- f93450f2-bbbb-4d67-9985-02511241e7d1
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.deleted: 2
|
||||
|
||||
- name: delete both existent and non-existent data
|
||||
desc: delete existing and non-existent data in one request
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
id:
|
||||
- f93450f2-eeee-4d67-9985-02511241e7d1
|
||||
- f93450f2-ffff-4d67-9985-02511241e7d1
|
||||
- f93450f2-yyyy-4d67-9985-02511241e7d1
|
||||
- f93450f2-xxxx-4d67-9985-02511241e7d1
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.deleted: 2
|
||||
|
||||
- name: delete multiple non-existent resources
|
||||
desc: delete a batch of non-existent resources
|
||||
DELETE: $LAST_URL
|
||||
request_headers:
|
||||
x-user-id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
id:
|
||||
- f93450f2-zzzz-4d67-9985-02511241e7d1
|
||||
- f93450f2-kkkk-4d67-9985-02511241e7d1
|
||||
status: 200
|
||||
response_json_paths:
|
||||
$.deleted: 0
|
||||
|
|
|
@ -9,8 +9,7 @@ fixtures:
|
|||
tests:
|
||||
- name: create archive policy
|
||||
desc: for later use
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
x-roles: admin
|
||||
|
@ -21,17 +20,15 @@ tests:
|
|||
status: 201
|
||||
|
||||
- name: create metric
|
||||
url: /v1/metric
|
||||
POST: /v1/metric
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
method: post
|
||||
data:
|
||||
archive_policy_name: "high"
|
||||
status: 201
|
||||
|
||||
- name: search measure with wrong start
|
||||
url: /v1/search/metric?metric_id=$RESPONSE['$.id']&start=foobar
|
||||
method: post
|
||||
POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&start=foobar
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
|
@ -42,17 +39,15 @@ tests:
|
|||
- Invalid value for start
|
||||
|
||||
- name: create metric 2
|
||||
url: /v1/metric
|
||||
POST: /v1/metric
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
method: post
|
||||
data:
|
||||
archive_policy_name: "high"
|
||||
status: 201
|
||||
|
||||
- name: search measure with wrong stop
|
||||
url: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar
|
||||
method: post
|
||||
POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
|
@ -60,4 +55,4 @@ tests:
|
|||
- ≥: 1000
|
||||
status: 400
|
||||
response_strings:
|
||||
- Invalid value for stop
|
|
@ -8,18 +8,51 @@ fixtures:
|
|||
|
||||
tests:
|
||||
- name: typo of search
|
||||
url: /v1/search/notexists
|
||||
GET: /v1/search/notexists
|
||||
status: 404
|
||||
|
||||
- name: typo of search in resource
|
||||
url: /v1/search/resource/foobar
|
||||
GET: /v1/search/resource/foobar
|
||||
status: 404
|
||||
|
||||
- name: search with invalid uuid
|
||||
url: /v1/search/resource/generic
|
||||
method: POST
|
||||
POST: /v1/search/resource/generic
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
=:
|
||||
id: "cd9eef"
|
||||
|
||||
- name: post generic resource
|
||||
POST: /v1/resource/generic
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
id: faef212f-0bf4-4030-a461-2186fef79be0
|
||||
started_at: "2014-01-03T02:02:02.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: post generic resource twice
|
||||
POST: /v1/resource/generic
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
id: df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e
|
||||
started_at: "2014-01-03T02:02:02.000000"
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
status: 201
|
||||
|
||||
- name: search in_
|
||||
POST: /v1/search/resource/generic
|
||||
request_headers:
|
||||
content-type: application/json
|
||||
data:
|
||||
in:
|
||||
id:
|
||||
- faef212f-0bf4-4030-a461-2186fef79be0
|
||||
- df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e
|
||||
response_json_paths:
|
||||
$.`len`: 2
|
||||
|
|
|
@ -20,8 +20,7 @@ tests:
|
|||
|
||||
- name: create archive policy
|
||||
desc: for later use
|
||||
url: /v1/archive_policy
|
||||
method: POST
|
||||
POST: /v1/archive_policy
|
||||
request_headers:
|
||||
x-roles: admin
|
||||
data:
|
||||
|
@ -32,8 +31,7 @@ tests:
|
|||
# Check transformed uuids across the URL hierarchy
|
||||
|
||||
- name: post new resource non uuid
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: /v1/resource/generic
|
||||
data:
|
||||
id: generic one
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
|
@ -50,18 +48,17 @@ tests:
|
|||
location: /v1/resource/generic/[a-f0-9-]{36}/
|
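What the location regexp above checks: a non-UUID id such as "generic one" is mapped to a deterministic UUID so the resource stays addressable by either form. A sketch of that mapping (uuid5 under a fixed namespace; the namespace constant below is a placeholder, not Gnocchi's real one):

import uuid

NAMESPACE = uuid.UUID("00000000-0000-0000-0000-000000000000")  # placeholder

def resource_uuid(value):
    try:
        return uuid.UUID(value)              # already a UUID: keep it
    except ValueError:
        return uuid.uuid5(NAMESPACE, value)  # derive a stable one

print(resource_uuid("generic one"))  # same UUID on every call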
||||
|
||||
- name: get new non uuid resource by external id
|
||||
url: /v1/resource/generic/generic%20one
|
||||
GET: /v1/resource/generic/generic%20one
|
||||
response_json_paths:
|
||||
$.id: $RESPONSE['$.id']
|
||||
|
||||
- name: get new non uuid resource by internal id
|
||||
url: /v1/resource/generic/$RESPONSE['$.id']
|
||||
GET: /v1/resource/generic/$RESPONSE['$.id']
|
||||
response_json_paths:
|
||||
$.id: $RESPONSE['$.id']
|
||||
|
||||
- name: patch by external id
|
||||
url: /v1/resource/generic/generic%20one
|
||||
method: PATCH
|
||||
PATCH: /v1/resource/generic/generic%20one
|
||||
data:
|
||||
metrics:
|
||||
cattle:
|
||||
|
@ -71,18 +68,17 @@ tests:
|
|||
- '"cattle"'
|
||||
|
||||
- name: list metric by external resource id
|
||||
url: /v1/resource/generic/generic%20one/metric
|
||||
GET: /v1/resource/generic/generic%20one/metric
|
||||
response_json_paths:
|
||||
$[0].name: cattle
|
||||
|
||||
- name: list empty measures by external resource id
|
||||
url: /v1/resource/generic/generic%20one/metric/cattle/measures
|
||||
GET: /v1/resource/generic/generic%20one/metric/cattle/measures
|
||||
response_json_paths:
|
||||
$: []
|
||||
|
||||
- name: post measures by external resource id
|
||||
url: /v1/resource/generic/generic%20one/metric/cattle/measures
|
||||
method: POST
|
||||
POST: /v1/resource/generic/generic%20one/metric/cattle/measures
|
||||
data:
|
||||
- timestamp: "2015-03-06T14:33:57"
|
||||
value: 43.1
|
||||
|
@ -91,7 +87,7 @@ tests:
|
|||
status: 202
|
||||
|
||||
- name: list two measures by external resource id
|
||||
url: /v1/resource/generic/generic%20one/metric/cattle/measures
|
||||
GET: $LAST_URL
|
||||
poll:
|
||||
count: 10
|
||||
delay: 1
|
||||
|
@ -100,17 +96,15 @@ tests:
|
|||
$[1][2]: 12
|
||||
|
||||
- name: delete the resource by external id
|
||||
url: /v1/resource/generic/generic%20one
|
||||
method: DELETE
|
||||
DELETE: /v1/resource/generic/generic%20one
|
||||
status: 204
|
||||
|
||||
# Check length handling
|
||||
|
||||
- name: fail to post too long non uuid resource id
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: /v1/resource/generic
|
||||
data:
|
||||
id: four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts
|
||||
id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
metrics:
|
||||
|
@ -121,11 +115,10 @@ tests:
|
|||
- not a valid value for
|
||||
|
||||
- name: post long non uuid resource id
|
||||
url: /v1/resource/generic
|
||||
method: post
|
||||
POST: $LAST_URL
|
||||
data:
|
||||
# 255 char string
|
||||
id: four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue
|
||||
id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue
|
||||
user_id: 0fbb231484614b1a80131fc22f6afc9c
|
||||
project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea
|
||||
metrics:
|
||||
|
|
|
@ -14,10 +14,15 @@
|
|||
# under the License.
|
||||
import abc
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
from oslo_db.sqlalchemy import test_migrations
|
||||
import six
|
||||
import sqlalchemy as sa
|
||||
import sqlalchemy_utils
|
||||
|
||||
from gnocchi import indexer
|
||||
from gnocchi.indexer import sqlalchemy
|
||||
from gnocchi.indexer import sqlalchemy_base
|
||||
from gnocchi.tests import base
|
||||
|
||||
|
@ -31,9 +36,24 @@ class ModelsMigrationsSync(
|
|||
base.TestCase,
|
||||
test_migrations.ModelsMigrationsSync)):
|
||||
|
||||
def _set_timeout(self):
|
||||
self.useFixture(fixtures.Timeout(120, gentle=True))
|
||||
|
||||
def setUp(self):
|
||||
super(ModelsMigrationsSync, self).setUp()
|
||||
self.db = mock.Mock()
|
||||
self.conf.set_override(
|
||||
'url',
|
||||
sqlalchemy.SQLAlchemyIndexer._create_new_database(
|
||||
self.conf.indexer.url),
|
||||
'indexer')
|
||||
self.index = indexer.get_driver(self.conf)
|
||||
self.index.connect()
|
||||
self.index.upgrade(nocreate=True, create_legacy_resource_types=True)
|
||||
|
||||
def tearDown(self):
|
||||
sqlalchemy_utils.drop_database(self.conf.indexer.url)
|
||||
super(ModelsMigrationsSync, self).tearDown()
|
||||
|
||||
@staticmethod
|
||||
def get_metadata():
|
||||
|
@ -42,8 +62,26 @@ class ModelsMigrationsSync(
|
|||
def get_engine(self):
|
||||
return self.index.get_engine()
|
||||
|
||||
@staticmethod
|
||||
def db_sync(engine):
|
||||
# NOTE(jd) Nothing to do here as setUp() in the base class is already
|
||||
# creating table using upgrade
|
||||
pass
|
||||
def db_sync(self, engine):
|
||||
# NOTE(sileht): We ensure all resource type sqlalchemy models are loaded
|
||||
# in this process
|
||||
for rt in self.index.list_resource_types():
|
||||
if rt.state == "active":
|
||||
self.index._RESOURCE_TYPE_MANAGER.get_classes(rt)
|
||||
|
||||
def filter_metadata_diff(self, diff):
|
||||
tables_to_keep = []
|
||||
for rt in self.index.list_resource_types():
|
||||
if rt.name.startswith("indexer_test"):
|
||||
tables_to_keep.extend([rt.tablename,
|
||||
"%s_history" % rt.tablename])
|
||||
new_diff = []
|
||||
for line in diff:
|
||||
if len(line) >= 2:
|
||||
item = line[1]
|
||||
# NOTE(sileht): skip resource types created for tests
|
||||
if (isinstance(item, sa.Table)
|
||||
and item.name in tables_to_keep):
|
||||
continue
|
||||
new_diff.append(line)
|
||||
return new_diff
|
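# For context (shape assumed from alembic autogenerate as surfaced by
# oslo.db): each diff entry is roughly a tuple like
#   ('add_table', Table('indexer_test_foo', MetaData(), ...))
#   ('remove_table', Table('indexer_test_foo_history', MetaData(), ...))
# which is why line[1] above is the sqlalchemy Table whose name gets
# matched against the test-only resource-type tables.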
||||
|
|
|
@@ -1,6 +1,6 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2015 Red Hat, Inc.
# Copyright © 2015-2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,10 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import itertools
import uuid

import mock
import pandas
import msgpack
import six

from gnocchi import carbonara
@@ -27,16 +28,11 @@ from gnocchi.tests import base as tests_base
from gnocchi import utils


def _to_dict_v1_3(self):
def _serialize_v2(split):
    d = {'values': dict((timestamp.value, float(v))
                        for timestamp, v
                        in six.iteritems(self.ts.dropna()))}
    sampling = pandas.tseries.offsets.Nano(self.sampling * 10e8)
    d.update({
        'aggregation_method': self.aggregation_method,
        'max_size': self.max_size,
        'sampling': six.text_type(sampling.n) + sampling.rule_code})
    return d
                        in six.iteritems(split.ts.dropna()))}
    return msgpack.dumps(d)


class TestCarbonaraMigration(tests_base.TestCase):
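The _serialize_v2 helper above mirrors the new on-disk layout: the payload is just a msgpack map of nanosecond timestamps to float values, while aggregation method, sampling and max size now travel in the storage key rather than in the payload. A small illustration (the timestamp value is arbitrary):

import msgpack

# pandas' Timestamp.value is the nanosecond Unix epoch of the point
payload = msgpack.dumps({'values': {1405551540 * 10**9: 4.0}})
# readers rebuild the series from the key's sampling/aggregation
# information; no metadata lives in the payload itself
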
@@ -48,69 +44,164 @@ class TestCarbonaraMigration(tests_base.TestCase):
        self.metric = storage.Metric(uuid.uuid4(),
                                     self.archive_policies['low'])

        archive = carbonara.TimeSerieArchive.from_definitions(
            [(v.granularity, v.points)
             for v in self.metric.archive_policy.definition]
        )

        archive_max = carbonara.TimeSerieArchive.from_definitions(
            [(v.granularity, v.points)
             for v in self.metric.archive_policy.definition],
            aggregation_method='max',
        )

        for a in (archive, archive_max):
            a.update(carbonara.TimeSerie.from_data(
                [datetime.datetime(2014, 1, 1, 12, 0, 0),
                 datetime.datetime(2014, 1, 1, 12, 0, 4),
                 datetime.datetime(2014, 1, 1, 12, 0, 9)],
                [4, 5, 6]))

        self.storage._create_metric(self.metric)

        # serialise in old format
        with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.to_dict',
                        autospec=True) as f:
            f.side_effect = _to_dict_v1_3
        with mock.patch('gnocchi.carbonara.SplitKey.'
                        'POINTS_PER_SPLIT', 14400):
            bts = carbonara.BoundTimeSerie(
                block_size=self.metric.archive_policy.max_block_size,
                back_window=self.metric.archive_policy.back_window)
            # NOTE: there is a split at 2016-07-18 on granularity 300
            values = ((datetime.datetime(2016, 7, 17, 23, 59, 0), 4),
                      (datetime.datetime(2016, 7, 17, 23, 59, 4), 5),
                      (datetime.datetime(2016, 7, 17, 23, 59, 9), 6),
                      (datetime.datetime(2016, 7, 18, 0, 0, 0), 7),
                      (datetime.datetime(2016, 7, 18, 0, 0, 4), 8),
                      (datetime.datetime(2016, 7, 18, 0, 0, 9), 9))

            self.storage._store_metric_archive(
                self.metric,
                archive.agg_timeseries[0].aggregation_method,
                archive.serialize())
            def _before_truncate(bound_timeserie):
                for d, agg in itertools.product(
                        self.metric.archive_policy.definition,
                        ['mean', 'max']):
                    grouped = bound_timeserie.group_serie(
                        d.granularity, carbonara.round_timestamp(
                            bound_timeserie.first, d.granularity * 10e8))

            self.storage._store_metric_archive(
                self.metric,
                archive_max.agg_timeseries[0].aggregation_method,
                archive_max.serialize())
                    aggts = carbonara.AggregatedTimeSerie.from_grouped_serie(
                        grouped, d.granularity, agg, max_size=d.points)

                    for key, split in aggts.split():
                        self.storage._store_metric_measures(
                            self.metric,
                            str(key),
                            agg, d.granularity,
                            _serialize_v2(split), offset=None, version=None)

            bts.set_values(values, before_truncate_callback=_before_truncate)
            self.storage._store_unaggregated_timeserie(self.metric,
                                                       _serialize_v2(bts),
                                                       version=None)

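A quick cross-check of the split arithmetic behind the NOTE above, written as standalone code rather than part of the diff: with POINTS_PER_SPLIT patched to 14400 and a 300 s granularity, each split covers 14400 * 300 = 4,320,000 s, and 2016-07-18T00:00Z sits exactly on such a boundary.

import calendar
import datetime

boundary = calendar.timegm(datetime.datetime(2016, 7, 18).timetuple())
assert boundary == 1468800000          # seconds since the epoch
assert boundary % (14400 * 300) == 0   # hence a new split starts here
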
    def upgrade(self):
        with mock.patch.object(self.index, 'list_metrics') as f:
            f.return_value = [self.metric]
            f.side_effect = [[self.metric], []]
            self.storage.upgrade(self.index)

    def test_get_measures(self):
        # This is to make gordc safer
        self.assertIsNotNone(self.storage._get_metric_archive(
            self.metric, "mean"))
        with mock.patch.object(
                self.storage, '_get_measures_and_unserialize',
                side_effect=self.storage._get_measures_and_unserialize_v2):
            self.assertEqual([
                (utils.datetime_utc(2016, 7, 17), 86400, 5),
                (utils.datetime_utc(2016, 7, 18), 86400, 8),
                (utils.datetime_utc(2016, 7, 17, 23), 3600, 5),
                (utils.datetime_utc(2016, 7, 18, 0), 3600, 8),
                (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5),
                (utils.datetime_utc(2016, 7, 18, 0), 300, 8)
            ], self.storage.get_measures(self.metric))

            self.assertEqual([
                (utils.datetime_utc(2016, 7, 17), 86400, 6),
                (utils.datetime_utc(2016, 7, 18), 86400, 9),
                (utils.datetime_utc(2016, 7, 17, 23), 3600, 6),
                (utils.datetime_utc(2016, 7, 18, 0), 3600, 9),
                (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6),
                (utils.datetime_utc(2016, 7, 18, 0), 300, 9)
            ], self.storage.get_measures(self.metric, aggregation='max'))

        self.upgrade()

        self.assertEqual([
            (utils.datetime_utc(2014, 1, 1), 86400, 5),
            (utils.datetime_utc(2014, 1, 1, 12), 3600, 5),
            (utils.datetime_utc(2014, 1, 1, 12), 300, 5)
            (utils.datetime_utc(2016, 7, 17), 86400, 5),
            (utils.datetime_utc(2016, 7, 18), 86400, 8),
            (utils.datetime_utc(2016, 7, 17, 23), 3600, 5),
            (utils.datetime_utc(2016, 7, 18, 0), 3600, 8),
            (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5),
            (utils.datetime_utc(2016, 7, 18, 0), 300, 8)
        ], self.storage.get_measures(self.metric))

        self.assertEqual([
            (utils.datetime_utc(2014, 1, 1), 86400, 6),
            (utils.datetime_utc(2014, 1, 1, 12), 3600, 6),
            (utils.datetime_utc(2014, 1, 1, 12), 300, 6)
            (utils.datetime_utc(2016, 7, 17), 86400, 6),
            (utils.datetime_utc(2016, 7, 18), 86400, 9),
            (utils.datetime_utc(2016, 7, 17, 23), 3600, 6),
            (utils.datetime_utc(2016, 7, 18, 0), 3600, 9),
            (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6),
            (utils.datetime_utc(2016, 7, 18, 0), 300, 9)
        ], self.storage.get_measures(self.metric, aggregation='max'))

        self.assertRaises(
            storage.AggregationDoesNotExist,
            self.storage._get_metric_archive,
            self.metric, "mean")
        with mock.patch.object(
                self.storage, '_get_measures_and_unserialize',
                side_effect=self.storage._get_measures_and_unserialize_v2):
            self.assertRaises(
                storage.AggregationDoesNotExist,
                self.storage.get_measures, self.metric)

            self.assertRaises(
                storage.AggregationDoesNotExist,
                self.storage.get_measures, self.metric, aggregation='max')

        self.storage.add_measures(self.metric, [
            storage.Measure(utils.datetime_utc(2016, 7, 18), 69),
            storage.Measure(utils.datetime_utc(2016, 7, 18, 1, 1), 64),
        ])

        with mock.patch.object(self.index, 'list_metrics') as f:
            f.side_effect = [[self.metric], []]
            self.storage.process_background_tasks(
                self.index, [str(self.metric.id)], sync=True)

        self.assertEqual([
            (utils.datetime_utc(2016, 7, 17), 86400, 6),
            (utils.datetime_utc(2016, 7, 18), 86400, 69),
            (utils.datetime_utc(2016, 7, 17, 23), 3600, 6),
            (utils.datetime_utc(2016, 7, 18, 0), 3600, 69),
            (utils.datetime_utc(2016, 7, 18, 1), 3600, 64),
            (utils.datetime_utc(2016, 7, 18, 0), 300, 69),
            (utils.datetime_utc(2016, 7, 18, 1), 300, 64)
        ], self.storage.get_measures(self.metric, aggregation='max'))

    def test_upgrade_upgraded_storage(self):
        with mock.patch.object(
                self.storage, '_get_measures_and_unserialize',
                side_effect=self.storage._get_measures_and_unserialize_v2):
            self.assertEqual([
                (utils.datetime_utc(2016, 7, 17), 86400, 5),
                (utils.datetime_utc(2016, 7, 18), 86400, 8),
                (utils.datetime_utc(2016, 7, 17, 23), 3600, 5),
                (utils.datetime_utc(2016, 7, 18, 0), 3600, 8),
                (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5),
                (utils.datetime_utc(2016, 7, 18, 0), 300, 8)
            ], self.storage.get_measures(self.metric))

            self.assertEqual([
                (utils.datetime_utc(2016, 7, 17), 86400, 6),
                (utils.datetime_utc(2016, 7, 18), 86400, 9),
                (utils.datetime_utc(2016, 7, 17, 23), 3600, 6),
                (utils.datetime_utc(2016, 7, 18, 0), 3600, 9),
                (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6),
                (utils.datetime_utc(2016, 7, 18, 0), 300, 9)
            ], self.storage.get_measures(self.metric, aggregation='max'))

        self.upgrade()
        self.upgrade()

        self.assertEqual([
            (utils.datetime_utc(2016, 7, 17), 86400, 5),
            (utils.datetime_utc(2016, 7, 18), 86400, 8),
            (utils.datetime_utc(2016, 7, 17, 23), 3600, 5),
            (utils.datetime_utc(2016, 7, 18, 0), 3600, 8),
            (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5),
            (utils.datetime_utc(2016, 7, 18, 0), 300, 8)
        ], self.storage.get_measures(self.metric))

        self.assertEqual([
            (utils.datetime_utc(2016, 7, 17), 86400, 6),
            (utils.datetime_utc(2016, 7, 18), 86400, 9),
            (utils.datetime_utc(2016, 7, 17, 23), 3600, 6),
            (utils.datetime_utc(2016, 7, 18, 0), 3600, 9),
            (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6),
            (utils.datetime_utc(2016, 7, 18, 0), 300, 9)
        ], self.storage.get_measures(self.metric, aggregation='max'))

    def test_delete_metric_not_upgraded(self):
        # Make sure that we delete everything (e.g. objects + container)

@@ -17,6 +17,7 @@ import datetime
import uuid

import pandas
from stevedore import extension

from gnocchi import aggregates
from gnocchi.aggregates import moving_stats
@@ -27,6 +28,12 @@ from gnocchi import utils

class TestAggregates(tests_base.TestCase):

    def setUp(self):
        super(TestAggregates, self).setUp()
        mgr = extension.ExtensionManager('gnocchi.aggregates',
                                         invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in mgr)

    def test_extension_dict(self):
        self.assertIsInstance(self.custom_agg['moving-average'],
                              moving_stats.MovingAverage)
@@ -51,7 +58,7 @@ class TestAggregates(tests_base.TestCase):
    def _test_create_metric_and_data(self, data, spacing):
        metric = storage.Metric(
            uuid.uuid4(), self.archive_policies['medium'])
        start_time = datetime.datetime(2014, 1, 1, 12)
        start_time = utils.datetime_utc(2014, 1, 1, 12)
        incr = datetime.timedelta(seconds=spacing)
        measures = [storage.Measure(start_time + incr * n, val)
                    for n, val in enumerate(data)]
@@ -59,7 +66,9 @@
            str(uuid.uuid4()), str(uuid.uuid4()),
            'medium')
        self.storage.add_measures(metric, measures)
        self.storage.process_background_tasks(self.index, sync=True)
        metrics = self.storage.list_metric_with_measures_to_process(
            None, None, full=True)
        self.storage.process_background_tasks(self.index, metrics, sync=True)

        return metric

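The change above reflects the driver contract exercised throughout these tests: callers first ask the storage driver which metrics still have measures to process, then hand that list to process_background_tasks. Sketched usage, with driver and index standing in for the test fixtures:

pending = driver.list_metric_with_measures_to_process(None, None, full=True)
driver.process_background_tasks(index, pending, sync=True)
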
@@ -14,8 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import math

import fixtures
from oslo_utils import timeutils
from oslotest import base
# TODO(jd) We shouldn't use pandas here
@@ -26,6 +28,10 @@ from gnocchi import carbonara


class TestBoundTimeSerie(base.BaseTestCase):
    def test_benchmark(self):
        self.useFixture(fixtures.Timeout(120, gentle=True))
        carbonara.BoundTimeSerie.benchmark()

    @staticmethod
    def test_base():
        carbonara.BoundTimeSerie.from_data(
@@ -97,23 +103,28 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
    @staticmethod
    def test_base():
        carbonara.AggregatedTimeSerie.from_data(
            3,
            3, 'mean',
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 0, 4),
             datetime.datetime(2014, 1, 1, 12, 0, 9)],
            [3, 5, 6])
        carbonara.AggregatedTimeSerie.from_data(
            "4s",
            "4s", 'mean',
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 0, 4),
             datetime.datetime(2014, 1, 1, 12, 0, 9)],
            [3, 5, 6])

    def test_benchmark(self):
        self.useFixture(fixtures.Timeout(120, gentle=True))
        carbonara.AggregatedTimeSerie.benchmark()

    def test_fetch_basic(self):
        ts = carbonara.AggregatedTimeSerie.from_data(
            timestamps=[datetime.datetime(2014, 1, 1, 12, 0, 0),
                        datetime.datetime(2014, 1, 1, 12, 0, 4),
                        datetime.datetime(2014, 1, 1, 12, 0, 9)],
            aggregation_method='mean',
            values=[3, 5, 6],
            sampling="1s")
        self.assertEqual(
@@ -138,6 +149,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            from_timestamp=timeutils.parse_isotime(
                "2014-01-01 13:00:04+01:00")))

    def test_before_epoch(self):
        ts = carbonara.TimeSerie.from_tuples(
            [(datetime.datetime(1950, 1, 1, 12), 3),
             (datetime.datetime(2014, 1, 1, 12), 5),
             (datetime.datetime(2014, 1, 1, 12), 6)])

        self.assertRaises(carbonara.BeforeEpochError,
                          ts.group_serie, 60)

    def test_bad_percentile(self):
        for bad_percentile in ('0pct', '100pct', '-1pct', '123pct'):
            self.assertRaises(carbonara.UnknownAggregationMethod,
@@ -145,35 +165,44 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
                              sampling='1Min',
                              aggregation_method=bad_percentile)

    @staticmethod
    def _resample(ts, sampling, agg, max_size=None):
        grouped = ts.group_serie(sampling)
        return carbonara.AggregatedTimeSerie.from_grouped_serie(
            grouped, sampling, agg, max_size=max_size)

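With AggregatedTimeSerie no longer updated in place, the rewritten tests funnel every resampling through this two-step pattern: group the raw points by sampling period, then build the aggregate from the grouped series. Outside the test class it looks roughly like this (ts being any carbonara.TimeSerie):

grouped = ts.group_serie(60)
agg = carbonara.AggregatedTimeSerie.from_grouped_serie(
    grouped, 60, 'mean', max_size=10)
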
    def test_74_percentile_serialized(self):
        ts = carbonara.AggregatedTimeSerie(sampling='1Min',
                                           aggregation_method='74pct')
        ts.update(carbonara.TimeSerie.from_tuples(
        ts = carbonara.TimeSerie.from_tuples(
            [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
             (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]))
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
        ts = self._resample(ts, 60, '74pct')

        self.assertEqual(1, len(ts))
        self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])

        # Serialize and unserialize
        ts = carbonara.AggregatedTimeSerie.unserialize(ts.serialize())
        key = ts.get_split_key()
        o, s = ts.serialize(key)
        saved_ts = carbonara.AggregatedTimeSerie.unserialize(
            s, key, '74pct', ts.sampling)

        ts.update(carbonara.TimeSerie.from_tuples(
        ts = carbonara.TimeSerie.from_tuples(
            [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
             (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]))
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
        ts = self._resample(ts, 60, '74pct')
        ts.merge(saved_ts)

        self.assertEqual(1, len(ts))
        self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])

    def test_95_percentile(self):
        ts = carbonara.AggregatedTimeSerie(sampling='1Min',
                                           aggregation_method='95pct')
        ts.update(carbonara.TimeSerie.from_tuples(
        ts = carbonara.TimeSerie.from_tuples(
            [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
             (datetime.datetime(2014, 1, 1, 12, 0, 4), 5),
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]))
             (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])
        ts = self._resample(ts, 60, '95pct')

        self.assertEqual(1, len(ts))
        self.assertEqual(5.9000000000000004,
@@ -182,108 +211,109 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
    def test_different_length_in_timestamps_and_data(self):
        self.assertRaises(ValueError,
                          carbonara.AggregatedTimeSerie.from_data,
                          3,
                          3, 'mean',
                          [datetime.datetime(2014, 1, 1, 12, 0, 0),
                           datetime.datetime(2014, 1, 1, 12, 0, 4),
                           datetime.datetime(2014, 1, 1, 12, 0, 9)],
                          [3, 5])

    def test_max_size(self):
        ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=2)
        ts.update(carbonara.TimeSerie.from_data(
        ts = carbonara.TimeSerie.from_data(
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 0, 4),
             datetime.datetime(2014, 1, 1, 12, 0, 9)],
            [3, 5, 6]))
            [3, 5, 6])
        ts = self._resample(ts, 1, 'mean', max_size=2)

        self.assertEqual(2, len(ts))
        self.assertEqual(5, ts[0])
        self.assertEqual(6, ts[1])

    def test_down_sampling(self):
        ts = carbonara.AggregatedTimeSerie(sampling='5Min')
        ts.update(carbonara.TimeSerie.from_data(
        ts = carbonara.TimeSerie.from_data(
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 0, 4),
             datetime.datetime(2014, 1, 1, 12, 0, 9)],
            [3, 5, 7]))
            [3, 5, 7])
        ts = self._resample(ts, 300, 'mean')

        self.assertEqual(1, len(ts))
        self.assertEqual(5, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)])

    def test_down_sampling_with_max_size(self):
        ts = carbonara.AggregatedTimeSerie(
            sampling='1Min',
            max_size=2)
        ts.update(carbonara.TimeSerie.from_data(
        ts = carbonara.TimeSerie.from_data(
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 1, 4),
             datetime.datetime(2014, 1, 1, 12, 1, 9),
             datetime.datetime(2014, 1, 1, 12, 2, 12)],
            [3, 5, 7, 1]))
            [3, 5, 7, 1])
        ts = self._resample(ts, 60, 'mean', max_size=2)

        self.assertEqual(2, len(ts))
        self.assertEqual(6, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)])
        self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)])

    def test_down_sampling_with_max_size_and_method_max(self):
        ts = carbonara.AggregatedTimeSerie(
            sampling='1Min',
            max_size=2,
            aggregation_method='max')
        ts.update(carbonara.TimeSerie.from_data(
        ts = carbonara.TimeSerie.from_data(
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 1, 4),
             datetime.datetime(2014, 1, 1, 12, 1, 9),
             datetime.datetime(2014, 1, 1, 12, 2, 12)],
            [3, 5, 70, 1]))
            [3, 5, 70, 1])
        ts = self._resample(ts, 60, 'max', max_size=2)

        self.assertEqual(2, len(ts))
        self.assertEqual(70, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)])
        self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)])

    def test_to_dict_from_dict(self):
        ts = carbonara.AggregatedTimeSerie(
            sampling='1Min',
            max_size=2,
            aggregation_method='max')
        ts.update(carbonara.TimeSerie.from_data(
            [datetime.datetime(2014, 1, 1, 12, 0, 0),
             datetime.datetime(2014, 1, 1, 12, 1, 4),
             datetime.datetime(2014, 1, 1, 12, 1, 9),
             datetime.datetime(2014, 1, 1, 12, 2, 12)],
            [3, 5, 7, 1]))
        ts2 = carbonara.AggregatedTimeSerie.from_dict(ts.to_dict())
        self.assertEqual(ts, ts2)
    @staticmethod
    def _resample_and_merge(ts, agg_dict):
        """Helper method that mimics _add_measures workflow."""
        grouped = ts.group_serie(agg_dict['sampling'])
        existing = agg_dict.get('return')
        agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
            grouped, agg_dict['sampling'], agg_dict['agg'],
            max_size=agg_dict.get('size'))
        if existing:
            agg_dict['return'].merge(existing)

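As its docstring says, the helper mirrors the storage driver's _add_measures flow: aggregate the fresh points, then merge the previously computed aggregate into the result so nothing already stored is lost. A typical call site inside the test class, matching the tests below (agg_dict doubles as mutable state):

state = {'sampling': 60, 'size': 10, 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=state['sampling'])
tsb.set_values(
    [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3)],
    before_truncate_callback=functools.partial(
        self._resample_and_merge, agg_dict=state))
# state['return'] now holds the merged AggregatedTimeSerie
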
    def test_aggregated_different_archive_no_overlap(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling)
        tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

        tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)],
                        before_truncate_callback=tsc1.update)
                        before_truncate_callback=functools.partial(
                            self._resample_and_merge, agg_dict=tsc1))
        tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)],
                        before_truncate_callback=tsc2.update)
                        before_truncate_callback=functools.partial(
                            self._resample_and_merge, agg_dict=tsc2))

        dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0)
        self.assertRaises(carbonara.UnAggregableTimeseries,
                          carbonara.AggregatedTimeSerie.aggregated,
                          [tsc1, tsc2], from_timestamp=dtfrom)
                          [tsc1['return'], tsc2['return']],
                          from_timestamp=dtfrom, aggregation='mean')

    def test_aggregated_different_archive_no_overlap2(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50)
        tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50,
                                             aggregation_method='mean')

        tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)],
                        before_truncate_callback=tsc1.update)
                        before_truncate_callback=functools.partial(
                            self._resample_and_merge, agg_dict=tsc1))
        self.assertRaises(carbonara.UnAggregableTimeseries,
                          carbonara.AggregatedTimeSerie.aggregated,
                          [tsc1, tsc2])
                          [tsc1['return'], tsc2], aggregation='mean')

    def test_aggregated_different_archive_overlap(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling)
        tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

        # NOTE(sileht): minute 8 is missing in both and
        # minute 7 in tsc2 too, but it looks like we have
@@ -298,7 +328,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 6, 0), 4),
            (datetime.datetime(2014, 1, 1, 12, 7, 0), 10),
            (datetime.datetime(2014, 1, 1, 12, 9, 0), 2),
        ], before_truncate_callback=tsc1.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc1))

        tsb2.set_values([
            (datetime.datetime(2014, 1, 1, 12, 1, 0), 3),
@@ -310,7 +341,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 9, 0), 2),
            (datetime.datetime(2014, 1, 1, 12, 11, 0), 2),
            (datetime.datetime(2014, 1, 1, 12, 12, 0), 2),
        ], before_truncate_callback=tsc2.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc2))

        dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0)
        dtto = datetime.datetime(2014, 1, 1, 12, 10, 0)
@@ -319,13 +351,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        # so that fail
        self.assertRaises(carbonara.UnAggregableTimeseries,
                          carbonara.AggregatedTimeSerie.aggregated,
                          [tsc1, tsc2], from_timestamp=dtfrom,
                          to_timestamp=dtto)
                          [tsc1['return'], tsc2['return']],
                          from_timestamp=dtfrom,
                          to_timestamp=dtto, aggregation='mean')

        # Retry with 80% and it works
        output = carbonara.AggregatedTimeSerie.aggregated([
            tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto,
            needed_percent_of_overlap=80.0)
            tsc1['return'], tsc2['return']],
            from_timestamp=dtfrom, to_timestamp=dtto,
            aggregation='mean', needed_percent_of_overlap=80.0)

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0),
@@ -339,10 +373,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        ], output)

    def test_aggregated_different_archive_overlap_edge_missing1(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling)
        tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

        tsb1.set_values([
            (datetime.datetime(2014, 1, 1, 12, 3, 0), 9),
@@ -351,7 +385,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 6, 0), 7),
            (datetime.datetime(2014, 1, 1, 12, 7, 0), 5),
            (datetime.datetime(2014, 1, 1, 12, 8, 0), 3),
        ], before_truncate_callback=tsc1.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc1))

        tsb2.set_values([
            (datetime.datetime(2014, 1, 1, 11, 0, 0), 6),
@@ -361,13 +396,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 4, 0), 4),
            (datetime.datetime(2014, 1, 1, 12, 5, 0), 16),
            (datetime.datetime(2014, 1, 1, 12, 6, 0), 12),
        ], before_truncate_callback=tsc2.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc2))

        # By default we require 100% of point that overlap
        # but we allow that the last datapoint is missing
        # of the precisest granularity
        output = carbonara.AggregatedTimeSerie.aggregated([
            tsc1, tsc2], aggregation='sum')
            tsc1['return'], tsc2['return']], aggregation='sum')

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0),
@@ -377,28 +413,31 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        ], output)

    def test_aggregated_different_archive_overlap_edge_missing2(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling)
        tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

        tsb1.set_values([
            (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
        ], before_truncate_callback=tsc1.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc1))

        tsb2.set_values([
            (datetime.datetime(2014, 1, 1, 11, 0, 0), 4),
            (datetime.datetime(2014, 1, 1, 12, 3, 0), 4),
        ], before_truncate_callback=tsc2.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc2))

        output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc2])
        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1['return'], tsc2['return']], aggregation='mean')
        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0),
        ], output)

    def test_fetch(self):
        ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 11, 46, 4), 4),
@@ -417,11 +456,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 5, 1), 15),
            (datetime.datetime(2014, 1, 1, 12, 5, 12), 1),
            (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 6), 5),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0),
@@ -434,7 +475,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0),
            (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0),
            (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0)
        ], ts.fetch())
        ], ts['return'].fetch())

        self.assertEqual([
            (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5),
@@ -443,19 +484,19 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0),
            (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0),
            (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0)
        ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
        ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))

    def test_fetch_agg_pct(self):
        ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=3600 * 24,
                                           aggregation_method='90pct')
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
                        (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4),
                        (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
        result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
        reference = [
            (pandas.Timestamp('2014-01-01 12:00:00'),
             1.0, 3.9),
@@ -472,9 +513,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            self.assertAlmostEqual(ref[2], res[2])

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
        result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))
        reference = [
            (pandas.Timestamp('2014-01-01 12:00:00'),
             1.0, 3.9),
@@ -491,8 +533,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            self.assertAlmostEqual(ref[2], res[2])

    def test_fetch_nano(self):
        ts = carbonara.AggregatedTimeSerie(sampling=0.2, max_size=10)
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 0.2, 'size': 10, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4),
@@ -500,78 +542,84 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50),
            (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4),
            (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0),
            (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0),
            (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0),
            (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5)
        ], ts.fetch())
        ], ts['return'].fetch())

    def test_fetch_agg_std(self):
        ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60,
                                           aggregation_method='std')
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        # NOTE (gordc): this is a good test to ensure we drop NaN entries
        # 2014-01-01 12:00:00 will appear if we don't dropna()
        ts = {'sampling': 60, 'size': 60, 'agg': 'std'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
                        (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
                        (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
                        (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
                        (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:01:00'),
             60.0, 2.1213203435596424),
            (pandas.Timestamp('2014-01-01 12:02:00'),
             60.0, 9.8994949366116654),
        ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
        ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:01:00'),
             60.0, 2.1213203435596424),
            (pandas.Timestamp('2014-01-01 12:02:00'),
             60.0, 59.304300012730948),
        ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
        ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))

    def test_fetch_agg_max(self):
        ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60,
                                           aggregation_method='max')
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 60, 'size': 60, 'agg': 'max'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3),
                        (datetime.datetime(2014, 1, 1, 12, 1, 4), 4),
                        (datetime.datetime(2014, 1, 1, 12, 1, 9), 7),
                        (datetime.datetime(2014, 1, 1, 12, 2, 1), 15),
                        (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3),
            (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7),
            (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 15),
        ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
        ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))

        tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)],
                       before_truncate_callback=ts.update)
                       before_truncate_callback=functools.partial(
                           self._resample_and_merge, agg_dict=ts))

        self.assertEqual([
            (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3),
            (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7),
            (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 110),
        ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))
        ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))

    def test_serialize(self):
        ts = carbonara.AggregatedTimeSerie(sampling=0.5)
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 0.5, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3),
@@ -579,24 +627,30 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5),
            (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7),
            (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        self.assertEqual(ts,
        key = ts['return'].get_split_key()
        o, s = ts['return'].serialize(key)
        self.assertEqual(ts['return'],
                         carbonara.AggregatedTimeSerie.unserialize(
                             ts.serialize()))
                             s, key,
                             'mean', 0.5))

    def test_no_truncation(self):
        ts = carbonara.AggregatedTimeSerie(sampling=60)
        ts = {'sampling': 60, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie()

        for i in six.moves.range(1, 11):
            tsb.set_values([
                (datetime.datetime(2014, 1, 1, 12, i, i), float(i))
            ], before_truncate_callback=ts.update)
            ], before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
            tsb.set_values([
                (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1))
            ], before_truncate_callback=ts.update)
            self.assertEqual(i, len(ts.fetch()))
            ], before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
            self.assertEqual(i, len(ts['return'].fetch()))

    def test_back_window(self):
        """Back window testing.

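The serialization round-trip above captures the new API shape: serialize() now takes the split key and returns an (offset, payload) pair, and unserialize() needs the key, aggregation method and sampling to rebuild the series, since that metadata no longer lives in the payload. A sketch, with agg standing in for any AggregatedTimeSerie:

key = agg.get_split_key()
offset, payload = agg.serialize(key)
restored = carbonara.AggregatedTimeSerie.unserialize(
    payload, key, 'mean', agg.sampling)
assert restored == agg
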
@@ -604,8 +658,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        Test the back window on an archive is not longer than the window we
        aggregate on.
        """
        ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60)
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 1, 'size': 60, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1),
@@ -613,7 +667,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3),
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4),
            (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        self.assertEqual(
            [
@@ -621,7 +676,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
                (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5),
                (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5),
            ],
            ts.fetch())
            ts['return'].fetch())

        try:
            tsb.set_values([
@@ -644,8 +699,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        Test the back window on an archive is not longer than the window we
        aggregate on.
        """
        ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60)
        tsb = carbonara.BoundTimeSerie(block_size=ts.sampling)
        ts = {'sampling': 1, 'size': 60, 'agg': 'mean'}
        tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1),
@@ -653,7 +708,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3),
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4),
            (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5),
        ], before_truncate_callback=ts.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

        self.assertEqual(
            [
@@ -661,11 +717,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
                (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5),
                (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5),
            ],
            ts.fetch())
            ts['return'].fetch())

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9),
        ], ignore_too_old_timestamps=True, before_truncate_callback=ts.update)
        ], ignore_too_old_timestamps=True,
           before_truncate_callback=functools.partial(
               self._resample_and_merge, agg_dict=ts))

        self.assertEqual(
            [
@@ -673,12 +731,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
                (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5),
                (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5),
            ],
            ts.fetch())
            ts['return'].fetch())

        tsb.set_values([
            (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9),
            (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5),
        ], ignore_too_old_timestamps=True, before_truncate_callback=ts.update)
        ], ignore_too_old_timestamps=True,
           before_truncate_callback=functools.partial(
               self._resample_and_merge, agg_dict=ts))

        self.assertEqual(
            [
@@ -686,23 +746,47 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
                (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5),
                (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 3.5),
            ],
            ts.fetch())
            ts['return'].fetch())

    def test_aggregated_nominal(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsc12 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc12.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10)
        tsc22 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc22.sampling)
        tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsc12 = {'sampling': 300, 'size': 6, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling'])
        tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'}
        tsc22 = {'sampling': 300, 'size': 6, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling'])

        def ts1_update(ts):
            tsc1.update(ts)
            tsc12.update(ts)
            grouped = ts.group_serie(tsc1['sampling'])
            existing = tsc1.get('return')
            tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
                grouped, tsc1['sampling'], tsc1['agg'],
                max_size=tsc1['size'])
            if existing:
                tsc1['return'].merge(existing)
            grouped = ts.group_serie(tsc12['sampling'])
            existing = tsc12.get('return')
            tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
                grouped, tsc12['sampling'], tsc12['agg'],
                max_size=tsc12['size'])
            if existing:
                tsc12['return'].merge(existing)

        def ts2_update(ts):
            tsc2.update(ts)
            tsc22.update(ts)
            grouped = ts.group_serie(tsc2['sampling'])
            existing = tsc2.get('return')
            tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
                grouped, tsc2['sampling'], tsc2['agg'],
                max_size=tsc2['size'])
            if existing:
                tsc2['return'].merge(existing)
            grouped = ts.group_serie(tsc22['sampling'])
            existing = tsc22.get('return')
            tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
                grouped, tsc22['sampling'], tsc22['agg'],
                max_size=tsc22['size'])
            if existing:
                tsc22['return'].merge(existing)

        tsb1.set_values([
            (datetime.datetime(2014, 1, 1, 11, 46, 4), 4),
@@ -742,8 +826,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            (datetime.datetime(2014, 1, 1, 12, 6, 0), 1),
        ], before_truncate_callback=ts2_update)

        output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc12,
                                                           tsc2, tsc22])
        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']],
            'mean')
        self.assertEqual([
            (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75),
            (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5),
@@ -763,27 +848,29 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        ], output)

    def test_aggregated_partial_overlap(self):
        tsc1 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400)
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling)
        tsc2 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400)
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling)
        tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'}
        tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
        tsc2 = {'sampling': 1, 'size': 60, 'agg': 'mean'}
        tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

        tsb1.set_values([
            (datetime.datetime(2015, 12, 3, 13, 19, 15), 1),
            (datetime.datetime(2015, 12, 3, 13, 20, 15), 1),
            (datetime.datetime(2015, 12, 3, 13, 21, 15), 1),
            (datetime.datetime(2015, 12, 3, 13, 22, 15), 1),
        ], before_truncate_callback=tsc1.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc1))

        tsb2.set_values([
            (datetime.datetime(2015, 12, 3, 13, 21, 15), 10),
            (datetime.datetime(2015, 12, 3, 13, 22, 15), 10),
            (datetime.datetime(2015, 12, 3, 13, 23, 15), 10),
            (datetime.datetime(2015, 12, 3, 13, 24, 15), 10),
        ], before_truncate_callback=tsc2.update)
        ], before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=tsc2))

        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1, tsc2], aggregation="sum")
            [tsc1['return'], tsc2['return']], aggregation="sum")

        self.assertEqual([
            (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0),
@@ -794,7 +881,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        dtto = datetime.datetime(2015, 12, 3, 13, 25, 0)

        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto,
            [tsc1['return'], tsc2['return']],
            from_timestamp=dtfrom, to_timestamp=dtto,
            aggregation="sum", needed_percent_of_overlap=0)

        self.assertEqual([
@@ -810,14 +898,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        # so that fail if from or to is set
        self.assertRaises(carbonara.UnAggregableTimeseries,
                          carbonara.AggregatedTimeSerie.aggregated,
                          [tsc1, tsc2], to_timestamp=dtto)
                          [tsc1['return'], tsc2['return']],
                          to_timestamp=dtto, aggregation='mean')
        self.assertRaises(carbonara.UnAggregableTimeseries,
                          carbonara.AggregatedTimeSerie.aggregated,
                          [tsc1, tsc2], from_timestamp=dtfrom)
                          [tsc1['return'], tsc2['return']],
                          from_timestamp=dtfrom, aggregation='mean')

        # Retry with 50% and it works
        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1, tsc2], from_timestamp=dtfrom,
            [tsc1['return'], tsc2['return']], from_timestamp=dtfrom,
            aggregation="sum",
            needed_percent_of_overlap=50.0)
        self.assertEqual([
@@ -828,7 +918,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
        ], output)

        output = carbonara.AggregatedTimeSerie.aggregated(
            [tsc1, tsc2], to_timestamp=dtto,
            [tsc1['return'], tsc2['return']], to_timestamp=dtto,
            aggregation="sum",
            needed_percent_of_overlap=50.0)
        self.assertEqual([
@@ -840,24 +930,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase):

    def test_split_key(self):
        self.assertEqual(
            "1420128000.0",
            carbonara.AggregatedTimeSerie.get_split_key(
                datetime.datetime(2015, 1, 1, 23, 34), 5))
        self.assertEqual(
            "1420056000.0",
            carbonara.AggregatedTimeSerie.get_split_key(
                datetime.datetime(2015, 1, 1, 15, 3), 5))

    def test_split_key_datetime(self):
        self.assertEqual(
            datetime.datetime(2014, 5, 10),
            carbonara.AggregatedTimeSerie.get_split_key_datetime(
            datetime.datetime(2014, 10, 7),
            carbonara.SplitKey.from_timestamp_and_sampling(
                datetime.datetime(2015, 1, 1, 15, 3), 3600))
        self.assertEqual(
            datetime.datetime(2014, 12, 29, 8),
            carbonara.AggregatedTimeSerie.get_split_key_datetime(
            datetime.datetime(2014, 12, 31, 18),
            carbonara.SplitKey.from_timestamp_and_sampling(
                datetime.datetime(2015, 1, 1, 15, 3), 58))

    def test_split_key_next(self):
        self.assertEqual(
            datetime.datetime(2015, 3, 6),
            next(carbonara.SplitKey.from_timestamp_and_sampling(
                datetime.datetime(2015, 1, 1, 15, 3), 3600)))
        self.assertEqual(
            datetime.datetime(2015, 8, 3),
            next(next(carbonara.SplitKey.from_timestamp_and_sampling(
                datetime.datetime(2015, 1, 1, 15, 3), 3600))))

    def test_split(self):
        sampling = 5
        points = 100000
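Assuming split keys are aligned to the Unix epoch, which the expected values above are consistent with, the first new assertion can be checked by hand: with POINTS_PER_SPLIT at 3600 and a 3600 s sampling, a split spans 3600 * 3600 = 12,960,000 s, and the window containing 2015-01-01T15:03Z starts at 2014-10-07T00:00Z. A standalone check:

import calendar
import datetime

span = 3600 * 3600
ts = calendar.timegm(datetime.datetime(2015, 1, 1, 15, 3).timetuple())
start = ts - ts % span   # epoch-aligned start of the enclosing split
assert datetime.datetime.utcfromtimestamp(start) == \
    datetime.datetime(2014, 10, 7)
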
@@ -865,21 +955,20 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            timestamps=map(datetime.datetime.utcfromtimestamp,
                           six.moves.range(points)),
            values=six.moves.range(points))
        agg = carbonara.AggregatedTimeSerie(sampling=sampling)
        agg.update(ts)
        agg = self._resample(ts, sampling, 'mean')

        grouped_points = list(agg.split())

        self.assertEqual(
            math.ceil((points / float(sampling))
                      / carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT),
                      / carbonara.SplitKey.POINTS_PER_SPLIT),
            len(grouped_points))
        self.assertEqual("0.0",
                         grouped_points[0][0])
        # 14400 × 5s = 20 hours
        self.assertEqual("72000.0",
                         str(carbonara.SplitKey(grouped_points[0][0])))
        # 3600 × 5s = 5 hours
        self.assertEqual(datetime.datetime(1970, 1, 1, 5),
                         grouped_points[1][0])
        self.assertEqual(carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT,
        self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT,
                         len(grouped_points[0][1]))

    def test_from_timeseries(self):
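The counts asserted above follow directly from the parameters: 100000 one-second points resampled at 5 s yield 20000 aggregated points, and with 3600 points per split (per the updated comment) that makes ceil(20000 / 3600) = 6 splits of 3600 * 5 = 18000 s (5 hours) each, so the second split starts at 1970-01-01T05:00. A standalone check:

import math

points, sampling, per_split = 100000, 5, 3600
aggregated = points // sampling                       # 20000 mean points
assert math.ceil(aggregated / float(per_split)) == 6  # number of splits
assert per_split * sampling == 5 * 3600               # 5 hours per split
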
@@ -889,8 +978,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase):
            timestamps=map(datetime.datetime.utcfromtimestamp,
                           six.moves.range(points)),
            values=six.moves.range(points))
        agg = carbonara.AggregatedTimeSerie(sampling=sampling)
        agg.update(ts)
        agg = self._resample(ts, sampling, 'mean')

        split = [t[1] for t in list(agg.split())]

@@ -17,12 +17,18 @@ import datetime
import operator
import uuid

import mock

from gnocchi import archive_policy
from gnocchi import indexer
from gnocchi.tests import base as tests_base
from gnocchi import utils


class MockException(Exception):
    pass


class TestIndexer(tests_base.TestCase):
    def test_get_driver(self):
        driver = indexer.get_driver(self.conf)
@@ -50,6 +56,55 @@ class TestIndexerDriver(tests_base.TestCase):
             {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
            'name': u'low'}, dict(ap))

    def test_update_archive_policy(self):
        self.assertRaises(indexer.UnsupportedArchivePolicyChange,
                          self.index.update_archive_policy, "low",
                          [archive_policy.ArchivePolicyItem(granularity=300,
                                                            points=10)])
        self.assertRaises(indexer.UnsupportedArchivePolicyChange,
                          self.index.update_archive_policy, "low",
                          [archive_policy.ArchivePolicyItem(granularity=300,
                                                            points=12),
                           archive_policy.ArchivePolicyItem(granularity=3600,
                                                            points=12),
                           archive_policy.ArchivePolicyItem(granularity=5,
                                                            points=6)])
        apname = str(uuid.uuid4())
        self.index.create_archive_policy(archive_policy.ArchivePolicy(
            apname, 0, [(12, 300), (24, 3600), (30, 86400)]))
        ap = self.index.update_archive_policy(
            apname, [archive_policy.ArchivePolicyItem(granularity=300,
                                                      points=6),
                     archive_policy.ArchivePolicyItem(granularity=3600,
                                                      points=24),
                     archive_policy.ArchivePolicyItem(granularity=86400,
                                                      points=30)])
        self.assertEqual({
            'back_window': 0,
            'aggregation_methods':
            set(self.conf.archive_policy.default_aggregation_methods),
            'definition': [
                {u'granularity': 300, u'points': 6, u'timespan': 1800},
                {u'granularity': 3600, u'points': 24, u'timespan': 86400},
                {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
            'name': apname}, dict(ap))
        ap = self.index.update_archive_policy(
            apname, [archive_policy.ArchivePolicyItem(granularity=300,
                                                      points=12),
                     archive_policy.ArchivePolicyItem(granularity=3600,
                                                      points=24),
                     archive_policy.ArchivePolicyItem(granularity=86400,
                                                      points=30)])
        self.assertEqual({
            'back_window': 0,
            'aggregation_methods':
            set(self.conf.archive_policy.default_aggregation_methods),
            'definition': [
                {u'granularity': 300, u'points': 12, u'timespan': 3600},
                {u'granularity': 3600, u'points': 24, u'timespan': 86400},
                {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
            'name': apname}, dict(ap))

    def test_delete_archive_policy(self):
        name = str(uuid.uuid4())
        self.index.create_archive_policy(
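The new test pins down update_archive_policy() semantics: resizing an existing granularity (changing its points, and with it the timespan) is accepted, while any change to the set of granularities raises UnsupportedArchivePolicyChange. Sketched against the default "low" policy, with index standing in for the driver:

try:
    index.update_archive_policy(
        "low",
        [archive_policy.ArchivePolicyItem(granularity=300, points=10)])
except indexer.UnsupportedArchivePolicyChange:
    # "low" defines three granularities; this definition drops two of them
    pass
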
@@ -77,10 +132,21 @@ class TestIndexerDriver(tests_base.TestCase):
        self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name)
        self.index.create_archive_policy_rule('rule3', 'abc.xyz', name)
        rules = self.index.list_archive_policy_rules()
        self.assertEqual(3, len(rules))
        self.assertEqual('abc.xyz.*', rules[0]['metric_pattern'])
        self.assertEqual('abc.xyz', rules[1]['metric_pattern'])
        self.assertEqual('abc.*', rules[2]['metric_pattern'])
        # NOTE(jd) The test is not isolated, there might be more than 3 rules
        found = 0
        for r in rules:
            if r['metric_pattern'] == 'abc.xyz.*':
                found = 1
            if found == 1 and r['metric_pattern'] == 'abc.xyz':
                found = 2
            if found == 2 and r['metric_pattern'] == 'abc.*':
                break
        else:
            self.fail("Metric patterns are not ordered")

        # Ensure we can't delete the archive policy
        self.assertRaises(indexer.ArchivePolicyInUse,
                          self.index.delete_archive_policy, name)

    def test_create_metric(self):
        r1 = uuid.uuid4()
@@ -91,6 +157,7 @@ class TestIndexerDriver(tests_base.TestCase):
        self.assertEqual(m.created_by_user_id, user)
        self.assertEqual(m.created_by_project_id, project)
        self.assertIsNone(m.name)
        self.assertIsNone(m.unit)
        self.assertIsNone(m.resource_id)
        m2 = self.index.list_metrics(id=r1)
        self.assertEqual([m], m2)
@@ -170,52 +237,6 @@ class TestIndexerDriver(tests_base.TestCase):
        m = self.index.list_metrics(id=rc.metrics[0].id)
        self.assertEqual(m[0], rc.metrics[0])

    def _do_test_create_instance(self, server_group=None, image_ref=None):
        r1 = uuid.uuid4()
        user = str(uuid.uuid4())
        project = str(uuid.uuid4())
        kwargs = {'server_group': server_group} if server_group else {}

        rc = self.index.create_resource('instance', r1, user, project,
                                        flavor_id="1",
                                        image_ref=image_ref,
                                        host="foo",
                                        display_name="lol", **kwargs)
        self.assertIsNotNone(rc.started_at)
        self.assertIsNotNone(rc.revision_start)
        self.assertEqual({"id": r1,
                          "revision_start": rc.revision_start,
                          "revision_end": None,
                          "type": "instance",
                          "created_by_user_id": user,
                          "created_by_project_id": project,
                          "user_id": None,
                          "project_id": None,
                          "started_at": rc.started_at,
                          "ended_at": None,
                          "display_name": "lol",
                          "server_group": server_group,
                          "host": "foo",
                          "image_ref": image_ref,
                          "flavor_id": "1",
                          "original_resource_id": None,
                          "metrics": {}},
                         rc.jsonify())
        rg = self.index.get_resource('generic', r1, with_metrics=True)
        self.assertEqual(rc.id, rg.id)
        self.assertEqual(rc.revision_start, rg.revision_start)
        self.assertEqual(rc.metrics, rg.metrics)

    def test_create_instance(self):
        self._do_test_create_instance(image_ref='http://foo/bar')

    def test_create_instance_with_server_group(self):
        self._do_test_create_instance('my_autoscaling_group',
                                      image_ref='http://foo/bar')

    def test_create_instance_without_image_ref(self):
        self._do_test_create_instance(image_ref=None)

    def test_delete_resource(self):
        r1 = uuid.uuid4()
        self.index.create_resource('generic', r1, str(uuid.uuid4()),
@ -426,30 +447,40 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
self.assertEqual(e1, r.metrics[0].id)
|
||||
|
||||
def test_update_resource_attribute(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
rtype = mgr.resource_type_from_dict(resource_type, {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, 'creating')
|
||||
r1 = uuid.uuid4()
|
||||
user = str(uuid.uuid4())
|
||||
project = str(uuid.uuid4())
|
||||
rc = self.index.create_resource('instance', r1, user, project,
|
||||
flavor_id="1",
|
||||
image_ref="http://foo/bar",
|
||||
host="foo",
|
||||
display_name="lol")
|
||||
rc = self.index.update_resource('instance', r1, host="bar")
|
||||
r = self.index.get_resource('instance', r1, with_metrics=True)
|
||||
# Create
|
||||
self.index.create_resource_type(rtype)
|
||||
|
||||
rc = self.index.create_resource(resource_type, r1, user, project,
|
||||
col1="foo")
|
||||
rc = self.index.update_resource(resource_type, r1, col1="foo")
|
||||
r = self.index.get_resource(resource_type, r1, with_metrics=True)
|
||||
self.assertEqual(rc, r)
|
||||
|
||||
def test_update_resource_no_change(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
rtype = mgr.resource_type_from_dict(resource_type, {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, 'creating')
|
||||
self.index.create_resource_type(rtype)
|
||||
r1 = uuid.uuid4()
|
||||
user = str(uuid.uuid4())
|
||||
project = str(uuid.uuid4())
|
||||
rc = self.index.create_resource('instance', r1, user, project,
|
||||
flavor_id="1",
|
||||
image_ref="http://foo/bar",
|
||||
host="foo",
|
||||
display_name="lol")
|
||||
updated = self.index.update_resource('instance', r1, host="foo",
|
||||
rc = self.index.create_resource(resource_type, r1, user, project,
|
||||
col1="foo")
|
||||
updated = self.index.update_resource(resource_type, r1, col1="foo",
|
||||
create_revision=False)
|
||||
r = self.index.list_resources('instance',
|
||||
r = self.index.list_resources(resource_type,
|
||||
{"=": {"id": r1}},
|
||||
history=True)
|
||||
self.assertEqual(1, len(r))
|
||||
|
@ -460,28 +491,27 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
r1 = uuid.uuid4()
|
||||
user = str(uuid.uuid4())
|
||||
project = str(uuid.uuid4())
|
||||
self.index.create_resource('instance', r1, user, project,
|
||||
flavor_id="1",
|
||||
image_ref="http://foo/bar",
|
||||
host="foo",
|
||||
display_name="lol")
|
||||
self.index.create_resource('generic', r1, user, project)
|
||||
self.assertRaises(
|
||||
indexer.ResourceValueError,
|
||||
self.index.update_resource,
|
||||
'instance', r1,
|
||||
'generic', r1,
|
||||
ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1))
|
||||
|
||||
def test_update_resource_unknown_attribute(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
rtype = mgr.resource_type_from_dict(resource_type, {
|
||||
"col1": {"type": "string", "required": False,
|
||||
"min_length": 1, "max_length": 2},
|
||||
}, 'creating')
|
||||
self.index.create_resource_type(rtype)
|
||||
r1 = uuid.uuid4()
|
||||
self.index.create_resource('instance', r1, str(uuid.uuid4()),
|
||||
str(uuid.uuid4()),
|
||||
flavor_id="1",
|
||||
image_ref="http://foo/bar",
|
||||
host="foo",
|
||||
display_name="lol")
|
||||
self.index.create_resource(resource_type, r1,
|
||||
str(uuid.uuid4()), str(uuid.uuid4()))
|
||||
self.assertRaises(indexer.ResourceAttributeError,
|
||||
self.index.update_resource,
|
||||
'instance',
|
||||
resource_type,
|
||||
r1, foo="bar")
|
||||
|
||||
def test_update_non_existent_metric(self):
|
||||
|
@ -526,13 +556,7 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
rc = self.index.create_resource('generic', r1, user, project,
|
||||
metrics={'foo': e1, 'bar': e2})
|
||||
self.index.delete_metric(e1)
|
||||
# It can be called twice
|
||||
try:
|
||||
self.index.delete_metric(e1)
|
||||
except indexer.NoSuchMetric:
|
||||
# It's possible that the metric has been expunged by another
|
||||
# parallel test. No worry.
|
||||
pass
|
||||
self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, e1)
|
||||
r = self.index.get_resource('generic', r1, with_metrics=True)
|
||||
self.assertIsNotNone(r.started_at)
|
||||
self.assertIsNotNone(r.revision_start)
|
||||
|
@ -549,19 +573,25 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
"type": "generic",
|
||||
"metrics": {'bar': str(e2)}}, r.jsonify())
|
||||
|
||||
def test_delete_instance(self):
|
||||
def test_delete_resource_custom(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {
|
||||
"flavor_id": {"type": "string",
|
||||
"min_length": 1,
|
||||
"max_length": 20,
|
||||
"required": True}
|
||||
}, 'creating'))
|
||||
r1 = uuid.uuid4()
|
||||
created = self.index.create_resource('instance', r1,
|
||||
created = self.index.create_resource(resource_type, r1,
|
||||
str(uuid.uuid4()),
|
||||
str(uuid.uuid4()),
|
||||
flavor_id="123",
|
||||
image_ref="foo",
|
||||
host="dwq",
|
||||
display_name="foobar")
|
||||
got = self.index.get_resource('instance', r1, with_metrics=True)
|
||||
flavor_id="foo")
|
||||
got = self.index.get_resource(resource_type, r1, with_metrics=True)
|
||||
self.assertEqual(created, got)
|
||||
self.index.delete_resource(r1)
|
||||
got = self.index.get_resource('instance', r1)
|
||||
got = self.index.get_resource(resource_type, r1)
|
||||
self.assertIsNone(got)
|
||||
|
||||
def test_list_resources_by_unknown_field(self):
|
||||
|
@ -607,14 +637,14 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
project = str(uuid.uuid4())
|
||||
g = self.index.create_resource('generic', r1, user, project,
|
||||
user, project)
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {}, 'creating'))
|
||||
r2 = uuid.uuid4()
|
||||
i = self.index.create_resource('instance', r2,
|
||||
i = self.index.create_resource(resource_type, r2,
|
||||
user, project,
|
||||
user, project,
|
||||
flavor_id="123",
|
||||
image_ref="foo",
|
||||
host="dwq",
|
||||
display_name="foobar")
|
||||
user, project)
|
||||
resources = self.index.list_resources(
|
||||
'generic',
|
||||
attribute_filter={"=": {"user_id": user}},
|
||||
|
@ -672,13 +702,13 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
r1 = uuid.uuid4()
|
||||
g = self.index.create_resource('generic', r1,
|
||||
str(uuid.uuid4()), str(uuid.uuid4()))
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {}, 'creating'))
|
||||
r2 = uuid.uuid4()
|
||||
i = self.index.create_resource('instance', r2,
|
||||
str(uuid.uuid4()), str(uuid.uuid4()),
|
||||
flavor_id="123",
|
||||
image_ref="foo",
|
||||
host="dwq",
|
||||
display_name="foobar")
|
||||
i = self.index.create_resource(resource_type, r2,
|
||||
str(uuid.uuid4()), str(uuid.uuid4()))
|
||||
resources = self.index.list_resources('generic')
|
||||
self.assertGreaterEqual(len(resources), 2)
|
||||
g_found = False
|
||||
|
@ -694,7 +724,7 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
else:
|
||||
self.fail("Some resources were not found")
|
||||
|
||||
resources = self.index.list_resources('instance')
|
||||
resources = self.index.list_resources(resource_type)
|
||||
self.assertGreaterEqual(len(resources), 1)
|
||||
for r in resources:
|
||||
if r.id == r2:
|
||||
|
@ -713,6 +743,21 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
'generic',
|
||||
attribute_filter={"=": {"id": "f00bar" * 50}})
|
||||
|
||||
def test_list_resource_attribute_type_numeric(self):
|
||||
"""Test that we can pass an integer to filter on a string type."""
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {
|
||||
"flavor_id": {"type": "string",
|
||||
"min_length": 1,
|
||||
"max_length": 20,
|
||||
"required": False},
|
||||
}, 'creating'))
|
||||
r = self.index.list_resources(
|
||||
resource_type, attribute_filter={"=": {"flavor_id": 1.0}})
|
||||
self.assertEqual(0, len(r))
|
||||
|
||||
def test_list_resource_weird_date(self):
|
||||
self.assertRaises(
|
||||
indexer.QueryValueError,
|
||||
|
@ -788,7 +833,7 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
key=operator.itemgetter("revision_start"))
|
||||
self.assertEqual([r1, r2], resources)
|
||||
|
||||
def test_list_resources_instance_with_history(self):
|
||||
def test_list_resources_custom_with_history(self):
|
||||
e1 = uuid.uuid4()
|
||||
e2 = uuid.uuid4()
|
||||
rid = uuid.uuid4()
|
||||
|
@ -797,6 +842,14 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
new_user = str(uuid.uuid4())
|
||||
new_project = str(uuid.uuid4())
|
||||
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, 'creating'))
|
||||
|
||||
self.index.create_metric(e1, user, project,
|
||||
archive_policy_name="low")
|
||||
self.index.create_metric(e2, user, project,
|
||||
|
@ -804,17 +857,14 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
self.index.create_metric(uuid.uuid4(), user, project,
|
||||
archive_policy_name="low")
|
||||
|
||||
r1 = self.index.create_resource('instance', rid, user, project,
|
||||
r1 = self.index.create_resource(resource_type, rid, user, project,
|
||||
user, project,
|
||||
flavor_id="123",
|
||||
image_ref="foo",
|
||||
host="dwq",
|
||||
display_name="foobar_history",
|
||||
col1="foo",
|
||||
metrics={'foo': e1, 'bar': e2}
|
||||
).jsonify()
|
||||
r2 = self.index.update_resource('instance', rid, user_id=new_user,
|
||||
r2 = self.index.update_resource(resource_type, rid, user_id=new_user,
|
||||
project_id=new_project,
|
||||
host="other",
|
||||
col1="bar",
|
||||
append_metrics=True).jsonify()
|
||||
|
||||
r1['revision_end'] = r2['revision_start']
|
||||
|
@ -823,8 +873,8 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
'bar': str(e2)}, r2['metrics'])
|
||||
self.assertEqual(new_user, r2['user_id'])
|
||||
self.assertEqual(new_project, r2['project_id'])
|
||||
self.assertEqual('other', r2['host'])
|
||||
resources = self.index.list_resources('instance', history=True,
|
||||
self.assertEqual('bar', r2['col1'])
|
||||
resources = self.index.list_resources(resource_type, history=True,
|
||||
details=False,
|
||||
attribute_filter={
|
||||
"=": {"id": rid}})
|
||||
|
@ -846,12 +896,12 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
|
||||
ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23))
|
||||
r2 = uuid.uuid4()
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
resource_type = str(uuid.uuid4())
|
||||
self.index.create_resource_type(
|
||||
mgr.resource_type_from_dict(resource_type, {}, 'creating'))
|
||||
i = self.index.create_resource(
|
||||
'instance', r2, user, project,
|
||||
flavor_id="123",
|
||||
image_ref="foo",
|
||||
host="dwq",
|
||||
display_name="foobar",
|
||||
resource_type, r2, user, project,
|
||||
started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
|
||||
ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23))
|
||||
resources = self.index.list_resources(
|
||||
|
@ -877,7 +927,7 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
self.fail("Some resources were not found")
|
||||
|
||||
resources = self.index.list_resources(
|
||||
'instance',
|
||||
resource_type,
|
||||
attribute_filter={
|
||||
">=": {
|
||||
"started_at": datetime.datetime(2000, 1, 1, 23, 23, 23)
|
||||
|
@ -900,6 +950,40 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
})
|
||||
self.assertEqual(0, len(resources))
|
||||
|
||||
def test_deletes_resources(self):
|
||||
r1 = uuid.uuid4()
|
||||
r2 = uuid.uuid4()
|
||||
user = str(uuid.uuid4())
|
||||
project = str(uuid.uuid4())
|
||||
metrics = {'foo': {'archive_policy_name': 'medium'}}
|
||||
g1 = self.index.create_resource('generic', r1, user, project,
|
||||
user, project, metrics=metrics)
|
||||
g2 = self.index.create_resource('generic', r2, user, project,
|
||||
user, project, metrics=metrics)
|
||||
|
||||
metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'],
|
||||
g2['metrics'][0]['id']])
|
||||
self.assertEqual(2, len(metrics))
|
||||
for m in metrics:
|
||||
self.assertEqual('active', m['status'])
|
||||
|
||||
deleted = self.index.delete_resources(
|
||||
'generic',
|
||||
attribute_filter={"=": {"user_id": user}})
|
||||
self.assertEqual(2, deleted)
|
||||
|
||||
resources = self.index.list_resources(
|
||||
'generic',
|
||||
attribute_filter={"=": {"user_id": user}})
|
||||
self.assertEqual(0, len(resources))
|
||||
|
||||
metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'],
|
||||
g2['metrics'][0]['id']],
|
||||
status='delete')
|
||||
self.assertEqual(2, len(metrics))
|
||||
for m in metrics:
|
||||
self.assertEqual('delete', m['status'])
|
||||
|
||||
def test_get_metric(self):
|
||||
e1 = uuid.uuid4()
|
||||
user = str(uuid.uuid4())
|
||||
|
@ -972,3 +1056,128 @@ class TestIndexerDriver(tests_base.TestCase):
|
|||
self.index.delete_metric(e1)
|
||||
metrics = self.index.list_metrics()
|
||||
self.assertNotIn(e1, [m.id for m in metrics])
|
||||
|
||||
def test_resource_type_crud(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
rtype = mgr.resource_type_from_dict("indexer_test", {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, "creating")
|
||||
|
||||
# Create
|
||||
self.index.create_resource_type(rtype)
|
||||
self.assertRaises(indexer.ResourceTypeAlreadyExists,
|
||||
self.index.create_resource_type,
|
||||
rtype)
|
||||
|
||||
# Get
|
||||
rtype = self.index.get_resource_type("indexer_test")
|
||||
self.assertEqual("indexer_test", rtype.name)
|
||||
self.assertEqual(1, len(rtype.attributes))
|
||||
self.assertEqual("col1", rtype.attributes[0].name)
|
||||
self.assertEqual("string", rtype.attributes[0].typename)
|
||||
self.assertEqual(15, rtype.attributes[0].max_length)
|
||||
self.assertEqual(2, rtype.attributes[0].min_length)
|
||||
self.assertEqual("active", rtype.state)
|
||||
|
||||
# List
|
||||
rtypes = self.index.list_resource_types()
|
||||
for rtype in rtypes:
|
||||
if rtype.name == "indexer_test":
|
||||
break
|
||||
else:
|
||||
self.fail("indexer_test not found")
|
||||
|
||||
# Test resource itself
|
||||
rid = uuid.uuid4()
|
||||
self.index.create_resource("indexer_test", rid,
|
||||
str(uuid.uuid4()),
|
||||
str(uuid.uuid4()),
|
||||
col1="col1_value")
|
||||
r = self.index.get_resource("indexer_test", rid)
|
||||
self.assertEqual("indexer_test", r.type)
|
||||
self.assertEqual("col1_value", r.col1)
|
||||
|
||||
# Deletion
|
||||
self.assertRaises(indexer.ResourceTypeInUse,
|
||||
self.index.delete_resource_type,
|
||||
"indexer_test")
|
||||
self.index.delete_resource(rid)
|
||||
self.index.delete_resource_type("indexer_test")
|
||||
|
||||
# Ensure it's deleted
|
||||
self.assertRaises(indexer.NoSuchResourceType,
|
||||
self.index.get_resource_type,
|
||||
"indexer_test")
|
||||
|
||||
self.assertRaises(indexer.NoSuchResourceType,
|
||||
self.index.delete_resource_type,
|
||||
"indexer_test")
|
||||
|
||||
def _get_rt_state(self, name):
|
||||
return self.index.get_resource_type(name).state
|
||||
|
||||
def test_resource_type_unexpected_creation_error(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
rtype = mgr.resource_type_from_dict("indexer_test_fail", {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, "creating")
|
||||
|
||||
states = {'before': None,
|
||||
'after': None}
|
||||
|
||||
def map_and_create_mock(rt, conn):
|
||||
states['before'] = self._get_rt_state("indexer_test_fail")
|
||||
raise MockException("boom!")
|
||||
|
||||
with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER,
|
||||
"map_and_create_tables",
|
||||
side_effect=map_and_create_mock):
|
||||
self.assertRaises(MockException,
|
||||
self.index.create_resource_type,
|
||||
rtype)
|
||||
states['after'] = self._get_rt_state('indexer_test_fail')
|
||||
|
||||
self.assertEqual([('after', 'creation_error'),
|
||||
('before', 'creating')],
|
||||
sorted(states.items()))
|
||||
|
||||
def test_resource_type_unexpected_deleting_error(self):
|
||||
mgr = self.index.get_resource_type_schema()
|
||||
rtype = mgr.resource_type_from_dict("indexer_test_fail2", {
|
||||
"col1": {"type": "string", "required": True,
|
||||
"min_length": 2, "max_length": 15}
|
||||
}, "creating")
|
||||
self.index.create_resource_type(rtype)
|
||||
|
||||
states = {'before': None,
|
||||
'after': None}
|
||||
|
||||
def map_and_create_mock(rt, conn):
|
||||
states['before'] = self._get_rt_state("indexer_test_fail2")
|
||||
raise MockException("boom!")
|
||||
|
||||
with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER,
|
||||
"unmap_and_delete_tables",
|
||||
side_effect=map_and_create_mock):
|
||||
self.assertRaises(MockException,
|
||||
self.index.delete_resource_type,
|
||||
rtype.name)
|
||||
states['after'] = self._get_rt_state('indexer_test_fail2')
|
||||
|
||||
self.assertEqual([('after', 'deletion_error'),
|
||||
('before', 'deleting')],
|
||||
sorted(states.items()))
|
||||
|
||||
# We can clean up the mess!
self.index.delete_resource_type("indexer_test_fail2")
|
||||
|
||||
# Ensure it's deleted
|
||||
self.assertRaises(indexer.NoSuchResourceType,
|
||||
self.index.get_resource_type,
|
||||
"indexer_test_fail2")
|
||||
|
||||
self.assertRaises(indexer.NoSuchResourceType,
|
||||
self.index.delete_resource_type,
|
||||
"indexer_test_fail2")
|
||||
|
|
|
@ -24,6 +24,8 @@ import uuid
|
|||
|
||||
from keystonemiddleware import fixture as ksm_fixture
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_middleware import cors
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from stevedore import extension
|
||||
|
@ -53,6 +55,8 @@ class TestingApp(webtest.TestApp):
|
|||
USER_ID_2 = str(uuid.uuid4())
|
||||
PROJECT_ID_2 = str(uuid.uuid4())
|
||||
|
||||
INVALID_TOKEN = str(uuid.uuid4())
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.auth = kwargs.pop('auth')
|
||||
self.storage = kwargs.pop('storage')
|
||||
|
@ -83,11 +87,35 @@ class TestingApp(webtest.TestApp):
|
|||
finally:
|
||||
self.token = old_token
|
||||
|
||||
@contextlib.contextmanager
|
||||
def use_invalid_token(self):
|
||||
if not self.auth:
|
||||
raise testcase.TestSkipped("No auth enabled")
|
||||
old_token = self.token
|
||||
self.token = self.INVALID_TOKEN
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
self.token = old_token
|
||||
|
||||
@contextlib.contextmanager
|
||||
def use_no_token(self):
|
||||
# We don't skip when self.auth is disabled, to ensure that
# these tests return the same thing whether auth is enabled or not
old_token = self.token
self.token = None
try:
yield
finally:
self.token = old_token

def do_request(self, req, *args, **kwargs):
if self.auth:
if self.auth and self.token is not None:
req.headers['X-Auth-Token'] = self.token
response = super(TestingApp, self).do_request(req, *args, **kwargs)
self.storage.process_background_tasks(self.indexer, sync=True)
metrics = self.storage.list_metric_with_measures_to_process(
None, None, full=True)
self.storage.process_background_tasks(self.indexer, metrics, sync=True)
return response


@@ -104,6 +132,13 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios):
self.path_get('etc/gnocchi/api-paste.ini'),
group="api")

# NOTE(sileht): This is not concurrency safe, but only this test file
# deals with CORS, so we are fine. set_override doesn't work because
# the cors group doesn't exist yet, and when the CORS middleware is
# created it registers the options and directly copies the values of
# all configuration options, making it impossible to override them
# properly...
cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

self.auth_token_fixture = self.useFixture(
ksm_fixture.AuthTokenFixture())
self.auth_token_fixture.add_token_data(
@@ -138,6 +173,40 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios):
indexer=self.index,
auth=self.auth)

# NOTE(jd) Used at least by docs
@staticmethod
def runTest():
pass


class RootTest(RestTest):

def _do_test_cors(self):
resp = self.app.options(
"/v1/status",
headers={'Origin': 'http://notallowed.com',
'Access-Control-Request-Method': 'GET'},
status=200)
headers = dict(resp.headers)
self.assertNotIn("Access-Control-Allow-Origin", headers)
self.assertNotIn("Access-Control-Allow-Methods", headers)
resp = self.app.options(
"/v1/status",
headers={'origin': 'http://foobar.com',
'Access-Control-Request-Method': 'GET'},
status=200)
headers = dict(resp.headers)
self.assertIn("Access-Control-Allow-Origin", headers)
self.assertIn("Access-Control-Allow-Methods", headers)

def test_cors_invalid_token(self):
with self.app.use_invalid_token():
self._do_test_cors()

def test_cors_no_token(self):
with self.app.use_no_token():
self._do_test_cors()

def test_deserialize_force_json(self):
with self.app.use_admin_user():
self.app.post(
@@ -147,31 +216,28 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios):

def test_capabilities(self):
custom_agg = extension.Extension('test_aggregation', None, None, None)
aggregation_methods = set(
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)
aggregation_methods.add('test_aggregation')
mgr = extension.ExtensionManager.make_test_instance(
[custom_agg], 'gnocchi.aggregates')
aggregation_methods = set(
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)

with mock.patch.object(extension, 'ExtensionManager',
return_value=mgr):
result = self.app.get("/v1/capabilities")
result = self.app.get("/v1/capabilities").json
self.assertEqual(
sorted(aggregation_methods),
sorted(json.loads(result.text)['aggregation_methods']))
sorted(result['aggregation_methods']))
self.assertEqual(
['test_aggregation'],
result['dynamic_aggregation_methods'])

def test_status(self):
with self.app.use_admin_user():
r = self.app.get("/v1/status")
status = json.loads(r.text)
# We are sure this is empty because we call process_measures() each
# time we do a REST request in this TestingApp.
self.assertEqual({},
status['storage']['measures_to_process'])

@staticmethod
def runTest():
pass
self.assertIsInstance(status['storage']['measures_to_process'], dict)
self.assertIsInstance(status['storage']['summary']['metrics'], int)
self.assertIsInstance(status['storage']['summary']['measures'], int)


class ArchivePolicyTest(RestTest):
@@ -321,11 +387,6 @@ class MetricTest(RestTest):
status=403)

def test_add_measures_back_window(self):
if self.conf.storage.driver == 'influxdb':
# FIXME(sileht): Won't pass with influxdb because it doesn't
# check archive policy
raise testcase.TestSkipped("InfluxDB issue")

ap_name = str(uuid.uuid4())
with self.app.use_admin_user():
self.app.post_json(
@@ -503,12 +564,17 @@ class MetricTest(RestTest):
self.assertIn('Invalid value for window', ret.text)

def test_get_resource_missing_named_metric_measure_aggregation(self):
mgr = self.index.get_resource_type_schema()
resource_type = str(uuid.uuid4())
self.index.create_resource_type(
mgr.resource_type_from_dict(resource_type, {
"server_group": {"type": "string",
"min_length": 1,
"max_length": 40,
"required": True}
}, 'creating'))

attributes = {
"started_at": "2014-01-03T02:02:02.000000",
"host": "foo",
"image_ref": "imageref!",
"flavor_id": "123",
"display_name": "myinstance",
"server_group": str(uuid.uuid4()),
}
result = self.app.post_json("/v1/metric",
@@ -531,16 +597,17 @@ class MetricTest(RestTest):

attributes['id'] = str(uuid.uuid4())
attributes['metrics'] = {'foo': metric1['id']}
self.app.post_json("/v1/resource/instance",
self.app.post_json("/v1/resource/" + resource_type,
params=attributes)

attributes['id'] = str(uuid.uuid4())
attributes['metrics'] = {'bar': metric2['id']}
self.app.post_json("/v1/resource/instance",
self.app.post_json("/v1/resource/" + resource_type,
params=attributes)

result = self.app.post_json(
"/v1/aggregation/resource/instance/metric/foo?aggregation=max",
"/v1/aggregation/resource/%s/metric/foo?aggregation=max"
% resource_type,
params={"=": {"server_group": attributes['server_group']}})

measures = json.loads(result.text)
@@ -582,192 +649,19 @@ class MetricTest(RestTest):


class ResourceTest(RestTest):

resource_scenarios = [
('generic', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='generic')),
('instance_disk', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"name": "disk-name",
"instance_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
"name": "new-disk-name",
},
resource_type='instance_disk')),
('instance_network_interface', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"name": "nic-name",
"instance_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
"name": "new-nic-name",
},
resource_type='instance_network_interface')),
('instance', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
# NOTE(jd) We test this one without user_id/project_id!
# Just to test that use case. :)
"host": "foo",
"image_ref": "imageref!",
"flavor_id": "123",
"display_name": "myinstance",
"server_group": "as_group",
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
"host": "fooz",
"image_ref": "imageref!z",
"flavor_id": "1234",
"display_name": "myinstancez",
"server_group": "new_as_group",
},
resource_type='instance')),
# swift notifications contain UUID user_id
('swift_account', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='swift_account')),
# swift pollsters contain None user_id
('swift_account_none_user', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": None,
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='swift_account')),
# TODO(dbelova): add tests with None project ID when we'll add kwapi,
# ipmi, hardware, etc. resources that are passed without project ID
('volume', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"display_name": "test_volume",
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
"display_name": "myvolume",
},
resource_type='volume')),
('ceph_account', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='ceph_account')),
('network', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='network')),
('identity', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='identity')),
('ipmi', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='ipmi')),
('stack', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='stack')),
# image pollsters contain UUID user_id
('image', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"name": "test-image",
"container_format": "aki",
"disk_format": "aki",
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='image')),
# image pollsters contain None user_id
('image_none_user', dict(
attributes={
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": None,
"project_id": str(uuid.uuid4()),
"name": "test-image2",
"container_format": "aki",
"disk_format": "aki",
},
patchable_attributes={
"ended_at": "2014-01-03T02:02:02+00:00",
},
resource_type='image')),
]

@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls.scenarios,
cls.resource_scenarios)

def setUp(self):
super(ResourceTest, self).setUp()
# Copy attributes so we can modify them in each test :)
self.attributes = self.attributes.copy()
# Set an id in the attribute
self.attributes['id'] = str(uuid.uuid4())
self.attributes = {
"id": str(uuid.uuid4()),
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"name": "my-name",
}
self.patchable_attributes = {
"ended_at": "2014-01-03T02:02:02+00:00",
"name": "new-name",
}
self.resource = self.attributes.copy()
# Set original_resource_id
self.resource['original_resource_id'] = self.resource['id']
@@ -777,7 +671,6 @@ class ResourceTest(RestTest):
else:
self.resource['created_by_user_id'] = None
self.resource['created_by_project_id'] = None
self.resource['type'] = self.resource_type
self.resource['ended_at'] = None
self.resource['metrics'] = {}
if 'user_id' not in self.resource:
@@ -785,6 +678,17 @@ class ResourceTest(RestTest):
if 'project_id' not in self.resource:
self.resource['project_id'] = None

mgr = self.index.get_resource_type_schema()
self.resource_type = str(uuid.uuid4())
self.index.create_resource_type(
mgr.resource_type_from_dict(self.resource_type, {
"name": {"type": "string",
"min_length": 1,
"max_length": 40,
"required": True}
}, "creating"))
self.resource['type'] = self.resource_type

@mock.patch.object(utils, 'utcnow')
def test_post_resource(self, utcnow):
utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
@@ -1721,10 +1625,9 @@ class ResourceTest(RestTest):

# NOTE(sileht): because the database is never cleaned between each test
# we must ensure that the query will not match resources from another
# test, to achieve this we set a different server_group on each test.
server_group = str(uuid.uuid4())
if self.resource_type == 'instance':
self.attributes['server_group'] = server_group
# test, to achieve this we set a different name on each test.
name = str(uuid.uuid4())
self.attributes['name'] = name

self.attributes['metrics'] = {'foo': metric1['id']}
self.app.post_json("/v1/resource/" + self.resource_type,
@@ -1738,14 +1641,11 @@ class ResourceTest(RestTest):
result = self.app.post_json(
"/v1/aggregation/resource/"
+ self.resource_type + "/metric/foo?aggregation=max",
params={"and":
[{"=": {"server_group": server_group}},
{"=": {"display_name": "myinstance"}}]},
params={"=": {"name": name}},
status=400)
if self.resource_type == 'instance':
self.assertIn(b"One of the metrics being aggregated doesn't have "
b"matching granularity",
result.body)
self.assertIn(b"One of the metrics being aggregated doesn't have "
b"matching granularity",
result.body)

def test_get_res_named_metric_measure_aggregation_nooverlap(self):
result = self.app.post_json("/v1/metric",
@@ -1763,10 +1663,9 @@ class ResourceTest(RestTest):

# NOTE(sileht): because the database is never cleaned between each test
# we must ensure that the query will not match resources from another
# test, to achieve this we set a different server_group on each test.
server_group = str(uuid.uuid4())
if self.resource_type == 'instance':
self.attributes['server_group'] = server_group
# test, to achieve this we set a different name on each test.
name = str(uuid.uuid4())
self.attributes['name'] = name

self.attributes['metrics'] = {'foo': metric1['id']}
self.app.post_json("/v1/resource/" + self.resource_type,
@@ -1780,35 +1679,25 @@ class ResourceTest(RestTest):
result = self.app.post_json(
"/v1/aggregation/resource/" + self.resource_type
+ "/metric/foo?aggregation=max",
params={"and":
[{"=": {"server_group": server_group}},
{"=": {"display_name": "myinstance"}}]},
params={"=": {"name": name}},
expect_errors=True)

if self.resource_type == 'instance':
self.assertEqual(400, result.status_code, result.text)
self.assertIn("No overlap", result.text)
else:
self.assertEqual(400, result.status_code)
self.assertEqual(400, result.status_code, result.text)
self.assertIn("No overlap", result.text)

result = self.app.post_json(
"/v1/aggregation/resource/"
+ self.resource_type + "/metric/foo?aggregation=min"
+ "&needed_overlap=0",
params={"and":
[{"=": {"server_group": server_group}},
{"=": {"display_name": "myinstance"}}]},
params={"=": {"name": name}},
expect_errors=True)

if self.resource_type == 'instance':
self.assertEqual(200, result.status_code, result.text)
measures = json.loads(result.text)
self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0],
['2013-01-01T12:00:00+00:00', 3600.0, 8.0],
['2013-01-01T12:00:00+00:00', 60.0, 8.0]],
measures)
else:
self.assertEqual(400, result.status_code)
self.assertEqual(200, result.status_code, result.text)
measures = json.loads(result.text)
self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0],
['2013-01-01T12:00:00+00:00', 3600.0, 8.0],
['2013-01-01T12:00:00+00:00', 60.0, 8.0]],
measures)

def test_get_res_named_metric_measure_aggregation_nominal(self):
result = self.app.post_json("/v1/metric",
@@ -1831,10 +1720,9 @@ class ResourceTest(RestTest):

# NOTE(sileht): because the database is never cleaned between each test
# we must ensure that the query will not match resources from another
# test, to achieve this we set a different server_group on each test.
|
||||
server_group = str(uuid.uuid4())
|
||||
if self.resource_type == 'instance':
|
||||
self.attributes['server_group'] = server_group
|
||||
# test, to achieve this we set a different name on each test.
|
||||
name = str(uuid.uuid4())
|
||||
self.attributes['name'] = name
|
||||
|
||||
self.attributes['metrics'] = {'foo': metric1['id']}
|
||||
self.app.post_json("/v1/resource/" + self.resource_type,
|
||||
|
@ -1848,54 +1736,39 @@ class ResourceTest(RestTest):
|
|||
result = self.app.post_json(
|
||||
"/v1/aggregation/resource/" + self.resource_type
|
||||
+ "/metric/foo?aggregation=max",
|
||||
params={"and":
|
||||
[{"=": {"server_group": server_group}},
|
||||
{"=": {"display_name": "myinstance"}}]},
|
||||
params={"=": {"name": name}},
|
||||
expect_errors=True)
|
||||
|
||||
if self.resource_type == 'instance':
|
||||
self.assertEqual(200, result.status_code, result.text)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
|
||||
[u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
|
||||
[u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
|
||||
measures)
|
||||
else:
|
||||
self.assertEqual(400, result.status_code)
|
||||
self.assertEqual(200, result.status_code, result.text)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
|
||||
[u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
|
||||
[u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
|
||||
measures)
|
||||
|
||||
result = self.app.post_json(
|
||||
"/v1/aggregation/resource/"
|
||||
+ self.resource_type + "/metric/foo?aggregation=min",
|
||||
params={"and":
|
||||
[{"=": {"server_group": server_group}},
|
||||
{"=": {"display_name": "myinstance"}}]},
|
||||
params={"=": {"name": name}},
|
||||
expect_errors=True)
|
||||
|
||||
if self.resource_type == 'instance':
|
||||
self.assertEqual(200, result.status_code)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0],
|
||||
['2013-01-01T12:00:00+00:00', 3600.0, 0],
|
||||
['2013-01-01T12:00:00+00:00', 60.0, 0]],
|
||||
measures)
|
||||
else:
|
||||
self.assertEqual(400, result.status_code)
|
||||
self.assertEqual(200, result.status_code)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0],
|
||||
['2013-01-01T12:00:00+00:00', 3600.0, 0],
|
||||
['2013-01-01T12:00:00+00:00', 60.0, 0]],
|
||||
measures)
|
||||
|
||||
def test_get_aggregated_measures_across_entities_no_match(self):
|
||||
result = self.app.post_json(
|
||||
"/v1/aggregation/resource/"
|
||||
+ self.resource_type + "/metric/foo?aggregation=min",
|
||||
params={"and":
|
||||
[{"=": {"server_group": "notexistentyet"}},
|
||||
{"=": {"display_name": "myinstance"}}]},
|
||||
params={"=": {"name": "none!"}},
|
||||
expect_errors=True)
|
||||
|
||||
if self.resource_type == 'instance':
|
||||
self.assertEqual(200, result.status_code)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([], measures)
|
||||
else:
|
||||
self.assertEqual(400, result.status_code)
|
||||
self.assertEqual(200, result.status_code)
|
||||
measures = json.loads(result.text)
|
||||
self.assertEqual([], measures)
|
||||
|
||||
def test_get_aggregated_measures_across_entities(self):
|
||||
result = self.app.post_json("/v1/metric",
|
||||
|
@ -1934,6 +1807,27 @@ class ResourceTest(RestTest):
|
|||
[u'2013-01-01T12:00:00+00:00', 60.0, 7.0]],
|
||||
measures)
|
||||
|
||||
def test_search_resources_with_like(self):
|
||||
result = self.app.post_json(
|
||||
"/v1/resource/" + self.resource_type,
|
||||
params=self.attributes)
|
||||
created_resource = json.loads(result.text)
|
||||
|
||||
result = self.app.post_json(
|
||||
"/v1/search/resource/" + self.resource_type,
|
||||
params={"like": {"name": "my%"}},
|
||||
status=200)
|
||||
|
||||
resources = json.loads(result.text)
|
||||
self.assertIn(created_resource, resources)
|
||||
|
||||
result = self.app.post_json(
|
||||
"/v1/search/resource/" + self.resource_type,
|
||||
params={"like": {"name": str(uuid.uuid4())}},
|
||||
status=200)
|
||||
resources = json.loads(result.text)
|
||||
self.assertEqual([], resources)
|
||||
|
||||
|
||||
class GenericResourceTest(RestTest):
|
||||
def test_list_resources_tied_to_user(self):
|
||||
|
@ -1984,35 +1878,3 @@ class GenericResourceTest(RestTest):
|
|||
"Invalid input: extra keys not allowed @ data["
|
||||
+ repr(u'wrongoperator') + "]",
|
||||
result.text)
|
||||
|
||||
def test_search_resources_with_like(self):
|
||||
attributes = {
|
||||
"id": str(uuid.uuid4()),
|
||||
"started_at": "2014-01-03T02:02:02.000000",
|
||||
"host": "computenode42",
|
||||
"image_ref": "imageref!",
|
||||
"flavor_id": "123",
|
||||
"display_name": "myinstance",
|
||||
}
|
||||
result = self.app.post_json(
|
||||
"/v1/resource/instance",
|
||||
params=attributes)
|
||||
created_resource = json.loads(result.text)
|
||||
|
||||
result = self.app.post_json(
|
||||
"/v1/search/resource/instance",
|
||||
params={"like": {"host": "computenode%"}},
|
||||
status=200)
|
||||
|
||||
resources = json.loads(result.text)
|
||||
self.assertIn(created_resource, resources)
|
||||
|
||||
result = self.app.post_json(
|
||||
"/v1/search/resource/instance",
|
||||
params={"like": {"host": str(uuid.uuid4())}},
|
||||
status=200)
|
||||
resources = json.loads(result.text)
|
||||
self.assertEqual([], resources)
|
||||
|
||||
|
||||
ResourceTest.generate_scenarios()
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Copyright © 2016 Red Hat, Inc.
|
||||
# Copyright © 2015 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
|
@ -13,11 +14,9 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
import mock
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from gnocchi import indexer
|
||||
from gnocchi import statsd
|
||||
|
@ -52,11 +51,11 @@ class TestStatsd(tests_base.TestCase):
|
|||
def test_flush_empty(self):
|
||||
self.server.stats.flush()
|
||||
|
||||
@mock.patch.object(timeutils, 'utcnow')
|
||||
@mock.patch.object(utils, 'utcnow')
|
||||
def _test_gauge_or_ms(self, metric_type, utcnow):
|
||||
metric_name = "test_gauge_or_ms"
|
||||
metric_key = metric_name + "|" + metric_type
|
||||
utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36)
|
||||
utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
|
||||
self.server.datagram_received(
|
||||
("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
|
||||
("127.0.0.1", 12345))
|
||||
|
@ -69,7 +68,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
metric = r.get_metric(metric_key)
|
||||
|
||||
self.stats.storage.process_background_tasks(
|
||||
self.stats.indexer, sync=True)
|
||||
self.stats.indexer, [str(metric.id)], sync=True)
|
||||
|
||||
measures = self.stats.storage.get_measures(metric)
|
||||
self.assertEqual([
|
||||
|
@ -78,7 +77,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
(utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)
|
||||
], measures)
|
||||
|
||||
utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37)
|
||||
utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
|
||||
# This one is going to be ignored
|
||||
self.server.datagram_received(
|
||||
("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
|
||||
|
@ -89,7 +88,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
self.stats.flush()
|
||||
|
||||
self.stats.storage.process_background_tasks(
|
||||
self.stats.indexer, sync=True)
|
||||
self.stats.indexer, [str(metric.id)], sync=True)
|
||||
|
||||
measures = self.stats.storage.get_measures(metric)
|
||||
self.assertEqual([
|
||||
|
@ -105,11 +104,11 @@ class TestStatsd(tests_base.TestCase):
|
|||
def test_ms(self):
|
||||
self._test_gauge_or_ms("ms")
|
||||
|
||||
@mock.patch.object(timeutils, 'utcnow')
|
||||
@mock.patch.object(utils, 'utcnow')
|
||||
def test_counter(self, utcnow):
|
||||
metric_name = "test_counter"
|
||||
metric_key = metric_name + "|c"
|
||||
utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36)
|
||||
utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
|
||||
self.server.datagram_received(
|
||||
("%s:1|c" % metric_name).encode('ascii'),
|
||||
("127.0.0.1", 12345))
|
||||
|
@ -122,7 +121,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
self.assertIsNotNone(metric)
|
||||
|
||||
self.stats.storage.process_background_tasks(
|
||||
self.stats.indexer, sync=True)
|
||||
self.stats.indexer, [str(metric.id)], sync=True)
|
||||
|
||||
measures = self.stats.storage.get_measures(metric)
|
||||
self.assertEqual([
|
||||
|
@ -130,7 +129,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
(utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
|
||||
(utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures)
|
||||
|
||||
utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37)
|
||||
utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
|
||||
self.server.datagram_received(
|
||||
("%s:45|c" % metric_name).encode('ascii'),
|
||||
("127.0.0.1", 12345))
|
||||
|
@ -140,7 +139,7 @@ class TestStatsd(tests_base.TestCase):
|
|||
self.stats.flush()
|
||||
|
||||
self.stats.storage.process_background_tasks(
|
||||
self.stats.indexer, sync=True)
|
||||
self.stats.indexer, [str(metric.id)], sync=True)
|
||||
|
||||
measures = self.stats.storage.get_measures(metric)
|
||||
self.assertEqual([
|
||||
|
|
|
@ -21,9 +21,11 @@ from oslo_utils import timeutils
|
|||
from oslotest import base
|
||||
import six.moves
|
||||
|
||||
from gnocchi import archive_policy
|
||||
from gnocchi import carbonara
|
||||
from gnocchi import indexer
|
||||
from gnocchi import storage
|
||||
from gnocchi.storage import _carbonara
|
||||
from gnocchi.storage import null
|
||||
from gnocchi.tests import base as tests_base
|
||||
from gnocchi import utils
|
||||
|
||||
|
@ -42,50 +44,78 @@ class TestStorageDriver(tests_base.TestCase):
|
|||
archive_policy_name)
|
||||
return m, m_sql
|
||||
|
||||
def test_get_driver(self):
|
||||
self.conf.set_override('driver', 'null', 'storage')
|
||||
driver = storage.get_driver(self.conf)
|
||||
self.assertIsInstance(driver, null.NullStorage)
|
||||
def trigger_processing(self, metrics=None):
|
||||
if metrics is None:
|
||||
metrics = [str(self.metric.id)]
|
||||
self.storage.process_background_tasks(self.index, metrics, sync=True)
|
||||
|
||||
@mock.patch('gnocchi.storage._carbonara.LOG')
|
||||
def test_corrupted_data(self, logger):
|
||||
def test_get_driver(self):
|
||||
driver = storage.get_driver(self.conf)
|
||||
self.assertIsInstance(driver, storage.StorageDriver)
|
||||
|
||||
def test_corrupted_data(self):
|
||||
if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
|
||||
self.skipTest("This driver is not based on Carbonara")
|
||||
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
|
||||
])
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.trigger_processing()
|
||||
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1),
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 13, 0, 1), 1),
|
||||
])
|
||||
with mock.patch('gnocchi.carbonara.msgpack.unpack',
|
||||
side_effect=ValueError("boom!")):
|
||||
with mock.patch('gnocchi.carbonara.msgpack.loads',
|
||||
side_effect=ValueError("boom!")):
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
|
||||
self.assertEqual([
|
||||
(utils.datetime_utc(2014, 1, 1), 86400.0, 1),
|
||||
(utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1),
|
||||
(utils.datetime_utc(2014, 1, 1, 13), 300.0, 1),
|
||||
], self.storage.get_measures(self.metric))
|
||||
with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
|
||||
side_effect=ValueError("boom!")):
|
||||
with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize',
|
||||
side_effect=ValueError("boom!")):
|
||||
self.trigger_processing()
|
||||
|
||||
m = self.storage.get_measures(self.metric)
|
||||
self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m)
|
||||
self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m)
|
||||
self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m)
|
||||
|
||||
def test_list_metric_with_measures_to_process(self):
|
||||
metrics = self.storage.list_metric_with_measures_to_process(
|
||||
None, None, full=True)
|
||||
self.assertEqual(set(), metrics)
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
|
||||
])
|
||||
metrics = self.storage.list_metric_with_measures_to_process(
|
||||
None, None, full=True)
|
||||
self.assertEqual(set([str(self.metric.id)]), metrics)
|
||||
self.trigger_processing()
|
||||
metrics = self.storage.list_metric_with_measures_to_process(
|
||||
None, None, full=True)
|
||||
self.assertEqual(set([]), metrics)
|
||||
|
||||
def test_delete_nonempty_metric(self):
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
|
||||
])
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.storage.delete_metric(self.metric)
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.trigger_processing()
|
||||
self.storage.delete_metric(self.metric, sync=True)
|
||||
self.trigger_processing()
|
||||
|
||||
def test_delete_nonempty_metric_unprocessed(self):
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
|
||||
])
|
||||
self.storage.delete_metric(self.metric)
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.storage.delete_metric(self.metric, sync=True)
|
||||
self.trigger_processing()
|
||||
|
||||
def test_delete_expunge_metric(self):
|
||||
self.storage.add_measures(self.metric, [
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
|
||||
])
|
||||
self.trigger_processing()
|
||||
self.index.delete_metric(self.metric.id)
|
||||
self.storage.expunge_metrics(self.index, sync=True)
|
||||
self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric,
|
||||
self.metric.id)
|
||||
|
||||
def test_measures_reporting(self):
|
||||
report = self.storage.measures_report(True)
|
||||
|
@ -105,42 +135,63 @@ class TestStorageDriver(tests_base.TestCase):
|
|||
def test_add_measures_big(self):
|
||||
m, __ = self._create_metric('high')
|
||||
self.storage.add_measures(m, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 1, 12, i, j), 100)
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, i, j), 100)
|
||||
for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.trigger_processing([str(m.id)])
|
||||
|
||||
self.assertEqual(3661, len(self.storage.get_measures(m)))
|
||||
|
||||
@mock.patch('gnocchi.carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT', 48)
|
||||
def test_add_measures_big_update_subset(self):
|
||||
@mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48)
|
||||
def test_add_measures_update_subset_split(self):
|
||||
m, m_sql = self._create_metric('medium')
|
||||
measures = [
|
||||
storage.Measure(datetime.datetime(2014, 1, i, j, 0, 0), 100)
|
||||
for i in six.moves.range(1, 6) for j in six.moves.range(0, 24)]
|
||||
measures.append(
|
||||
storage.Measure(datetime.datetime(2014, 1, 6, 0, 0, 0), 100))
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100)
|
||||
for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
|
||||
self.storage.add_measures(m, measures)
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
self.trigger_processing([str(m.id)])
|
||||
|
||||
# add measure to end, in same aggregate time as last point.
|
||||
self.storage.add_measures(m, [
|
||||
storage.Measure(datetime.datetime(2014, 1, 6, 1, 0, 0), 100)])
|
||||
storage.Measure(utils.datetime_utc(2014, 1, 6, 1, 58, 1), 100)])
|
||||
|
||||
with mock.patch.object(self.storage, '_store_metric_measures') as c:
|
||||
self.storage.process_background_tasks(self.index, sync=True)
|
||||
# should only resample last aggregate
|
||||
self.trigger_processing([str(m.id)])
|
||||
count = 0
|
||||
for call in c.mock_calls:
|
||||
if mock.call(m_sql, mock.ANY, 'mean', 3600.0, mock.ANY) == call:
|
||||
# policy is 60 points and split is 48. should only update 2nd half
|
||||
args = call[1]
|
||||
if args[0] == m_sql and args[2] == 'mean' and args[3] == 60.0:
|
||||
count += 1
|
||||
self.assertEqual(1, count)
|
||||
|
 
+    def test_add_measures_update_subset(self):
+        m, m_sql = self._create_metric('medium')
+        measures = [
+            storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100)
+            for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
+        self.storage.add_measures(m, measures)
+        self.trigger_processing([str(m.id)])
+
+        # add measure to end, in same aggregate time as last point.
+        new_point = utils.datetime_utc(2014, 1, 6, 1, 58, 1)
+        self.storage.add_measures(m, [storage.Measure(new_point, 100)])
+
+        with mock.patch.object(self.storage, '_add_measures') as c:
+            self.trigger_processing([str(m.id)])
+        for __, args, __ in c.mock_calls:
+            self.assertEqual(
+                list(args[3])[0][0], carbonara.round_timestamp(
+                    new_point, args[1].granularity * 10e8))
+
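The assertion in this new test checks that, on an incremental update, every _add_measures call starts at the new point's timestamp rounded down to the target granularity (the 10e8 factor converts seconds to nanoseconds). A standalone sketch of that rounding — behaviourally equivalent for this case, not gnocchi's implementation:

    import datetime

    def round_timestamp(ts, freq_ns):
        # floor ts to a multiple of freq_ns nanoseconds since the epoch
        epoch = datetime.datetime(1970, 1, 1)
        freq_s = freq_ns / 1e9
        seconds = (ts - epoch).total_seconds()
        return epoch + datetime.timedelta(seconds=(seconds // freq_s) * freq_s)

    assert (round_timestamp(datetime.datetime(2014, 1, 6, 1, 58, 1), 60 * 10e8)
            == datetime.datetime(2014, 1, 6, 1, 58))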
     def test_delete_old_measures(self):
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing()
 
         self.assertEqual([
             (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
 
@@ -152,9 +203,9 @@ class TestStorageDriver(tests_base.TestCase):
 
         # One year later…
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2015, 1, 1, 12, 0, 1), 69),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing()
 
         self.assertEqual([
             (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
 
@@ -163,18 +214,121 @@ class TestStorageDriver(tests_base.TestCase):
             (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69),
         ], self.storage.get_measures(self.metric))
 
+        self.assertEqual({"1244160000.0"},
+                         self.storage._list_split_keys_for_metric(
+                             self.metric, "mean", 86400.0))
+        self.assertEqual({"1412640000.0"},
+                         self.storage._list_split_keys_for_metric(
+                             self.metric, "mean", 3600.0))
+        self.assertEqual({"1419120000.0"},
+                         self.storage._list_split_keys_for_metric(
+                             self.metric, "mean", 300.0))
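The three split keys asserted just above follow from carbonara's split sizing: with 3600 points per split (matching the comment in test_rewrite_measures below; assumed to be the default in this release), a split at granularity g covers 3600 * g seconds, and a timestamp's key is its epoch value floored to that span. A hedged re-derivation of the expected values:

    POINTS_PER_SPLIT = 3600  # assumed default, per the test comment below

    def split_key(epoch_seconds, granularity):
        span = int(POINTS_PER_SPLIT * granularity)
        return float(epoch_seconds - epoch_seconds % span)

    # 2015-01-01T12:00:01Z is epoch 1420113601, the newest measure kept
    assert split_key(1420113601, 86400.0) == 1244160000.0
    assert split_key(1420113601, 3600.0) == 1412640000.0
    assert split_key(1420113601, 300.0) == 1419120000.0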
+
+    def test_rewrite_measures(self):
+        # Create an archive policy that spans several splits. Each split
+        # being 3600 points, let's go for 36k points so we have 10 splits.
+        apname = str(uuid.uuid4())
+        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
+        self.index.create_archive_policy(ap)
+        self.metric = storage.Metric(uuid.uuid4(), ap)
+        self.index.create_metric(self.metric.id, str(uuid.uuid4()),
+                                 str(uuid.uuid4()),
+                                 apname)
+
+        # First store some points scattered across different splits
+        self.storage.add_measures(self.metric, [
+            storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44),
+        ])
+        self.trigger_processing()
+
+        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
+        self.assertEqual(splits,
+                         self.storage._list_split_keys_for_metric(
+                             self.metric, "mean", 60.0))
+
+        if self.storage.WRITE_FULL:
+            assertCompressedIfWriteFull = self.assertTrue
+        else:
+            assertCompressedIfWriteFull = self.assertFalse
+
+        data = self.storage._get_measures(
+            self.metric, '1451520000.0', "mean", 60.0)
+        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+        data = self.storage._get_measures(
+            self.metric, '1451736000.0', "mean", 60.0)
+        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+        data = self.storage._get_measures(
+            self.metric, '1451952000.0', "mean", 60.0)
+        assertCompressedIfWriteFull(
+            carbonara.AggregatedTimeSerie.is_compressed(data))
+
+        self.assertEqual([
+            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+        ], self.storage.get_measures(self.metric, granularity=60.0))
+
+        # Now store brand new points that should force a rewrite of one of
+        # the splits (keep in mind the back window size is one hour here).
+        # We move the BoundTimeSerie processing timeserie far away from its
+        # current range.
+        self.storage.add_measures(self.metric, [
+            storage.Measure(utils.datetime_utc(2016, 1, 10, 16, 18, 45), 45),
+            storage.Measure(utils.datetime_utc(2016, 1, 10, 17, 12, 45), 46),
+        ])
+        self.trigger_processing()
+
+        self.assertEqual({'1452384000.0', '1451736000.0',
+                          '1451520000.0', '1451952000.0'},
+                         self.storage._list_split_keys_for_metric(
+                             self.metric, "mean", 60.0))
+        data = self.storage._get_measures(
+            self.metric, '1451520000.0', "mean", 60.0)
+        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+        data = self.storage._get_measures(
+            self.metric, '1451736000.0', "mean", 60.0)
+        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+        data = self.storage._get_measures(
+            self.metric, '1451952000.0', "mean", 60.0)
+        # Now this one is compressed because it has been rewritten!
+        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
+        data = self.storage._get_measures(
+            self.metric, '1452384000.0', "mean", 60.0)
+        assertCompressedIfWriteFull(
+            carbonara.AggregatedTimeSerie.is_compressed(data))
+
+        self.assertEqual([
+            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
+            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
+            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
+            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
+            (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45),
+            (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46),
+        ], self.storage.get_measures(self.metric, granularity=60.0))
+
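This new test also doubles as a check of the serialization policy the assertions encode: drivers with WRITE_FULL rewrite whole split objects, so every split they store ends up compressed, while append-style drivers leave the still-active split uncompressed and compress it only once it has been rewritten — hence the "because it has been rewritten!" assertion. A loose model of that expectation (my reading of the assertions, not driver code):

    def expect_compressed(split_is_active, write_full):
        # closed or rewritten splits are always compressed; the active
        # split is compressed only for whole-object (WRITE_FULL) drivers
        return write_full or not split_is_active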
     def test_updated_measures(self):
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing()
 
         self.assertEqual([
             (utils.datetime_utc(2014, 1, 1), 86400.0, 55.5),
             (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 55.5),
             (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69),
             (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0),
         ], self.storage.get_measures(self.metric))
 
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing()
 
         self.assertEqual([
             (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
 
@@ -202,12 +356,12 @@ class TestStorageDriver(tests_base.TestCase):
 
     def test_add_and_get_measures(self):
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing()
 
         self.assertEqual([
             (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
 
@@ -292,10 +446,10 @@ class TestStorageDriver(tests_base.TestCase):
 
     def test_get_measure_unknown_aggregation(self):
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.assertRaises(storage.AggregationDoesNotExist,
                           self.storage.get_measures,
 
@@ -305,16 +459,16 @@ class TestStorageDriver(tests_base.TestCase):
         metric2 = storage.Metric(uuid.uuid4(),
                                  self.archive_policies['low'])
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.assertRaises(storage.AggregationDoesNotExist,
                           self.storage.get_cross_metric_measures,
 
@@ -325,16 +479,16 @@ class TestStorageDriver(tests_base.TestCase):
         metric2 = storage.Metric(uuid.uuid4(),
                                  self.archive_policies['low'])
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.assertRaises(storage.GranularityDoesNotExist,
                           self.storage.get_cross_metric_measures,
 
@@ -345,16 +499,16 @@ class TestStorageDriver(tests_base.TestCase):
         metric2 = storage.Metric(uuid.uuid4(),
                                  self.archive_policies['no_granularity_match'])
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
 
         self.assertRaises(storage.MetricUnaggregatable,
 
@@ -364,18 +518,18 @@ class TestStorageDriver(tests_base.TestCase):
     def test_add_and_get_cross_metric_measures(self):
         metric2, __ = self._create_metric()
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44),
         ])
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 41), 2),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 41), 2),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 10, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 4),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing([str(self.metric.id), str(metric2.id)])
 
         values = self.storage.get_cross_metric_measures([self.metric, metric2])
         self.assertEqual([
 
@@ -386,6 +540,16 @@ class TestStorageDriver(tests_base.TestCase):
             (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0)
         ], values)
 
+        values = self.storage.get_cross_metric_measures([self.metric, metric2],
+                                                        reaggregation='max')
+        self.assertEqual([
+            (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 39.75),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 39.75),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 69),
+            (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 23),
+            (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 44)
+        ], values)
+
         values = self.storage.get_cross_metric_measures(
             [self.metric, metric2],
             from_timestamp=utils.to_timestamp('2014-01-01 12:10:00'))
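The reaggregation='max' case added in this hunk shows the two-level semantics: each metric is first reduced with its own aggregation method (mean here), and only then are the per-timestamp results combined across metrics with the reaggregation function, instead of the default mean-of-means. Reproducing the 300-second buckets of the expected output by hand:

    # per-metric 300s means, computed from the measures added above
    metric1 = {"12:00": 69.0, "12:05": (42 + 4) / 2.0, "12:10": 44.0}
    metric2 = {"12:00": 9.0, "12:05": 2.0, "12:10": (4 + 4) / 2.0}
    cross = {t: max(metric1[t], metric2[t]) for t in metric1}
    assert cross == {"12:00": 69.0, "12:05": 23.0, "12:10": 44.0}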
@@ -439,19 +603,19 @@ class TestStorageDriver(tests_base.TestCase):
     def test_add_and_get_cross_metric_measures_with_holes(self):
         metric2, __ = self._create_metric()
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42),
         ])
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing([str(self.metric.id), str(metric2.id)])
 
         values = self.storage.get_cross_metric_measures([self.metric, metric2])
         self.assertEqual([
 
@@ -465,20 +629,20 @@ class TestStorageDriver(tests_base.TestCase):
     def test_search_value(self):
         metric2, __ = self._create_metric()
         self.storage.add_measures(self.metric, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1,), 69),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1,), 69),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42),
         ])
 
         self.storage.add_measures(metric2, [
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
-            storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2),
         ])
-        self.storage.process_background_tasks(self.index, sync=True)
+        self.trigger_processing([str(self.metric.id), str(metric2.id)])
 
         self.assertEqual(
             {metric2: [],
 
@@ -499,6 +663,47 @@ class TestStorageDriver(tests_base.TestCase):
                                      {u"eq": 100},
                                      {u"≠": 50}]}))
 
+    def test_resize_policy(self):
+        name = str(uuid.uuid4())
+        ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
+        self.index.create_archive_policy(ap)
+        m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()),
+                                     str(uuid.uuid4()), name)
+        m = self.index.list_metrics(ids=[m.id])[0]
+        self.storage.add_measures(m, [
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 0), 1),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 1),
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 10), 1),
+        ])
+        self.trigger_processing([str(m.id)])
+        self.assertEqual([
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+        ], self.storage.get_measures(m))
+        # expand to more points
+        self.index.update_archive_policy(
+            name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
+        m = self.index.list_metrics(ids=[m.id])[0]
+        self.storage.add_measures(m, [
+            storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 15), 1),
+        ])
+        self.trigger_processing([str(m.id)])
+        self.assertEqual([
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
+        ], self.storage.get_measures(m))
+        # shrink timespan
+        self.index.update_archive_policy(
+            name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
+        m = self.index.list_metrics(ids=[m.id])[0]
+        self.assertEqual([
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
+            (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
+        ], self.storage.get_measures(m))
+
 
 class TestMeasureQuery(base.BaseTestCase):
     def test_equal(self):
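The new test_resize_policy pins down the semantics of resizing an archive policy via update_archive_policy: growing points simply leaves room for new aggregates to accumulate, while shrinking truncates the stored series to the newest points entries. A minimal model of the shrink step (an assumed simplification of the driver's truncation):

    def shrink(series, points):
        # keep only the most recent `points` aggregates
        return series[-points:]

    series = ["12:00:00", "12:00:05", "12:00:10", "12:00:15"]
    assert shrink(series, 2) == ["12:00:10", "12:00:15"]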
Some files were not shown because too many files have changed in this diff.