Delete v2 gnocchi storage

This is part of a global effort to clean up the unmaintained parts of
CloudKitty's codebase.

This storage backend was only present for development purposes and was
not production-ready. A second v2 backend, with support for
HA/clustering, will be implemented in the future.

Change-Id: Iab9d152d2851ca385e607d338c0a09b74ba7e3b3
Story: 2004400
Task: 28568
Luka Peschke 2018-12-18 14:30:12 +01:00
parent 2d7415a3d3
commit 1ede03ba2c
42 changed files with 12 additions and 1341 deletions


@ -28,7 +28,6 @@ import cloudkitty.orchestrator
import cloudkitty.service
import cloudkitty.storage
import cloudkitty.storage.v1.hybrid.backends.gnocchi
import cloudkitty.storage.v2.gnocchi
import cloudkitty.storage.v2.influx
import cloudkitty.utils
@ -66,8 +65,6 @@ _opts = [
        cloudkitty.storage.v2.influx.influx_storage_opts))),
    ('storage_gnocchi', list(itertools.chain(
        cloudkitty.storage.v1.hybrid.backends.gnocchi.gnocchi_storage_opts))),
    ('storage_gnocchi', list(itertools.chain(
        cloudkitty.storage.v2.gnocchi.gnocchi_storage_opts))),
    (None, list(itertools.chain(
        cloudkitty.api.app.auth_opts,
        cloudkitty.service.service_opts))),


@ -1,763 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from collections import deque
from collections import Iterable
import copy
import datetime
import decimal
import time

from gnocchiclient import auth as gauth
from gnocchiclient import client as gclient
from gnocchiclient import exceptions as gexceptions
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
import six

from cloudkitty.storage.v2 import BaseStorage
from cloudkitty import utils as ck_utils

LOG = log.getLogger(__name__)

CONF = cfg.CONF

gnocchi_storage_opts = [
    cfg.StrOpt(
        'gnocchi_auth_type',
        default='keystone',
        choices=['keystone', 'basic'],
        help='(v2) Gnocchi auth type (keystone or basic). Keystone '
        'credentials can be specified through the "auth_section" parameter',
    ),
    cfg.StrOpt(
        'gnocchi_user',
        default='',
        help='(v2) Gnocchi user (for basic auth only)',
    ),
    cfg.StrOpt(
        'gnocchi_endpoint',
        default='',
        help='(v2) Gnocchi endpoint (for basic auth only)',
    ),
    cfg.StrOpt(
        'api_interface',
        default='internalURL',
        help='(v2) Endpoint URL type (for keystone auth only)',
    ),
    cfg.IntOpt(
        'measure_chunk_size',
        min=10, max=1000000,
        default=500,
        help='(v2) Maximum amount of measures to send to gnocchi at once '
        '(defaults to 500).',
    ),
]

CONF.register_opts(gnocchi_storage_opts, 'storage_gnocchi')
ks_loading.register_session_conf_options(CONF, 'storage_gnocchi')
ks_loading.register_auth_conf_options(CONF, 'storage_gnocchi')

RESOURCE_TYPE_NAME_ROOT = 'cloudkitty_metric_'
ARCHIVE_POLICY_NAME = 'cloudkitty_archive_policy'

GROUPBY_NAME_ROOT = 'groupby_attr_'
META_NAME_ROOT = 'meta_attr_'


class GnocchiResource(object):
    """Class representing a gnocchi resource.

    It provides utils for resource_type/resource creation and identification.
    """

    def __init__(self, name, metric, conn):
        """Resource_type name, metric, gnocchiclient"""
        self.name = name
        self.resource_type = RESOURCE_TYPE_NAME_ROOT + name
        self.unit = metric['vol']['unit']
        self.groupby = {
            k: v if v else '' for k, v in metric['groupby'].items()}
        self.metadata = {
            k: v if v else '' for k, v in metric['metadata'].items()}
        self._trans_groupby = {
            GROUPBY_NAME_ROOT + key: val for key, val in self.groupby.items()
        }
        self._trans_metadata = {
            META_NAME_ROOT + key: val for key, val in self.metadata.items()
        }
        self._conn = conn
        self._resource = None
        self.attributes = self.metadata.copy()
        self.attributes.update(self.groupby)
        self._trans_attributes = self._trans_metadata.copy()
        self._trans_attributes.update(self._trans_groupby)
        self.needs_update = False

    def __getitem__(self, key):
        output = self._trans_attributes.get(GROUPBY_NAME_ROOT + key, None)
        if output is None:
            output = self._trans_attributes.get(META_NAME_ROOT + key, None)
        return output

    def __eq__(self, other):
        if self.resource_type != other.resource_type or \
                self['id'] != other['id']:
            return False
        own_keys = list(self.groupby.keys())
        own_keys.sort()
        other_keys = list(other.groupby.keys())
        other_keys.sort()
        if own_keys != other_keys:
            return False
        for key in own_keys:
            if other[key] != self[key]:
                return False
        return True

    @property
    def qty(self):
        if self._resource:
            return self._resource['metrics']['qty']
        return None

    @property
    def cost(self):
        if self._resource:
            return self._resource['metrics']['cost']
        return None

    def _get_res_type_dict(self):
        attributes = {}
        for key in self._trans_groupby.keys():
            attributes[key] = {'required': True, 'type': 'string'}
        attributes['unit'] = {'required': True, 'type': 'string'}
        for key in self._trans_metadata.keys():
            attributes[key] = {'required': False, 'type': 'string'}
        return {
            'name': self.resource_type,
            'attributes': attributes,
        }

    def create_resource_type(self):
        """Creates the resource type corresponding to this resource."""
        try:
            self._conn.resource_type.get(self.resource_type)
        except gexceptions.ResourceTypeNotFound:
            res_type = self._get_res_type_dict()
            LOG.debug('Creating resource_type {} in gnocchi'.format(
                self.resource_type))
            self._conn.resource_type.create(res_type)

    @staticmethod
    def _get_rfc6902_attributes_add_op(new_attributes):
        return [{
            'op': 'add',
            'path': '/attributes/{}'.format(attr),
            'value': {
                'required': attr.startswith(GROUPBY_NAME_ROOT),
                'type': 'string'
            }
        } for attr in new_attributes]

    def update_resource_type(self):
        needed_res_type = self._get_res_type_dict()
        current_res_type = self._conn.resource_type.get(
            needed_res_type['name'])
        new_attributes = [attr for attr in needed_res_type['attributes'].keys()
                          if attr not in current_res_type['attributes'].keys()]
        if not new_attributes:
            return
        LOG.info('Adding {} to resource_type {}'.format(
            [attr.replace(GROUPBY_NAME_ROOT, '').replace(META_NAME_ROOT, '')
             for attr in new_attributes],
            current_res_type['name'].replace(RESOURCE_TYPE_NAME_ROOT, ''),
        ))
        new_attributes_op = self._get_rfc6902_attributes_add_op(new_attributes)
        self._conn.resource_type.update(
            needed_res_type['name'], new_attributes_op)

    def _create_metrics(self):
        qty = self._conn.metric.create(
            name='qty',
            unit=self.unit,
            archive_policy_name=ARCHIVE_POLICY_NAME,
        )
        cost = self._conn.metric.create(
            name='cost',
            archive_policy_name=ARCHIVE_POLICY_NAME,
        )
        return qty, cost

    def exists_in_gnocchi(self):
        """Check if the resource exists in gnocchi.

        Returns true if the resource exists.
        """
        query = {
            'and': [
                {'=': {key: value}}
                for key, value in self._trans_groupby.items()
            ],
        }
        res = self._conn.resource.search(resource_type=self.resource_type,
                                         query=query)
        if len(res) > 1:
            LOG.warning(
                "Found more than one metric matching groupby. This may not "
                "have the behavior you're expecting. You should probably add "
                "some items to groupby")
        if len(res) > 0:
            self._resource = res[0]
            return True
        return False

    def create(self):
        """Creates the resource in gnocchi."""
        if self._resource:
            return
        self.create_resource_type()
        qty_metric, cost_metric = self._create_metrics()
        resource = self._trans_attributes.copy()
        resource['metrics'] = {
            'qty': qty_metric['id'],
            'cost': cost_metric['id'],
        }
        resource['id'] = uuidutils.generate_uuid()
        resource['unit'] = self.unit
        if not self.exists_in_gnocchi():
            try:
                self._resource = self._conn.resource.create(
                    self.resource_type, resource)
            # Attributes have changed
            except gexceptions.BadRequest:
                self.update_resource_type()
                self._resource = self._conn.resource.create(
                    self.resource_type, resource)

    def update(self, metric):
        for key, val in metric['metadata'].items():
            self._resource[META_NAME_ROOT + key] = val
        self._resource = self._conn.resource.update(
            self.resource_type, self._resource['id'], self._resource)
        self.needs_update = False
        return self._resource


class GnocchiResourceCacher(object):
    """Class keeping created resources in memory to improve performance.

    It keeps the last max_size resources in cache.
    """

    def __init__(self, max_size=500):
        self._resources = deque(maxlen=max_size)

    def __contains__(self, resource):
        for r in self._resources:
            if r == resource:
                for key, val in resource.metadata.items():
                    if val != r[key]:
                        r.needs_update = True
                return True
        return False

    def add_resource(self, resource):
        """Add a resource to the cacher.

        :param resource: resource to add
        :type resource: GnocchiResource
        """
        for r in self._resources:
            if r == resource:
                return
        self._resources.append(resource)

    def get(self, resource):
        """Returns the resource matching the parameter.

        :param resource: resource to get
        :type resource: GnocchiResource
        """
        for r in self._resources:
            if r == resource:
                return r
        return None

    def get_by_id(self, resource_id):
        """Returns the resource matching the given id.

        :param resource_id: ID of the resource to get
        :type resource_id: str
        """
        for r in self._resources:
            if r['id'] == resource_id:
                return r
        return None


class GnocchiStorage(BaseStorage):

    default_op = ['aggregate', 'sum', ['metric', 'cost', 'sum'], ]

    def _check_archive_policy(self):
        try:
            self._conn.archive_policy.get(ARCHIVE_POLICY_NAME)
        except gexceptions.ArchivePolicyNotFound:
            definition = [
                {'granularity': str(CONF.collect.period) + 's',
                 'timespan': '{d} days'.format(d=self.get_retention().days)},
            ]
            archive_policy = {
                'name': ARCHIVE_POLICY_NAME,
                'back_window': 0,
                'aggregation_methods': [
                    'std', 'count', 'min', 'max', 'sum', 'mean'],
                'definition': definition,
            }
            self._conn.archive_policy.create(archive_policy)

    def __init__(self, *args, **kwargs):
        super(GnocchiStorage, self).__init__(*args, **kwargs)

        adapter_options = {'connect_retries': 3}
        if CONF.storage_gnocchi.gnocchi_auth_type == 'keystone':
            auth_plugin = ks_loading.load_auth_from_conf_options(
                CONF,
                'storage_gnocchi',
            )
            adapter_options['interface'] = CONF.storage_gnocchi.api_interface
        else:
            auth_plugin = gauth.GnocchiBasicPlugin(
                user=CONF.storage_gnocchi.gnocchi_user,
                endpoint=CONF.storage_gnocchi.gnocchi_endpoint,
            )
        self._conn = gclient.Client(
            '1',
            session_options={'auth': auth_plugin},
            adapter_options=adapter_options,
        )
        self._cacher = GnocchiResourceCacher()

    def init(self):
        self._check_archive_policy()

    def _check_resource(self, metric_name, metric):
        resource = GnocchiResource(metric_name, metric, self._conn)
        if resource in self._cacher:
            return self._cacher.get(resource)
        resource.create()
        self._cacher.add_resource(resource)
        return resource

    def _push_measures_to_gnocchi(self, measures):
        if measures:
            try:
                self._conn.metric.batch_metrics_measures(measures)
            except gexceptions.BadRequest:
                LOG.warning(
                    'An exception occurred while trying to push measures to '
                    'gnocchi. Retrying in 1 second. If this happens again, '
                    'set measure_chunk_size to a lower value.')
                time.sleep(1)
                self._conn.metric.batch_metrics_measures(measures)

    # Do not use scope_id, as it is deprecated and will be
    # removed together with the v1 storage
    def push(self, dataframes, scope_id=None):
        if not isinstance(dataframes, list):
            dataframes = [dataframes]
        measures = {}
        nb_measures = 0
        for dataframe in dataframes:
            timestamp = dataframe['period']['begin']
            for metric_name, metrics in dataframe['usage'].items():
                for metric in metrics:
                    resource = self._check_resource(metric_name, metric)
                    if resource.needs_update:
                        resource.update(metric)
                    if not resource.qty or not resource.cost:
                        LOG.warning('Resource {} provides no qty or cost '
                                    'metric, skipping'.format(resource.name))
                        continue

                    # resource.qty is the uuid of the qty metric
                    if not measures.get(resource.qty):
                        measures[resource.qty] = []
                    measures[resource.qty].append({
                        'timestamp': timestamp,
                        'value': metric['vol']['qty'],
                    })

                    if not measures.get(resource.cost):
                        measures[resource.cost] = []
                    measures[resource.cost].append({
                        'timestamp': timestamp,
                        'value': metric['rating']['price'],
                    })
                    nb_measures += 2

                    if nb_measures >= CONF.storage_gnocchi.measure_chunk_size:
                        LOG.debug('Pushing {} measures to gnocchi.'.format(
                            nb_measures))
                        self._push_measures_to_gnocchi(measures)
                        measures = {}
                        nb_measures = 0

        LOG.debug('Pushing {} measures to gnocchi.'.format(nb_measures))
        self._push_measures_to_gnocchi(measures)

    def _get_ck_resource_types(self):
        types = self._conn.resource_type.list()
        return [gtype['name'] for gtype in types
                if gtype['name'].startswith(RESOURCE_TYPE_NAME_ROOT)]

    def _check_res_types(self, res_type=None):
        if res_type is None:
            output = self._get_ck_resource_types()
        elif isinstance(res_type, Iterable):
            output = res_type
        else:
            output = [res_type]
        return sorted(output)

    @staticmethod
    def _check_begin_end(begin, end):
        if not begin:
            begin = ck_utils.get_month_start()
        if not end:
            end = ck_utils.get_next_month()
        if isinstance(begin, six.text_type):
            begin = ck_utils.iso2dt(begin)
        if isinstance(begin, int):
            begin = ck_utils.ts2dt(begin)
        if isinstance(end, six.text_type):
            end = ck_utils.iso2dt(end)
        if isinstance(end, int):
            end = ck_utils.ts2dt(end)
        return begin, end

    def _get_resource_frame(self,
                            cost_measure,
                            qty_measure,
                            resource):
        # Getting price
        price = decimal.Decimal(cost_measure[2])
        price_dict = {'price': float(price)}

        # Getting vol
        vol_dict = {
            'qty': decimal.Decimal(qty_measure[2]),
            'unit': resource.get('unit'),
        }

        # Formatting
        groupby = {
            k.replace(GROUPBY_NAME_ROOT, ''): v
            for k, v in resource.items() if k.startswith(GROUPBY_NAME_ROOT)
        }
        metadata = {
            k.replace(META_NAME_ROOT, ''): v
            for k, v in resource.items() if k.startswith(META_NAME_ROOT)
        }
        return {
            'groupby': groupby,
            'metadata': metadata,
            'vol': vol_dict,
            'rating': price_dict,
        }

    def _to_cloudkitty(self,
                       res_type,
                       resource,
                       cost_measure,
                       qty_measure):
        start = cost_measure[0]
        stop = start + datetime.timedelta(seconds=cost_measure[1])

        # Period
        period_dict = {
            'begin': ck_utils.dt2iso(start),
            'end': ck_utils.dt2iso(stop),
        }

        return {
            'usage': {res_type: [
                self._get_resource_frame(cost_measure, qty_measure, resource)],
            },
            'period': period_dict,
        }

    def _get_resource_info(self, resource_ids, start, stop):
        search = {
            'and': [
                {
                    'or': [
                        {
                            '=': {'id': resource_id},
                        }
                        for resource_id in resource_ids
                    ],
                },
            ],
        }

        resources = []
        marker = None
        while True:
            resource_chunk = self._conn.resource.search(query=search,
                                                        details=True,
                                                        marker=marker,
                                                        sorts=['id:asc'])
            if len(resource_chunk) < 1:
                break
            marker = resource_chunk[-1]['id']
            resources += resource_chunk
        return {resource['id']: resource for resource in resources}

    @staticmethod
    def _dataframes_to_list(dataframes):
        keys = sorted(dataframes.keys())
        return [dataframes[key] for key in keys]

    def _get_dataframes(self, measures, resource_info):
        dataframes = {}

        for measure in measures:
            resource_type = measure['group']['type']
            resource_id = measure['group']['id']

            # Raw metrics do not contain all required attributes
            resource = resource_info[resource_id]

            dataframe = dataframes.get(measure['cost'][0])
            ck_resource_type_name = resource_type.replace(
                RESOURCE_TYPE_NAME_ROOT, '')
            if dataframe is None:
                dataframes[measure['cost'][0]] = self._to_cloudkitty(
                    ck_resource_type_name,
                    resource,
                    measure['cost'],
                    measure['qty'])
            elif dataframe['usage'].get(ck_resource_type_name) is None:
                dataframe['usage'][ck_resource_type_name] = [
                    self._get_resource_frame(
                        measure['cost'], measure['qty'], resource)]
            else:
                dataframe['usage'][ck_resource_type_name].append(
                    self._get_resource_frame(
                        measure['cost'], measure['qty'], resource))
        return self._dataframes_to_list(dataframes)

    @staticmethod
    def _create_filters(filters, group_filters):
        output = {}
        if filters:
            for k, v in filters.items():
                output[META_NAME_ROOT + k] = v
        if group_filters:
            for k, v in group_filters.items():
                output[GROUPBY_NAME_ROOT + k] = v
        return output

    def _raw_metrics_to_distinct_measures(self,
                                          raw_cost_metrics,
                                          raw_qty_metrics):
        output = []
        for cost, qty in zip(raw_cost_metrics, raw_qty_metrics):
            output += [{
                'cost': cost_measure,
                'qty': qty['measures']['measures']['aggregated'][idx],
                'group': cost['group'],
            } for idx, cost_measure in enumerate(
                cost['measures']['measures']['aggregated'])
            ]

        # Sorting by timestamp, metric type and resource ID
        output.sort(key=lambda x: (
            x['cost'][0], x['group']['type'], x['group']['id']))
        return output

    def retrieve(self, begin=None, end=None,
                 filters=None, group_filters=None,
                 metric_types=None,
                 offset=0, limit=100, paginate=True):
        begin, end = self._check_begin_end(begin, end)

        metric_types = self._check_res_types(metric_types)

        # Getting a list of active gnocchi resources with measures
        filters = self._create_filters(filters, group_filters)

        # FIXME(lukapeschke): We query all resource types in order to get the
        # total amount of dataframes, but this could be done in a better way;
        # ie. by not doing additional queries once the limit is reached
        raw_cost_metrics = []
        raw_qty_metrics = []
        for mtype in metric_types:
            cost_metrics, qty_metrics = self._single_resource_type_aggregates(
                begin, end, mtype, ['type', 'id'], filters, fetch_qty=True)
            raw_cost_metrics += cost_metrics
            raw_qty_metrics += qty_metrics
        measures = self._raw_metrics_to_distinct_measures(
            raw_cost_metrics, raw_qty_metrics)

        result = {'total': len(measures)}

        if paginate:
            measures = measures[offset:limit]
        if len(measures) < 1:
            return {
                'total': 0,
                'dataframes': [],
            }
        resource_ids = [measure['group']['id'] for measure in measures]

        resource_info = self._get_resource_info(resource_ids, begin, end)

        result['dataframes'] = self._get_dataframes(measures, resource_info)
        return result

    def _single_resource_type_aggregates(self,
                                         start, stop,
                                         metric_type,
                                         groupby,
                                         filters,
                                         fetch_qty=False):
        search = {
            'and': [
                {'=': {'type': metric_type}}
            ]
        }
        search['and'] += [{'=': {k: v}} for k, v in filters.items()]

        cost_op = self.default_op
        output = (
            self._conn.aggregates.fetch(
                cost_op,
                search=search,
                groupby=groupby,
                resource_type=metric_type,
                start=start, stop=stop),
            None
        )
        if fetch_qty:
            qty_op = copy.deepcopy(self.default_op)
            qty_op[2][1] = 'qty'
            output = (
                output[0],
                self._conn.aggregates.fetch(
                    qty_op,
                    search=search,
                    groupby=groupby,
                    resource_type=metric_type,
                    start=start, stop=stop)
            )
        return output

    @staticmethod
    def _ungroup_type(rated_resources):
        output = []
        for rated_resource in rated_resources:
            rated_resource['group'].pop('type', None)
            new_item = True
            for elem in output:
                if rated_resource['group'] == elem['group']:
                    elem['measures']['measures']['aggregated'] \
                        += rated_resource['measures']['measures']['aggregated']
                    new_item = False
                    break
            if new_item:
                output.append(rated_resource)
        return output

    def total(self, groupby=None,
              begin=None, end=None,
              metric_types=None,
              filters=None, group_filters=None,
              offset=0, limit=1000, paginate=True):
        begin, end = self._check_begin_end(begin, end)

        if groupby is None:
            groupby = []
        request_groupby = [
            GROUPBY_NAME_ROOT + elem for elem in groupby if elem != 'type']
        # We need to have at least one attribute on which to group
        request_groupby.append('type')

        # NOTE(lukapeschke): For now, it isn't possible to group aggregates
        # from different resource types using custom attributes, so we need
        # to do one request per resource type.
        rated_resources = []
        metric_types = self._check_res_types(metric_types)
        filters = self._create_filters(filters, group_filters)
        for mtype in metric_types:
            resources, _ = self._single_resource_type_aggregates(
                begin, end, mtype, request_groupby, filters)

            for resource in resources:
                # If we have found something
                if len(resource['measures']['measures']['aggregated']):
                    rated_resources.append(resource)

        result = {'total': len(rated_resources)}

        if paginate:
            rated_resources = rated_resources[offset:limit]
        if len(rated_resources) < 1:
            return {
                'total': 0,
                'results': [],
            }

        # NOTE(lukapeschke): We undo what has been done previously (grouping
        # per type). This is not performant. Should be fixed as soon as the
        # previous note is supported in gnocchi
        if 'type' not in groupby:
            rated_resources = self._ungroup_type(rated_resources)

        output = []
        for rated_resource in rated_resources:
            rate = sum(measure[2] for measure in
                       rated_resource['measures']['measures']['aggregated'])
            output_elem = {
                'begin': begin,
                'end': end,
                'rate': rate,
            }
            for group in groupby:
                output_elem[group] = rated_resource['group'].get(
                    GROUPBY_NAME_ROOT + group, '')
            # If we want to group per type
            if 'type' in groupby:
                output_elem['type'] = rated_resource['group'].get(
                    'type', '').replace(RESOURCE_TYPE_NAME_ROOT, '') or ''
            output.append(output_elem)
        result['results'] = output
        return result


@ -14,15 +14,11 @@
# under the License.
#
"""Test SummaryModel objects."""
import testtools
from oslotest import base
from cloudkitty.api.v1.datamodels import report
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TestSummary(base.BaseTestCase):
    def setUp(self):


@ -14,17 +14,12 @@
# under the License.
#
"""Test cloudkitty/api/v1/types."""
import testtools
from oslotest import base
from wsme import types as wtypes
from cloudkitty.api.v1 import types
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TestTypes(base.BaseTestCase):
    def setUp(self):


@ -14,15 +14,11 @@
# under the License.
#
#
import testtools
from cloudkitty.collector import gnocchi
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class GnocchiCollectorTest(tests.TestCase):
    def setUp(self):
        super(GnocchiCollectorTest, self).setUp()


@ -17,17 +17,14 @@
#
from decimal import Decimal
import mock
import testtools
from cloudkitty import collector
from cloudkitty.collector import prometheus
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import transformer
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PrometheusCollectorTest(tests.TestCase):
    def setUp(self):
        super(PrometheusCollectorTest, self).setUp()
@ -132,7 +129,6 @@ class PrometheusCollectorTest(tests.TestCase):
)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PrometheusClientTest(tests.TestCase):
    def setUp(self):
        super(PrometheusClientTest, self).setUp()


@ -18,7 +18,6 @@
import abc
import decimal
import os
from unittest.case import SkipTest
from gabbi import fixture
import mock
@ -45,7 +44,6 @@ from cloudkitty import storage
from cloudkitty.storage.v1.sqlalchemy import models
from cloudkitty import tests
from cloudkitty.tests import utils as test_utils
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils as ck_utils
@ -86,10 +84,9 @@ class BaseExtensionFixture(fixture.GabbiFixture):
        self.patch.return_value = fake_mgr

    def stop_fixture(self):
        if not is_functional_test():
            self.patch.assert_called_with(
                self.namespace,
                **self.assert_args)
        self.patch.assert_called_with(
            self.namespace,
            **self.assert_args)
        self.mock.stop()
@ -399,13 +396,6 @@ class MetricsConfFixture(fixture.GabbiFixture):
        ck_utils.load_conf = self._original_function


class SkipIfFunctional(fixture.GabbiFixture):

    def start_fixture(self):
        if is_functional_test():
            raise SkipTest


def setup_app():
    messaging.setup()


# FIXME(sheeprine): Extension fixtures are interacting with transformers


@ -2,7 +2,6 @@ fixtures:
- ConfigFixtureKeystoneAuth
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: Can't query api without token


@ -1,7 +1,6 @@
fixtures:
- ConfigFixture
- CORSConfigFixture
- SkipIfFunctional
tests:


@ -2,7 +2,6 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: Can query api without auth


@ -1,6 +1,5 @@
fixtures:
- ConfigFixture
- SkipIfFunctional
tests:
- name: test if / is publicly available


@ -1,6 +1,5 @@
fixtures:
- ConfigFixture
- SkipIfFunctional
tests:


@ -1,7 +1,6 @@
fixtures:
- ConfigFixture
- MetricsConfFixture
- SkipIfFunctional
tests:
- name: get config


@ -2,7 +2,6 @@ fixtures:
- ConfigFixture
- RatingModulesFixture
- QuoteFakeRPC
- SkipIfFunctional
tests:
- name: reload list of modules available


@ -2,7 +2,6 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: get period with two tenants


@ -2,7 +2,6 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: fetch period with no data


@ -1,6 +1,5 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:


@ -1,6 +1,5 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:


@ -1,7 +1,6 @@
fixtures:
- HashMapConfigFixture
- UUIDFixture
- SkipIfFunctional
tests:


@ -1,6 +1,5 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:


@ -1,7 +1,6 @@
fixtures:
- PyScriptsConfigFixture
- UUIDFixture
- SkipIfFunctional
tests:


@ -16,7 +16,6 @@
# @author: Luka Peschke
#
import mock
import testtools
from gnocchiclient import exceptions as gexc
@ -55,7 +54,6 @@ class PermissiveDict(object):
        return self.value == other.get(self.key)


@testtools.skipIf(test_utils.is_functional_test(), 'Not a functional test')
class HybridStorageTestGnocchi(BaseHybridStorageTest):

    def setUp(self):


@ -16,7 +16,6 @@
# @author: Stéphane Albert
#
import copy
import testtools
import mock
import testscenarios
@ -65,7 +64,6 @@ class StorageTest(tests.TestCase):
        self.storage.push(working_data, self._other_tenant_id)


@testtools.skipIf(test_utils.is_functional_test(), 'Not a functional test')
class StorageDataframeTest(StorageTest):

    storage_scenarios = [
@ -129,7 +127,6 @@ class StorageDataframeTest(StorageTest):
        self.assertEqual(3, len(data))


@testtools.skipIf(test_utils.is_functional_test(), 'Not a functional test')
class StorageTotalTest(StorageTest):

    storage_scenarios = [
@ -269,7 +266,6 @@ class StorageTotalTest(StorageTest):
        self.assertEqual(end, total[3]["end"])


if not test_utils.is_functional_test():
    StorageTest.generate_scenarios()
    StorageTotalTest.generate_scenarios()
    StorageDataframeTest.generate_scenarios()
StorageTest.generate_scenarios()
StorageTotalTest.generate_scenarios()
StorageDataframeTest.generate_scenarios()


@ -1,351 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
import copy
from datetime import datetime
import decimal

import fixtures
import testtools

from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import uuidutils

from cloudkitty import storage
from cloudkitty.tests import utils as test_utils
from cloudkitty import utils as ck_utils

CONF = None


def _init_conf():
    global CONF
    if not CONF:
        CONF = cfg.CONF
        CONF(args=[], project='cloudkitty',
             validate_default_values=True,
             default_config_files=['/etc/cloudkitty/cloudkitty.conf'])


class BaseFunctionalStorageTest(testtools.TestCase):

    # Name of the storage backend to test
    storage_backend = None
    storage_version = 0

    @classmethod
    def setUpClass(cls):
        _init_conf()
        cls._conf_fixture = config_fixture.Config(conf=CONF)
        cls._conf_fixture.set_config_files(
            ['/etc/cloudkitty/cloudkitty.conf'])
        cls.conf = cls._conf_fixture.conf
        cls.conf.set_override('version', cls.storage_version, 'storage')
        cls.conf.set_override('backend', cls.storage_backend, 'storage')
        cls.storage = storage.get_storage()
        cls.storage.init()
        cls.project_ids, cls.data = cls.gen_data_separate_projects(3)
        for i, project_data in enumerate(cls.data):
            cls.storage.push(project_data, cls.project_ids[i])

        # Appending data for a second period for the first project
        data_next_period = copy.deepcopy(cls.data[0])
        data_next_period['period']['begin'] += 3600
        data_next_period['period']['end'] += 3600
        cls.storage.push(data_next_period, cls.project_ids[0])
        cls.project_ids.append(cls.project_ids[0])
        cls.data.append(data_next_period)

        cls.wait_for_backend()

    @classmethod
    def tearDownClass(cls):
        cls.cleanup_backend()
        # cls._conf_fixture.cleanUp()
        # pass

    def setUp(self):
        super(BaseFunctionalStorageTest, self).setUp()
        self.useFixture(fixtures.FakeLogger())
        self.useFixture(self._conf_fixture)

    def cleanUp(self):
        super(BaseFunctionalStorageTest, self).cleanUp()

    @classmethod
    def wait_for_backend(cls):
        """Function waiting for the storage backend to be ready.

        Ex: wait for gnocchi to have processed all metrics
        """

    @classmethod
    def cleanup_backend(cls):
        """Function deleting everything from the storage backend"""

    @staticmethod
    def gen_data_separate_projects(nb_projects):
        project_ids = [uuidutils.generate_uuid() for i in range(nb_projects)]
        data = [
            test_utils.generate_v2_storage_data(
                project_ids=project_ids[i], nb_projects=1)
            for i in range(nb_projects)]
        return project_ids, data

    def test_get_retention(self):
        retention = self.storage.get_retention().days * 24
        self.assertEqual(retention, self.conf.storage.retention_period)

    @staticmethod
    def _validate_filters(comp, filters=None, group_filters=None):
        if group_filters:
            for k, v in group_filters.items():
                if comp['groupby'].get(k) != v:
                    return False
        if filters:
            for k, v in filters.items():
                if comp['metadata'].get(k) != v:
                    return False
        return True

    def _get_expected_total(self, begin=None, end=None,
                            filters=None, group_filters=None):
        total = decimal.Decimal(0)
        for dataframes in self.data:
            if (ck_utils.ts2dt(dataframes['period']['begin']) >= end
                    or ck_utils.ts2dt(dataframes['period']['end']) <= begin):
                continue
            for df in dataframes['usage'].values():
                for elem in df:
                    if self._validate_filters(elem, filters, group_filters):
                        total += elem['rating']['price']
        return total

    def _compare_totals(self, expected_total, total):
        self.assertEqual(len(total), len(expected_total))
        for i in range(len(total)):
            self.assertEqual(
                round(expected_total[i], 5),
                round(decimal.Decimal(total[i]['rate']), 5),
            )

    def test_get_total_all_projects_on_time_window_with_data_no_grouping(self):
        expected_total = self._get_expected_total(
            begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 1))

        total = self.storage.total(begin=datetime(2018, 1, 1),
                                   end=datetime(2018, 1, 1, 1))
        self.assertEqual(len(total), 1)
        self.assertEqual(
            round(expected_total, 5),
            round(decimal.Decimal(total[0]['rate']), 5),
        )

    def test_get_total_one_project_on_time_window_with_data_no_grouping(self):
        group_filters = {'project_id': self.project_ids[0]}
        expected_total = self._get_expected_total(
            begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 1),
            group_filters=group_filters)

        total = self.storage.total(begin=datetime(2018, 1, 1),
                                   end=datetime(2018, 1, 1, 1),
                                   group_filters=group_filters)
        self.assertEqual(len(total), 1)
        self.assertEqual(
            round(expected_total, 5),
            round(decimal.Decimal(total[0]['rate']), 5),
        )

    def test_get_total_all_projects_window_with_data_group_by_project_id(self):
        expected_total = []
        for project_id in sorted(self.project_ids[:-1]):
            group_filters = {'project_id': project_id}
            expected_total.append(self._get_expected_total(
                begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 1),
                group_filters=group_filters))

        total = self.storage.total(begin=datetime(2018, 1, 1),
                                   end=datetime(2018, 1, 1, 1),
                                   groupby=['project_id'])
        total = sorted(total, key=lambda k: k['project_id'])
        self._compare_totals(expected_total, total)

    def test_get_total_one_project_window_with_data_group_by_resource_id(self):
        expected_total = []
        for df in self.data[0]['usage'].values():
            expected_total += copy.deepcopy(df)
        for df in self.data[-1]['usage'].values():
            for df_elem in df:
                for elem in expected_total:
                    if elem['groupby'] == df_elem['groupby']:
                        elem['rating']['price'] += df_elem['rating']['price']
        expected_total = sorted(
            expected_total, key=lambda k: k['groupby']['id'])
        expected_total = [i['rating']['price'] for i in expected_total]

        total = self.storage.total(
            begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 2),
            group_filters={'project_id': self.project_ids[0]},
            groupby=['id'])
        total = sorted(total, key=lambda k: k['id'])
        self._compare_totals(expected_total, total)

    def test_get_total_all_projects_group_by_resource_id_project_id(self):
        expected_total = []
        for data in self.data[:-1]:
            for df in data['usage'].values():
                expected_total += copy.deepcopy(df)
        for df in self.data[-1]['usage'].values():
            for elem in df:
                for total_elem in expected_total:
                    if total_elem['groupby'] == elem['groupby']:
                        total_elem['rating']['price'] \
                            += elem['rating']['price']
        expected_total = sorted(
            expected_total, key=lambda k: k['groupby']['id'])
        expected_total = [i['rating']['price'] for i in expected_total]

        total = self.storage.total(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            groupby=['id', 'project_id'])
        total = sorted(total, key=lambda k: k['id'])
        self._compare_totals(expected_total, total)

    def test_get_total_all_projects_group_by_resource_type(self):
        expected_total = {}
        for data in self.data:
            for res_type, df in data['usage'].items():
                if expected_total.get(res_type):
                    expected_total[res_type] += sum(
                        elem['rating']['price'] for elem in df)
                else:
                    expected_total[res_type] = sum(
                        elem['rating']['price'] for elem in df)
        expected_total = [
            expected_total[key] for key in sorted(expected_total.keys())]

        total = self.storage.total(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            groupby=['type'])
        total = sorted(total, key=lambda k: k['type'])
        self._compare_totals(expected_total, total)

    def test_get_total_one_project_group_by_resource_type(self):
        expected_total = {}
        for res_type, df in self.data[0]['usage'].items():
            expected_total[res_type] = sum(
                elem['rating']['price'] for elem in df)
        expected_total = [
            expected_total[key] for key in sorted(expected_total.keys())]

        group_filters = {'project_id': self.project_ids[0]}
        total = self.storage.total(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 1, 1, 1),
            group_filters=group_filters,
            groupby=['type'])
        total = sorted(total, key=lambda k: k['type'])
        self._compare_totals(expected_total, total)

    def test_get_total_no_data_period(self):
        total = self.storage.total(
            begin=datetime(2018, 2, 1), end=datetime(2018, 2, 1, 1))
        self.assertEqual(0, len(total))

    def test_retrieve_all_projects_with_data(self):
        expected_length = sum(
            len(data['usage'].values()) for data in self.data)
        frames = self.storage.retrieve(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            limit=1000)
        self.assertEqual(expected_length, frames['total'])
        self.assertEqual(2, len(frames['dataframes']))

    def test_retrieve_one_project_with_data(self):
        expected_length = len(self.data[0]['usage'].values()) \
            + len(self.data[-1]['usage'].values())
        group_filters = {'project_id': self.project_ids[0]}
        frames = self.storage.retrieve(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            group_filters=group_filters,
            limit=1000)
        self.assertEqual(expected_length, frames['total'])
        self.assertEqual(2, len(frames['dataframes']))
        for metric_type in self.data[0]['usage'].keys():
            self.assertEqual(
                len(frames['dataframes'][0]['usage'][metric_type]),
                len(self.data[0]['usage'][metric_type]))
        for metric_type in self.data[-1]['usage'].keys():
            self.assertEqual(
                len(frames['dataframes'][1]['usage'][metric_type]),
                len(self.data[-1]['usage'][metric_type]))

    def test_retrieve_pagination_one_project(self):
        expected_length = len(self.data[0]['usage'].values()) \
            + len(self.data[-1]['usage'].values())
        group_filters = {'project_id': self.project_ids[0]}
        first_frames = self.storage.retrieve(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            group_filters=group_filters,
            limit=5)
        last_frames = self.storage.retrieve(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            group_filters=group_filters,
            offset=5,
            limit=1000)
        all_frames = self.storage.retrieve(
            begin=datetime(2018, 1, 1),
            end=datetime(2018, 2, 1),
            group_filters=group_filters,
            paginate=False)
        self.assertEqual(expected_length, first_frames['total'])
        self.assertEqual(expected_length, last_frames['total'])

        real_length = 0
        paginated_measures = []
        for frame in first_frames['dataframes'] + last_frames['dataframes']:
            for measures in frame['usage'].values():
                real_length += len(measures)
                paginated_measures += measures
        paginated_measures = sorted(
            paginated_measures, key=lambda x: x['groupby']['id'])
        all_measures = []
        for frame in all_frames['dataframes']:
            for measures in frame['usage'].values():
                all_measures += measures
        all_measures = sorted(
            all_measures, key=lambda x: x['groupby']['id'])

        self.assertEqual(expected_length, real_length)
        self.assertEqual(paginated_measures, all_measures)


@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
import testtools
from time import sleep

from gnocchiclient import exceptions as gexceptions
from oslo_log import log

from cloudkitty.tests.storage.v2 import base_functional
from cloudkitty.tests.utils import is_functional_test

LOG = log.getLogger(__name__)


@testtools.skipUnless(is_functional_test(), 'Test is not a functional test')
class GnocchiBaseFunctionalStorageTest(
        base_functional.BaseFunctionalStorageTest):

    storage_backend = 'gnocchi'
    storage_version = 2

    def setUp(self):
        super(GnocchiBaseFunctionalStorageTest, self).setUp()
        self.conf.import_group(
            'storage_gnocchi', 'cloudkitty.storage.v2.gnocchi')

    @classmethod
    def _get_status(cls):
        status = cls.storage._conn.status.get()
        return status['storage']['summary']['measures']

    @classmethod
    def wait_for_backend(cls):
        while True:
            status = cls._get_status()
            if status == 0:
                break
            LOG.info('Waiting for gnocchi to have processed all measures, '
                     '{} left.'.format(status))
            sleep(1)

    @classmethod
    def cleanup_backend(cls):
        for res_type in cls.storage._get_ck_resource_types():
            batch_query = {">=": {"started_at": "1970-01-01T01:00:00"}}
            cls.storage._conn.resource.batch_delete(
                batch_query, resource_type=res_type)
            try:
                cls.storage._conn.resource_type.delete(res_type)
            except gexceptions.BadRequest:
                pass
        try:
            cls.storage._conn.archive_policy.delete(
                'cloudkitty_archive_policy')
        except gexceptions.BadRequest:
            pass


@ -323,5 +323,4 @@ class StorageUnitTest(TestCase):
        self.assertEqual(expected_length, retrieved_length)


if not test_utils.is_functional_test():
    StorageUnitTest.generate_scenarios()
StorageUnitTest.generate_scenarios()


@ -13,14 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import testtools
from cloudkitty.common import config as ck_config
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class ConfigTest(tests.TestCase):
    def test_config(self):
        ck_config.list_opts()


@ -13,7 +13,6 @@
# under the License.
import sys
import testtools
import textwrap
import ddt
@ -22,10 +21,8 @@ import pep8
from cloudkitty.hacking import checks
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
@ddt.ddt
class HackingTestCase(tests.TestCase):
    """Hacking test cases


@ -17,7 +17,6 @@
#
import copy
import decimal
import testtools
import mock
from oslo_utils import uuidutils
@ -25,7 +24,6 @@ from oslo_utils import uuidutils
from cloudkitty.rating import hash
from cloudkitty.rating.hash.db import api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
TEST_TS = 1388577600
@ -84,7 +82,6 @@ CK_RESOURCES_DATA = [{
"unit": "instance"}}]}}]
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class HashMapRatingTest(tests.TestCase):
    def setUp(self):
        super(HashMapRatingTest, self).setUp()


@ -15,7 +15,6 @@
#
# @author: Stéphane Albert
#
import testtools
import unittest
import mock
@ -23,7 +22,6 @@ from oslo_utils import uuidutils
from cloudkitty.fetcher import keystone
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeRole(object):
@ -69,7 +67,6 @@ def Client(**kwargs):
    return FakeKeystoneClient(**kwargs)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class KeystoneFetcherTest(tests.TestCase):
    def setUp(self):
        super(KeystoneFetcherTest, self).setUp()


@ -15,15 +15,12 @@
#
# @author: Stéphane Albert
#
import testtools
import mock
from oslo_messaging import conffixture
from stevedore import extension
from cloudkitty import orchestrator
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeKeystoneClient(object):
@ -36,7 +33,6 @@ class FakeKeystoneClient(object):
    tenants = FakeTenants()
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class OrchestratorTest(tests.TestCase):
    def setUp(self):
        super(OrchestratorTest, self).setUp()


@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import testtools
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@ -22,14 +21,12 @@ from oslo_policy import policy as oslo_policy
from cloudkitty.common import policy
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils
CONF = cfg.CONF
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PolicyFileTestCase(tests.TestCase):
    def setUp(self):
@ -61,7 +58,6 @@ class PolicyFileTestCase(tests.TestCase):
self.context, action, self.target)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PolicyTestCase(tests.TestCase):
    def setUp(self):


@ -18,7 +18,6 @@
import copy
import decimal
import hashlib
import testtools
import zlib
import mock
@ -28,7 +27,6 @@ import six
from cloudkitty.rating import pyscripts
from cloudkitty.rating.pyscripts.db import api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
FAKE_UUID = '6c1b8a30-797f-4b7e-ad66-9879b79059fb'
@ -106,7 +104,6 @@ for period in data:
""".encode('utf-8')
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PyScriptsRatingTest(tests.TestCase):
    def setUp(self):
        super(PyScriptsRatingTest, self).setUp()


@ -15,13 +15,10 @@
#
# @author: Stéphane Albert
#
import testtools
import mock
from cloudkitty.db import api as ck_db_api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeRPCClient(object):
@ -42,7 +39,6 @@ class FakeRPCClient(object):
        self._queue.append(cast_data)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class RatingTest(tests.TestCase):
    def setUp(self):
        super(RatingTest, self).setUp()


@ -16,14 +16,11 @@
# @author: Gauvain Pocentek
#
import datetime
import testtools
from cloudkitty import state
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class DBStateManagerTest(tests.TestCase):
    def setUp(self):
        super(DBStateManagerTest, self).setUp()


@ -19,13 +19,11 @@ import datetime
import decimal
import fractions
import itertools
import testtools
import unittest
import mock
from oslo_utils import timeutils
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils as ck_utils
@ -33,7 +31,6 @@ def iso2dt(iso_str):
    return timeutils.parse_isotime(iso_str)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class UtilsTimeCalculationsTest(unittest.TestCase):
    def setUp(self):
        self.date_ts = 1416219015
@ -144,7 +141,6 @@ class UtilsTimeCalculationsTest(unittest.TestCase):
        self.assertEqual(calc_dt, check_dt)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class ConvertUnitTest(unittest.TestCase):
    """Class testing the convert_unit and num2decimal function"""

    possible_args = [


@ -16,12 +16,10 @@
# @author: Stéphane Albert
#
import copy
import testtools
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import transformers as t_transformers
from cloudkitty.tests.utils import is_functional_test
TRANS_METADATA = {
    'availability_zone': 'nova',
@ -32,7 +30,6 @@ TRANS_METADATA = {
    'vcpus': '1'}


@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TransformerBaseTest(tests.TestCase):

    def test_strip_resource_on_dict(self):
        metadata = copy.deepcopy(samples.COMPUTE_METADATA)


@ -17,7 +17,6 @@
#
import copy
from datetime import datetime
from os import getenv
import random
from oslo_utils import uuidutils
@ -26,10 +25,6 @@ from cloudkitty.tests import samples
from cloudkitty import utils as ck_utils
def is_functional_test():
    return getenv('TEST_FUNCTIONAL', False)
def generate_v2_storage_data(min_length=10,
                             nb_projects=2,
                             project_ids=None,


@ -14,49 +14,3 @@ implement the following abstract class:
You'll then need to register an entrypoint corresponding to your storage
backend in the ``cloudkitty.storage.v2.backends`` section of the ``setup.cfg``
file.
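For instance, the entrypoint section looks like this (``mybackend`` is a
hypothetical driver; the ``influxdb`` line matches the ``setup.cfg`` hunk
later in this commit):

.. code:: ini

   cloudkitty.storage.v2.backends =
       influxdb = cloudkitty.storage.v2.influx:InfluxStorage
       mybackend = cloudkitty.storage.v2.mybackend:MyBackendStorage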
Testing
=======

There is a generic test class for v2 storage backends. It allows a
functional test suite to be run against a new v2 storage backend.

.. code:: shell

   $ tree cloudkitty/tests/storage/v2
   cloudkitty/tests/storage/v2
   ├── base_functional.py
   ├── __init__.py
   └── test_gnocchi_functional.py

In order to use the class, add a file called ``test_mybackend_functional.py``
to the ``cloudkitty/tests/storage/v2`` directory. You will then need to write
a class inheriting from ``BaseFunctionalStorageTest``. Specify the storage
version and the backend name as class attributes.

Example:

.. code:: python

   import testtools

   from cloudkitty.tests.storage.v2 import base_functional
   from cloudkitty.tests.utils import is_functional_test


   @testtools.skipUnless(is_functional_test(), 'Test is not a functional test')
   class GnocchiBaseFunctionalStorageTest(
           base_functional.BaseFunctionalStorageTest):

       storage_backend = 'gnocchi'
       storage_version = 2

Two methods need to be implemented (a sketch combining both follows this
list):

* ``wait_for_backend``: This method is called once dataframes have been
  pushed to the storage backend (in gnocchi's case, it waits for all
  measures to have been processed). It is a classmethod.

* ``cleanup_backend``: This method is called at the end of the test suite
  in order to delete all data from the storage backend. It is a
  classmethod.
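The sketch below is illustrative only: the ``mybackend`` driver name and the
two method bodies are hypothetical placeholders, not a real backend.

.. code:: python

   import testtools

   from cloudkitty.tests.storage.v2 import base_functional
   from cloudkitty.tests.utils import is_functional_test


   @testtools.skipUnless(is_functional_test(), 'Test is not a functional test')
   class MyBackendFunctionalStorageTest(
           base_functional.BaseFunctionalStorageTest):

       storage_backend = 'mybackend'
       storage_version = 2

       @classmethod
       def wait_for_backend(cls):
           # Block until the backend has ingested every pushed dataframe;
           # a synchronous backend can simply do nothing here.
           pass

       @classmethod
       def cleanup_backend(cls):
           # Delete everything the test suite pushed, so that consecutive
           # runs stay independent.
           pass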


@ -0,0 +1,5 @@
---
deprecations:
  - |
    The gnocchi v2 storage backend has been removed. Users wanting to use the
    v2 storage interface must use the InfluxDB backend.
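For operators, the switch is a configuration change. A minimal
``cloudkitty.conf`` sketch, assuming only the ``[storage]`` options exercised
by the functional test base class above (``version`` and ``backend``);
InfluxDB connection options are omitted:

.. code:: ini

   [storage]
   version = 2
   backend = influxdb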


@ -67,7 +67,6 @@ cloudkitty.storage.v1.backends =
    hybrid = cloudkitty.storage.v1.hybrid:HybridStorage

cloudkitty.storage.v2.backends =
    gnocchi = cloudkitty.storage.v2.gnocchi:GnocchiStorage
    influxdb = cloudkitty.storage.v2.influx:InfluxStorage

cloudkitty.storage.hybrid.backends =


@ -71,10 +71,3 @@ local-check-factory = cloudkitty.hacking.checks.factory
[testenv:releasenotes]
basepython = python3
commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:functional]
basepython = python3
setenv = TEST_FUNCTIONAL = 1
# Some tests do push and remove data from the storage backend, so this is done
# in order to keep data consistency
commands = stestr run --concurrency 1 {posargs}