Adding a v2 storage backend

This adds a v2 storage interface to cloudkitty, as well as a gnocchi backend
for it. This new interface will allow us to implement the v2 API, which will
be more metric-oriented.

This new storage uses the new dataframe format ('desc' is now split into
'groupby' and 'metadata', following the collect logic), and supports grouping,
filtering and pagination.
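
For illustration, here is a minimal sketch of the same rated entry in both
formats (field values are made up; the v2 layout follows the docstring of the
new BaseStorage.push() method):

    # v1 entry: groupby attributes and metadata are mixed in 'desc'
    v1_entry = {
        'vol': {'unit': 'instance', 'qty': 1},
        'rating': {'price': 0.42},
        'desc': {'project_id': '123ab', 'flavor': 'm1.small'},
    }

    # v2 entry: 'desc' is split, following the collect logic
    v2_entry = {
        'vol': {'unit': 'instance', 'qty': 1},
        'rating': {'price': 0.42},
        'groupby': {'project_id': '123ab'},   # attributes to group on
        'metadata': {'flavor': 'm1.small'},   # additional information
    }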

Modifications:

  * The tenant/scope state management isn't handled by the storage backend
    anymore. A StateManager object has been added (see the sketch after this
    list).

  * All internal interactions with the storage backend use the v2 interface.
    An adapter class has been added to ensure backward compatibility with
    the v1 interface.

  * A base class for functional tests of the v2 storage interface has been
    added. It contains generic tests, which should make it easier to test
    new v2 backends.

  * Some developer documentation explaining how to implement a v2 storage
    backend has been added. User documentation will be updated once the v2
    storage backend is considered stable.
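
As a rough sketch, workers now track their collection state like this (the
scope ID is made up, and helper names follow those used elsewhere in this
change):

    from cloudkitty import storage_state
    from cloudkitty import utils as ck_utils

    state_manager = storage_state.StateManager()
    state_manager.init()  # runs the alembic migration for the state table

    scope_id = 'a0b1c2d3'  # hypothetical tenant/scope identifier
    # Timestamp of the last processed period, or None if nothing has been
    # collected yet for this scope
    timestamp = state_manager.get_state(scope_id)

    # Once a period has been collected, rated and pushed to storage:
    state_manager.set_state(scope_id, ck_utils.get_month_start_timestamp())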

v1 is still the default storage version. Note that this new version is
considered unstable and should only be used for upstream development.
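
The version is selected through the new 'version' and 'backend' options of
the '[storage]' section. A minimal sketch of what this means for callers
(the 'gnocchi' entry-point name in the cloudkitty.storage.v2.backends
namespace is an assumption):

    from cloudkitty import storage

    # With '[storage] version = 2' and 'backend = gnocchi' in
    # cloudkitty.conf, this loads the v2 gnocchi driver directly. With
    # version = 1 (the default), the v1 driver is wrapped in
    # V1StorageAdapter, so callers always talk to the v2 interface.
    backend = storage.get_storage()
    backend.init()
    totals = backend.total(groupby=['project_id'])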

Change-Id: I5e9b95c79292582fab3017289d35ee310e35ffea
Story: 2001372
Task: 6585
Task: 14361
Task: 24535
Luka Peschke 2018-08-10 13:44:05 +02:00
parent b4a918186d
commit 3ab4774f66
90 changed files with 2842 additions and 1015 deletions


@@ -68,6 +68,7 @@ def setup_app(pecan_config=None, extra_hooks=None):
app_conf = get_pecan_config()
storage_backend = storage.get_storage()
app_hooks = [
hooks.RPCHook(),
hooks.StorageHook(storage_backend),


@@ -28,10 +28,13 @@ from cloudkitty.api.v1.datamodels import report as report_models
from cloudkitty.common import policy
from cloudkitty import utils as ck_utils
LOG = logging.getLogger(__name__)
class InvalidFilter(Exception):
"""Exception raised when a storage filter is invalid"""
class ReportController(rest.RestController):
"""REST Controller managing the reporting.
@@ -91,11 +94,17 @@ class ReportController(rest.RestController):
# FIXME(sheeprine): We should filter on user id.
# Use keystone token information by default but make it overridable and
# enforce it by policy engine
total = storage.get_total(begin, end, tenant_id, service)
groupby = ['project_id']
group_filters = {'project_id': tenant_id} if tenant_id else None
total_resources = storage.total(
groupby=groupby,
begin=begin, end=end,
metric_types=service,
group_filters=group_filters)
# TODO(Aaron): `get_total` return a list of dict,
# Get value of rate from index[0]
total = total[0].get('rate', decimal.Decimal('0'))
total = sum(total['rate'] for total in total_resources)
return total if total else decimal.Decimal('0')
@wsme_pecan.wsexpose(report_models.SummaryCollectionModel,
@@ -124,11 +133,28 @@ class ReportController(rest.RestController):
{"tenant_id": tenant_id})
storage = pecan.request.storage_backend
storage_groupby = []
if groupby is not None and 'tenant_id' in groupby:
storage_groupby.append('project_id')
if groupby is not None and 'res_type' in groupby:
storage_groupby.append('type')
group_filters = {'project_id': tenant_id} if tenant_id else None
results = storage.total(
groupby=storage_groupby,
begin=begin, end=end,
metric_types=service,
group_filters=group_filters)
summarymodels = []
results = storage.get_total(begin, end, tenant_id, service,
groupby=groupby)
for result in results:
summarymodel = report_models.SummaryModel(**result)
for res in results:
kwargs = {
'res_type': res.get('type') or res.get('res_type'),
'tenant_id': res.get('project_id') or res.get('tenant_id'),
'begin': res['begin'],
'end': res['end'],
'rate': res['rate'],
}
summarymodel = report_models.SummaryModel(**kwargs)
summarymodels.append(summarymodel)
return report_models.SummaryCollectionModel(summary=summarymodels)


@@ -25,7 +25,7 @@ import wsmeext.pecan as wsme_pecan
from cloudkitty.api.v1.datamodels import storage as storage_models
from cloudkitty.common import policy
from cloudkitty import storage as ck_storage
from cloudkitty import storage
from cloudkitty import utils as ck_utils
@@ -50,42 +50,48 @@ class DataFramesController(rest.RestController):
policy.authorize(pecan.request.context, 'storage:list_data_frames', {})
if not begin:
begin = ck_utils.get_month_start()
if not end:
end = ck_utils.get_next_month()
begin_ts = ck_utils.dt2ts(begin)
end_ts = ck_utils.dt2ts(end)
backend = pecan.request.storage_backend
dataframes = []
group_filters = {'project_id': tenant_id} if tenant_id else None
if begin:
begin = ck_utils.dt2ts(begin)
if end:
end = ck_utils.dt2ts(end)
try:
frames = backend.get_time_frame(begin_ts,
end_ts,
tenant_id=tenant_id,
res_type=resource_type)
for frame in frames:
for service, data_list in frame['usage'].items():
frame_tenant = None
resources = []
for data in data_list:
desc = data['desc'] if data['desc'] else {}
price = decimal.Decimal(str(data['rating']['price']))
resource = storage_models.RatedResource(
service=service,
desc=desc,
volume=data['vol']['qty'],
rating=price)
frame_tenant = data['tenant_id']
resources.append(resource)
dataframe = storage_models.DataFrame(
begin=ck_utils.iso2dt(frame['period']['begin']),
end=ck_utils.iso2dt(frame['period']['end']),
tenant_id=frame_tenant,
resources=resources)
dataframes.append(dataframe)
except ck_storage.NoTimeFrame:
pass
resp = backend.retrieve(
begin, end,
group_filters=group_filters,
metric_types=resource_type,
paginate=False)
except storage.NoTimeFrame:
return storage_models.DataFrameCollection(dataframes=[])
for frame in resp['dataframes']:
for service, data_list in frame['usage'].items():
frame_tenant = None
resources = []
for data in data_list:
# This means we use a v1 storage backend
if 'desc' in data.keys():
desc = data['desc']
else:
desc = data['metadata'].copy()
desc.update(data.get('groupby', {}))
price = decimal.Decimal(str(data['rating']['price']))
resource = storage_models.RatedResource(
service=service,
desc=desc,
volume=data['vol']['qty'],
rating=price)
if frame_tenant is None:
frame_tenant = data['scope_id']
resources.append(resource)
dataframe = storage_models.DataFrame(
begin=ck_utils.iso2dt(frame['period']['begin']),
end=ck_utils.iso2dt(frame['period']['end']),
tenant_id=frame_tenant,
resources=resources)
dataframes.append(dataframe)
return storage_models.DataFrameCollection(dataframes=dataframes)


@@ -22,14 +22,11 @@ from oslo_log import log
from wsme import types as wtypes
from cloudkitty.api.v1 import types as cktypes
from cloudkitty import utils as ck_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
METRICS_CONF = ck_utils.load_conf(CONF.collect.metrics_conf)
class CloudkittyResource(wtypes.Base):
"""Type describing a resource in CloudKitty.


@@ -17,11 +17,14 @@
#
from cloudkitty import service
from cloudkitty import storage
from cloudkitty import storage_state
def init_storage_backend():
backend = storage.get_storage()
backend.init()
state_manager = storage_state.StateManager()
state_manager.init()
def main():


@@ -94,9 +94,6 @@ class GnocchiCollector(collector.BaseCollector):
def __init__(self, transformers, **kwargs):
super(GnocchiCollector, self).__init__(transformers, **kwargs)
self.t_gnocchi = self.transformers['GnocchiTransformer']
self.t_cloudkitty = self.transformers['CloudKittyFormatTransformer']
adapter_options = {'connect_retries': 3}
if CONF.gnocchi_collector.gnocchi_auth_type == 'keystone':
auth_plugin = ks_loading.load_auth_from_conf_options(
@@ -110,6 +107,7 @@ class GnocchiCollector(collector.BaseCollector):
endpoint=CONF.gnocchi_collector.gnocchi_endpoint,
)
adapter_options['region_name'] = CONF.gnocchi_collector.region_name
self._conn = gclient.Client(
'1',
session_options={'auth': auth_plugin},
@@ -216,8 +214,6 @@ class GnocchiCollector(collector.BaseCollector):
resource_type = extra_args['resource_type']
query_parameters.append(
self.gen_filter(cop="=", type=resource_type))
if project_id:
kwargs = {extra_args['scope_key']: project_id}
query_parameters.append(self.gen_filter(**kwargs))
@@ -294,12 +290,11 @@ class GnocchiCollector(collector.BaseCollector):
# if resource info is provided, add additional
# metadata as defined in the conf
metadata = dict()
if resources_info:
if resources_info is not None:
resource = resources_info[
groupby[metconf['extra_args']['resource_key']]]
for i in metconf['metadata']:
metadata[i] = resource.get(i, '')
qty = data['measures']['measures']['aggregated'][0][2]
converted_qty = ck_utils.convert_unit(
qty, metconf['factor'], metconf['offset'])
@@ -328,7 +323,6 @@ class GnocchiCollector(collector.BaseCollector):
project_id=project_id,
q_filter=q_filter
)
formated_resources = list()
for d in data:
# Only if aggregates have been found


@@ -26,7 +26,8 @@ import cloudkitty.fetcher.source
import cloudkitty.orchestrator
import cloudkitty.service
import cloudkitty.storage
import cloudkitty.storage.hybrid.backends.gnocchi
import cloudkitty.storage.v1.hybrid.backends.gnocchi
import cloudkitty.storage.v2.gnocchi
import cloudkitty.utils
__all__ = ['list_opts']
@@ -56,9 +57,9 @@ _opts = [
('storage', list(itertools.chain(
cloudkitty.storage.storage_opts))),
('storage_gnocchi', list(itertools.chain(
cloudkitty.storage.hybrid.backends.gnocchi.gnocchi_storage_opts))),
('fetcher', list(itertools.chain(
cloudkitty.fetcher.fetchers_opts))),
cloudkitty.storage.v1.hybrid.backends.gnocchi.gnocchi_storage_opts))),
('storage_gnocchi', list(itertools.chain(
cloudkitty.storage.v2.gnocchi.gnocchi_storage_opts))),
(None, list(itertools.chain(
cloudkitty.api.app.auth_opts,
cloudkitty.service.service_opts))),


@@ -16,9 +16,9 @@
# @author: Stéphane Albert
#
import abc
import six
from oslo_config import cfg
import six
FETCHER_OPTS = 'fetcher'
DEPRECATED_FETCHER_OPTS = 'tenant_fetcher'


@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -33,6 +32,7 @@ from cloudkitty import config # noqa
from cloudkitty import extension_manager
from cloudkitty import messaging
from cloudkitty import storage
from cloudkitty import storage_state as state
from cloudkitty import transformer
from cloudkitty import utils as ck_utils
@@ -159,6 +159,7 @@ class Worker(BaseWorker):
self._wait_time = CONF.collect.wait_periods * self._period
self._tenant_id = tenant_id
self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)
self._state = state.StateManager()
super(Worker, self).__init__(self._tenant_id)
@@ -178,7 +179,7 @@
'usage': raw_data}]
def check_state(self):
timestamp = self._storage.get_state(self._tenant_id)
timestamp = self._state.get_state(self._tenant_id)
return ck_utils.check_time_state(timestamp,
self._period,
CONF.collect.wait_periods)
@@ -191,6 +192,7 @@
metrics = list(self._conf['metrics'].keys())
storage_data = []
for metric in metrics:
try:
try:
@@ -204,20 +206,24 @@
{'metric': metric, 'error': e})
raise collector.NoDataCollected('', metric)
except collector.NoDataCollected:
begin = timestamp
end = begin + self._period
for processor in self._processors:
processor.obj.nodata(begin, end)
self._storage.nodata(begin, end, self._tenant_id)
LOG.info(
'No data collected for metric {} '
'at timestamp {}'.format(
metric, ck_utils.ts2dt(timestamp))
)
else:
# Rating
for processor in self._processors:
processor.obj.process(data)
# Writing
self._storage.append(data, self._tenant_id)
if isinstance(data, list):
storage_data += data
else:
storage_data.append(data)
# We're getting a full period so we directly commit
self._storage.commit(self._tenant_id)
self._storage.push(storage_data, self._tenant_id)
self._state.set_state(self._tenant_id, timestamp)
class Orchestrator(object):
@@ -231,6 +237,7 @@ class Orchestrator(object):
transformers = transformer.get_transformers()
self.collector = collector.get_collector(transformers)
self.storage = storage.get_storage()
self._state = state.StateManager()
# RPC
self.server = None
@@ -258,7 +265,7 @@
self.server.start()
def _check_state(self, tenant_id):
timestamp = self.storage.get_state(tenant_id)
timestamp = self._state.get_state(tenant_id)
return ck_utils.check_time_state(timestamp,
CONF.collect.period,
CONF.collect.wait_periods)
@@ -288,10 +295,10 @@
tenant_id,
)
worker.run()
lock.release()
self.coord.heartbeat()
# NOTE(sheeprine): Slow down looping if all tenants are
# being processed
eventlet.sleep(1)


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,48 +13,32 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import abc
from oslo_config import cfg
from oslo_log import log as logging
import six
from stevedore import driver
from cloudkitty import utils as ck_utils
from cloudkitty.storage import v2 as storage_v2
LOG = logging.getLogger(__name__)
storage_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
help='Name of the storage backend driver.')
help='Name of the storage backend driver.'),
cfg.IntOpt('version',
min=1, max=2,
default=1,
help='Storage version to use.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('period', 'cloudkitty.collector', 'collect')
CONF.register_opts(storage_opts, 'storage')
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
def get_storage(**kwargs):
storage_args = {
'period': CONF.collect.period,
}
storage_args.update(kwargs)
backend = driver.DriverManager(
STORAGES_NAMESPACE,
cfg.CONF.storage.backend,
invoke_on_load=True,
invoke_kwds=storage_args
).driver
return backend
class NoTimeFrame(Exception):
"""Raised when there is no time frame available."""
@@ -64,193 +48,113 @@ class NoTimeFrame(Exception):
"No time frame available")
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
"""Base Storage class:
def _get_storage_instance(storage_args, storage_namespace, backend=None):
backend = backend or cfg.CONF.storage.backend
return driver.DriverManager(
storage_namespace,
backend,
invoke_on_load=True,
invoke_kwds=storage_args
).driver
Handle incoming data from the global orchestrator, and store them.
"""
def __init__(self, **kwargs):
self._period = kwargs.get('period')
self._collector = kwargs.get('collector')
# State vars
self.usage_start = {}
self.usage_start_dt = {}
self.usage_end = {}
self.usage_end_dt = {}
self._has_data = {}
class V1StorageAdapter(storage_v2.BaseStorage):
def __init__(self, storage_args, storage_namespace, backend=None):
self.storage = _get_storage_instance(
storage_args, storage_namespace, backend=backend)
def init(self):
return self.storage.init()
def push(self, dataframes, scope_id):
if dataframes:
self.storage.append(dataframes, scope_id)
self.storage.commit(scope_id)
@staticmethod
def init():
"""Initialize storage backend.
def _check_metric_types(metric_types):
if isinstance(metric_types, list):
return metric_types[0]
return metric_types
Can be used to create DB schema on first start.
"""
pass
def retrieve(self, begin=None, end=None,
filters=None, group_filters=None,
metric_types=None,
offset=0, limit=100, paginate=True):
tenant_id = group_filters.get('project_id') if group_filters else None
metric_types = self._check_metric_types(metric_types)
frames = self.storage.get_time_frame(
begin, end,
res_type=metric_types,
tenant_id=tenant_id)
def _filter_period(self, json_data):
"""Detect the best usage period to extract.
for frame in frames:
for _, data_list in frame['usage'].items():
for data in data_list:
data['scope_id'] = (data.get('project_id')
or data.get('tenant_id'))
Removes the usage from the json data and returns it.
:param json_data: Data to filter.
"""
candidate_ts = None
candidate_idx = 0
return {
'total': len(frames),
'dataframes': frames,
}
for idx, usage in enumerate(json_data):
usage_ts = usage['period']['begin']
if candidate_ts is None or usage_ts < candidate_ts:
candidate_ts = usage_ts
candidate_idx = idx
def total(self, groupby=None,
begin=None, end=None,
metric_types=None,
filters=None, group_filters=None):
tenant_id = group_filters.get('project_id') if group_filters else None
if candidate_ts:
return candidate_ts, json_data.pop(candidate_idx)['usage']
storage_gby = []
if groupby:
for elem in set(groupby):
if elem == 'type':
storage_gby.append('res_type')
elif elem == 'project_id':
storage_gby.append('tenant_id')
storage_gby = ','.join(storage_gby) if storage_gby else None
metric_types = self._check_metric_types(metric_types)
total = self.storage.get_total(
begin, end,
tenant_id=tenant_id,
service=metric_types,
groupby=storage_gby)
def _pre_commit(self, tenant_id):
"""Called before every commit.
for t in total:
if t.get('tenant_id') is None:
t['tenant_id'] = tenant_id
if t.get('rate') is None:
t['rate'] = float(0)
if groupby and 'type' in groupby:
t['type'] = t.get('res_type')
else:
t['type'] = None
return total
:param tenant_id: tenant_id which information must be committed.
"""
@abc.abstractmethod
def _commit(self, tenant_id):
"""Push data to the storage backend.
:param tenant_id: tenant_id which information must be committed.
"""
def _post_commit(self, tenant_id):
"""Called after every commit.
:param tenant_id: tenant_id which information must be committed.
"""
if tenant_id in self._has_data:
del self._has_data[tenant_id]
self._clear_usage_info(tenant_id)
@abc.abstractmethod
def _dispatch(self, data, tenant_id):
"""Process rated data.
:param data: The rated data frames.
:param tenant_id: tenant_id which data must be dispatched to.
"""
def _update_start(self, begin, tenant_id):
"""Update usage_start with a new timestamp.
:param begin: New usage beginning timestamp.
:param tenant_id: tenant_id to update.
"""
self.usage_start[tenant_id] = begin
self.usage_start_dt[tenant_id] = ck_utils.ts2dt(begin)
def _update_end(self, end, tenant_id):
"""Update usage_end with a new timestamp.
:param end: New usage end timestamp.
:param tenant_id: tenant_id to update.
"""
self.usage_end[tenant_id] = end
self.usage_end_dt[tenant_id] = ck_utils.ts2dt(end)
def _clear_usage_info(self, tenant_id):
"""Clear usage information timestamps.
:param tenant_id: tenant_id which information needs to be removed.
"""
self.usage_start.pop(tenant_id, None)
self.usage_start_dt.pop(tenant_id, None)
self.usage_end.pop(tenant_id, None)
self.usage_end_dt.pop(tenant_id, None)
def _check_commit(self, usage_start, tenant_id):
"""Check if the period for a given tenant must be committed.
:param usage_start: Start of the period.
:param tenant_id: tenant_id to check for.
"""
usage_end = self.usage_end.get(tenant_id)
if usage_end is not None and usage_start >= usage_end:
self.commit(tenant_id)
if self.usage_start.get(tenant_id) is None:
self._update_start(usage_start, tenant_id)
self._update_end(usage_start + self._period, tenant_id)
@abc.abstractmethod
def get_state(self, tenant_id=None):
"""Return the last written frame's timestamp.
:param tenant_id: tenant_id to filter on.
"""
@abc.abstractmethod
def get_total(self, begin=None, end=None, tenant_id=None,
service=None, groupby=None):
"""Return the current total.
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
:param tenant_id: Filter on the tenant_id.
:type tenant_id: str
:param service: Filter on the resource type.
:type service: str
:param groupby: Fields to group by, separated by commas if multiple.
:type groupby: str
"""
@abc.abstractmethod
def get_tenants(self, begin, end):
"""Return the list of rated tenants.
tenants = self.storage.get_tenants(begin, end)
return tenants
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
"""
def get_state(self, tenant_id=None):
return self.storage.get_state(tenant_id)
@abc.abstractmethod
def get_time_frame(self, begin, end, **filters):
"""Request a time frame from the storage backend.
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
:param res_type: (Optional) Filter on the resource type.
:type res_type: str
:param tenant_id: (Optional) Filter on the tenant_id.
:type tenant_id: str
"""
def get_storage(**kwargs):
storage_args = {
'period': CONF.collect.period,
}
backend = kwargs.pop('backend', None)
storage_args.update(kwargs)
def append(self, raw_data, tenant_id):
"""Append rated data before committing them to the backend.
version = kwargs.pop('version', None) or cfg.CONF.storage.version
if int(version) > 1:
LOG.warning('V2 Storage is not considered stable and should not be '
'used in production')
storage_namespace = 'cloudkitty.storage.v{}.backends'.format(version)
:param raw_data: The rated data frames.
:param tenant_id: Tenant the frame is belonging to.
"""
while raw_data:
usage_start, data = self._filter_period(raw_data)
self._check_commit(usage_start, tenant_id)
self._dispatch(data, tenant_id)
def nodata(self, begin, end, tenant_id):
"""Append a no data frame to the storage backend.
:param begin: Begin of the period with no data.
:param end: End of the period with no data.
:param tenant_id: Tenant to update with no data marker for the period.
"""
self._check_commit(begin, tenant_id)
def commit(self, tenant_id):
"""Commit the changes to the backend.
:param tenant_id: Tenant the changes belong to.
"""
self._pre_commit(tenant_id)
self._commit(tenant_id)
self._post_commit(tenant_id)
if version == 1:
return V1StorageAdapter(
storage_args, storage_namespace, backend=backend)
return _get_storage_instance(
storage_args, storage_namespace, backend=backend)


@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import abc
from oslo_config import cfg
from oslo_log import log as logging
import six
from cloudkitty import utils as ck_utils
# from cloudkitty.storage import NoTimeFrame
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
"""Base Storage class:
Handle incoming data from the global orchestrator, and store them.
"""
def __init__(self, **kwargs):
self._period = kwargs.get('period')
self._collector = kwargs.get('collector')
# State vars
self.usage_start = {}
self.usage_start_dt = {}
self.usage_end = {}
self.usage_end_dt = {}
self._has_data = {}
@staticmethod
def init():
"""Initialize storage backend.
Can be used to create DB schema on first start.
"""
pass
def _filter_period(self, json_data):
"""Detect the best usage period to extract.
Removes the usage from the json data and returns it.
:param json_data: Data to filter.
"""
candidate_ts = None
candidate_idx = 0
for idx, usage in enumerate(json_data):
usage_ts = usage['period']['begin']
if candidate_ts is None or usage_ts < candidate_ts:
candidate_ts = usage_ts
candidate_idx = idx
if candidate_ts:
return candidate_ts, json_data.pop(candidate_idx)['usage']
def _pre_commit(self, tenant_id):
"""Called before every commit.
:param tenant_id: tenant_id which information must be committed.
"""
@abc.abstractmethod
def _commit(self, tenant_id):
"""Push data to the storage backend.
:param tenant_id: tenant_id which information must be committed.
"""
def _post_commit(self, tenant_id):
"""Called after every commit.
:param tenant_id: tenant_id which information must be committed.
"""
if tenant_id in self._has_data:
del self._has_data[tenant_id]
self._clear_usage_info(tenant_id)
@abc.abstractmethod
def _dispatch(self, data, tenant_id):
"""Process rated data.
:param data: The rated data frames.
:param tenant_id: tenant_id which data must be dispatched to.
"""
def _update_start(self, begin, tenant_id):
"""Update usage_start with a new timestamp.
:param begin: New usage beginning timestamp.
:param tenant_id: tenant_id to update.
"""
self.usage_start[tenant_id] = begin
self.usage_start_dt[tenant_id] = ck_utils.ts2dt(begin)
def _update_end(self, end, tenant_id):
"""Update usage_end with a new timestamp.
:param end: New usage end timestamp.
:param tenant_id: tenant_id to update.
"""
self.usage_end[tenant_id] = end
self.usage_end_dt[tenant_id] = ck_utils.ts2dt(end)
def _clear_usage_info(self, tenant_id):
"""Clear usage information timestamps.
:param tenant_id: tenant_id which information needs to be removed.
"""
self.usage_start.pop(tenant_id, None)
self.usage_start_dt.pop(tenant_id, None)
self.usage_end.pop(tenant_id, None)
self.usage_end_dt.pop(tenant_id, None)
def _check_commit(self, usage_start, tenant_id):
"""Check if the period for a given tenant must be committed.
:param usage_start: Start of the period.
:param tenant_id: tenant_id to check for.
"""
usage_end = self.usage_end.get(tenant_id)
if usage_end is not None and usage_start >= usage_end:
self.commit(tenant_id)
if self.usage_start.get(tenant_id) is None:
self._update_start(usage_start, tenant_id)
self._update_end(usage_start + self._period, tenant_id)
@abc.abstractmethod
def get_state(self, tenant_id=None):
"""Return the last written frame's timestamp.
:param tenant_id: tenant_id to filter on.
"""
@abc.abstractmethod
def get_total(self, begin=None, end=None, tenant_id=None,
service=None, groupby=None):
"""Return the current total.
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
:param tenant_id: Filter on the tenant_id.
:type tenant_id: str
:param service: Filter on the resource type.
:type service: str
:param groupby: Fields to group by, separated by commas if multiple.
:type groupby: str
"""
@abc.abstractmethod
def get_tenants(self, begin, end):
"""Return the list of rated tenants.
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
"""
@abc.abstractmethod
def get_time_frame(self, begin, end, **filters):
"""Request a time frame from the storage backend.
:param begin: When to start filtering.
:type begin: datetime.datetime
:param end: When to stop filtering.
:type end: datetime.datetime
:param res_type: (Optional) Filter on the resource type.
:type res_type: str
:param tenant_id: (Optional) Filter on the tenant_id.
:type tenant_id: str
"""
def append(self, raw_data, tenant_id):
"""Append rated data before committing them to the backend.
:param raw_data: The rated data frames.
:param tenant_id: Tenant the frame is belonging to.
"""
while raw_data:
usage_start, data = self._filter_period(raw_data)
self._check_commit(usage_start, tenant_id)
self._dispatch(data, tenant_id)
def nodata(self, begin, end, tenant_id):
"""Append a no data frame to the storage backend.
:param begin: Begin of the period with no data.
:param end: End of the period with no data.
:param tenant_id: Tenant to update with no data marker for the period.
"""
self._check_commit(begin, tenant_id)
def commit(self, tenant_id):
"""Commit the changes to the backend.
:param tenant_id: Tenant the changes belong to.
"""
self._pre_commit(tenant_id)
self._commit(tenant_id)
self._post_commit(tenant_id)


@@ -20,9 +20,9 @@ from oslo_db.sqlalchemy import utils
from stevedore import driver
from cloudkitty import db
from cloudkitty import storage
from cloudkitty.storage.hybrid import migration
from cloudkitty.storage.hybrid import models
from cloudkitty.storage.v1 import BaseStorage
from cloudkitty.storage.v1.hybrid import migration
from cloudkitty.storage.v1.hybrid import models
from cloudkitty import utils as ck_utils
@@ -40,7 +40,7 @@ CONF.register_opts(storage_opts, group='hybrid_storage')
HYBRID_BACKENDS_NAMESPACE = 'cloudkitty.storage.hybrid.backends'
class HybridStorage(storage.BaseStorage):
class HybridStorage(BaseStorage):
"""Hybrid Storage Backend.
Stores dataframes in one of the available backends and other information


@@ -16,7 +16,7 @@
# @author: Luka Peschke
#
from cloudkitty.common.db.alembic import env # noqa
from cloudkitty.storage.hybrid import models
from cloudkitty.storage.v1.hybrid import models
target_metadata = models.Base.metadata
version_table = 'storage_hybrid_alembic'


@@ -28,7 +28,7 @@ from oslo_utils import uuidutils
import six
from cloudkitty.collector import validate_conf
from cloudkitty.storage.hybrid.backends import BaseHybridBackend
from cloudkitty.storage.v1.hybrid.backends import BaseHybridBackend
import cloudkitty.utils as ck_utils


@@ -22,9 +22,10 @@ from oslo_db.sqlalchemy import utils
import sqlalchemy
from cloudkitty import db
from cloudkitty import storage
from cloudkitty.storage.sqlalchemy import migration
from cloudkitty.storage.sqlalchemy import models
from cloudkitty.storage import NoTimeFrame
from cloudkitty.storage import v1 as storage
from cloudkitty.storage.v1.sqlalchemy import migration
from cloudkitty.storage.v1.sqlalchemy import models
from cloudkitty import utils as ck_utils
@@ -98,11 +99,11 @@ class SQLAlchemyStorage(storage.BaseStorage):
sqlalchemy.func.sum(self.frame_model.rate).label('rate')
]
# Boundary calculation
if not begin:
begin = ck_utils.get_month_start()
begin = ck_utils.get_month_start_timestamp()
if not end:
end = ck_utils.get_next_month()
end = ck_utils.get_next_month_timestamp()
# Boundary calculation
if tenant_id:
querymodels.append(self.frame_model.tenant_id)
if service:
@@ -137,6 +138,7 @@ class SQLAlchemyStorage(storage.BaseStorage):
total["begin"] = begin
total["end"] = end
totallist.append(total)
return totallist
def get_tenants(self, begin, end):
@@ -152,6 +154,10 @@
return [tenant.tenant_id for tenant in tenants]
def get_time_frame(self, begin, end, **filters):
if not begin:
begin = ck_utils.get_month_start_timestamp()
if not end:
end = ck_utils.get_next_month_timestamp()
session = db.get_session()
q = utils.model_query(
self.frame_model,
@@ -167,7 +173,7 @@
q = q.filter(self.frame_model.res_type != '_NO_DATA_')
count = q.count()
if not count:
raise storage.NoTimeFrame()
raise NoTimeFrame()
r = q.all()
return [entry.to_cloudkitty(self._collector) for entry in r]


@@ -16,7 +16,7 @@
# @author: Stéphane Albert
#
from cloudkitty.common.db.alembic import env # noqa
from cloudkitty.storage.sqlalchemy import models
from cloudkitty.storage.v1.sqlalchemy import models
target_metadata = models.Base.metadata
version_table = 'storage_sqlalchemy_alembic'


@@ -0,0 +1,170 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
import abc
import datetime
from oslo_config import cfg
import six
from cloudkitty import storage_state
storage_opts = [
cfg.IntOpt(
'retention_period',
default=2400,
help='Duration after which data should be cleaned up/aggregated. '
'Duration is given in hours. Defaults to 2400 (100 days)'
),
]
CONF = cfg.CONF
CONF.register_opts(storage_opts, 'storage')
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
"""Abstract class for v2 storage objects."""
def __init__(self, *args, **kwargs):
"""Left empty so that child classes don't need to implement this."""
@abc.abstractmethod
def init(self):
"""Called for storage backend initialization"""
@abc.abstractmethod
def push(self, dataframes, scope_id):
"""Pushes dataframes to the storage backend
A dataframe has the following format::
{
"usage": {
"bananas": [ # metric name
{
"vol": {
"unit": "banana",
"qty": 1
},
"rating": {
"price": 1
},
"groupby": {
"xxx_id": "hello",
"yyy_id": "bye",
},
"metadata": {
"flavor": "chocolate",
"eaten_by": "gorilla",
},
}
],
"metric_name2": [...],
}
"period": {
"begin": "1239781290", # timestamp
"end": "1239793490", # timestamp
}
}
:param dataframes: List of dataframes
:type dataframes: list
:param scope_id: ID of the scope (A project ID for example).
:type scope_id: str
"""
@abc.abstractmethod
def retrieve(self, begin=None, end=None,
filters=None, group_filters=None,
metric_types=None,
offset=0, limit=1000, paginate=True):
"""Returns the following dict::
{
'total': int, # total amount of measures found
'dataframes': list of dataframes,
}
:param begin: Start date
:type begin: datetime
:param end: End date
:type end: datetime
:param filters: Metadata to filter on. ex: {'flavor_id': '42'}
:type filters: dict
:param group_filters: Groupby to filter on. ex: {'project_id': '123ab'}
:type group_filters: dict
:param metric_types: Metric type to filter on.
:type metric_types: str or list
:param offset: Offset for pagination
:type offset: int
:param limit: Maximum amount of elements to return
:type limit: int
:param paginate: Defaults to True. If False, all found results
will be returned.
:type paginate: bool
:rtype: dict
"""
@abc.abstractmethod
def total(self, groupby=None,
begin=None, end=None,
metric_types=None,
filters=None, group_filters=None):
"""Returns a grouped total for given groupby.
:param groupby: Attributes on which to group by. These attributes must
be part of the 'groupby' section for the given metric
type in metrics.yml. In order to group by metric type,
add 'type' to the groupby list.
:type groupby: list of strings
:param begin: Start date
:type begin: datetime
:param end: End date
:type end: datetime
:param filters: Metadata to filter on. ex: {'flavor_id': '42'}
:type filters: dict
:param group_filters: Groupby to filter on. ex: {'project_id': '123ab'}
:type group_filters: dict
:param metric_types: Metric type to filter on.
:type metric_types: str or list
:rtype: list of dicts
returns a list of dicts with the following format::
{
'begin': XXX,
'end': XXX,
'type': XXX,
'rate': XXX,
'groupby1': XXX,
'groupby2': XXX
}
"""
@staticmethod
def get_retention():
"""Returns the retention period defined in the configuration.
:rtype: datetime.timedelta
"""
return datetime.timedelta(hours=CONF.storage.retention_period)
# NOTE(lpeschke): This is only kept for v1 storage backward compatibility
def get_tenants(self, begin=None, end=None):
return storage_state.StateManager().get_tenants(begin, end)


@@ -0,0 +1,758 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from collections import deque
from collections import Iterable
import copy
import datetime
import decimal
import time
from gnocchiclient import auth as gauth
from gnocchiclient import client as gclient
from gnocchiclient import exceptions as gexceptions
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
import six
from cloudkitty.storage.v2 import BaseStorage
from cloudkitty import utils as ck_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
gnocchi_storage_opts = [
cfg.StrOpt(
'gnocchi_auth_type',
default='keystone',
choices=['keystone', 'basic'],
help='(v2) Gnocchi auth type (keystone or basic). Keystone '
'credentials can be specified through the "auth_section" parameter',
),
cfg.StrOpt(
'gnocchi_user',
default='',
help='(v2) Gnocchi user (for basic auth only)',
),
cfg.StrOpt(
'gnocchi_endpoint',
default='',
help='(v2) Gnocchi endpoint (for basic auth only)',
),
cfg.StrOpt(
'api_interface',
default='internalURL',
help='(v2) Endpoint URL type (for keystone auth only)',
),
cfg.IntOpt(
'measure_chunk_size',
min=10, max=1000000,
default=500,
help='(v2) Maximum amount of measures to send to gnocchi at once '
'(defaults to 500).',
),
]
CONF.register_opts(gnocchi_storage_opts, 'storage_gnocchi')
ks_loading.register_session_conf_options(CONF, 'storage_gnocchi')
ks_loading.register_auth_conf_options(CONF, 'storage_gnocchi')
RESOURCE_TYPE_NAME_ROOT = 'cloudkitty_metric_'
ARCHIVE_POLICY_NAME = 'cloudkitty_archive_policy'
GROUPBY_NAME_ROOT = 'groupby_attr_'
META_NAME_ROOT = 'meta_attr_'
class GnocchiResource(object):
"""Class representing a gnocchi resource
It provides helpers for resource_type/resource creation and identification.
"""
def __init__(self, name, metric, conn, scope_id):
"""Resource_type name, metric, gnocchiclient"""
self.name = name
self.resource_type = RESOURCE_TYPE_NAME_ROOT + name
self.unit = metric['vol']['unit']
self.groupby = {
k: v if v else '' for k, v in metric['groupby'].items()}
self.groupby['ck_scope_id'] = scope_id
self.metadata = {
k: v if v else '' for k, v in metric['metadata'].items()}
self._trans_groupby = {
GROUPBY_NAME_ROOT + key: val for key, val in self.groupby.items()
}
self._trans_metadata = {
META_NAME_ROOT + key: val for key, val in self.metadata.items()
}
self._conn = conn
self._resource = None
self.attributes = self.metadata.copy()
self.attributes.update(self.groupby)
self._trans_attributes = self._trans_metadata.copy()
self._trans_attributes.update(self._trans_groupby)
self.needs_update = False
def __getitem__(self, key):
output = self._trans_attributes.get(GROUPBY_NAME_ROOT + key, None)
if output is None:
output = self._trans_attributes.get(META_NAME_ROOT + key, None)
return output
def __eq__(self, other):
if self.resource_type != other.resource_type or \
self['id'] != other['id']:
return False
own_keys = list(self.groupby.keys())
own_keys.sort()
other_keys = list(other.groupby.keys())
other_keys.sort()
if own_keys != other_keys:
return False
for key in own_keys:
if other[key] != self[key]:
return False
return True
@property
def qty(self):
if self._resource:
return self._resource['metrics']['qty']
return None
@property
def cost(self):
if self._resource:
return self._resource['metrics']['cost']
return None
def _get_res_type_dict(self):
attributes = {}
for key in self._trans_groupby.keys():
attributes[key] = {'required': True, 'type': 'string'}
attributes['unit'] = {'required': True, 'type': 'string'}
for key in self._trans_metadata.keys():
attributes[key] = {'required': False, 'type': 'string'}
return {
'name': self.resource_type,
'attributes': attributes,
}
def create_resource_type(self):
"""Allows to create the type corresponding to this resource."""
try:
self._conn.resource_type.get(self.resource_type)
except gexceptions.ResourceTypeNotFound:
res_type = self._get_res_type_dict()
LOG.debug('Creating resource_type {} in gnocchi'.format(
self.resource_type))
self._conn.resource_type.create(res_type)
@staticmethod
def _get_rfc6902_attributes_add_op(new_attributes):
return [{
'op': 'add',
'path': '/attributes/{}'.format(attr),
'value': {
'required': attr.startswith(GROUPBY_NAME_ROOT),
'type': 'string'
}
} for attr in new_attributes]
def update_resource_type(self):
needed_res_type = self._get_res_type_dict()
current_res_type = self._conn.resource_type.get(
needed_res_type['name'])
new_attributes = [attr for attr in needed_res_type['attributes'].keys()
if attr not in current_res_type['attributes'].keys()]
if not new_attributes:
return
LOG.info('Adding {} to resource_type {}'.format(
[attr.replace(GROUPBY_NAME_ROOT, '').replace(META_NAME_ROOT, '')
for attr in new_attributes],
current_res_type['name'].replace(RESOURCE_TYPE_NAME_ROOT, ''),
))
new_attributes_op = self._get_rfc6902_attributes_add_op(new_attributes)
self._conn.resource_type.update(
needed_res_type['name'], new_attributes_op)
def _create_metrics(self):
qty = self._conn.metric.create(
name='qty',
unit=self.unit,
archive_policy_name=ARCHIVE_POLICY_NAME,
)
cost = self._conn.metric.create(
name='cost',
archive_policy_name=ARCHIVE_POLICY_NAME,
)
return qty, cost
def exists_in_gnocchi(self):
"""Check if the resource exists in gnocchi.
Returns true if the resource exists.
"""
query = {
'and': [
{'=': {key: value}}
for key, value in self._trans_groupby.items()
],
}
res = self._conn.resource.search(resource_type=self.resource_type,
query=query)
if len(res) > 1:
LOG.warning(
"Found more than one metric matching groupby. This may not "
"have the behavior you're expecting. You should probably add "
"some items to groupby")
if len(res) > 0:
self._resource = res[0]
return True
return False
def create(self):
"""Creates the resource in gnocchi."""
if self._resource:
return
self.create_resource_type()
qty_metric, cost_metric = self._create_metrics()
resource = self._trans_attributes.copy()
resource['metrics'] = {
'qty': qty_metric['id'],
'cost': cost_metric['id'],
}
resource['id'] = uuidutils.generate_uuid()
resource['unit'] = self.unit
if not self.exists_in_gnocchi():
try:
self._resource = self._conn.resource.create(
self.resource_type, resource)
# Attributes have changed
except gexceptions.BadRequest:
self.update_resource_type()
self._resource = self._conn.resource.create(
self.resource_type, resource)
def update(self, metric):
for key, val in metric['metadata'].items():
self._resource[META_NAME_ROOT + key] = val
self._resource = self._conn.update(
self.resource_type, self._resource['id'], self._resource)
self.needs_update = False
return self._resource
class GnocchiResourceCacher(object):
"""Class allowing to keep created resource in memory to improve perfs.
It keeps the last max_size resources in cache.
"""
def __init__(self, max_size=500):
self._resources = deque(maxlen=max_size)
def __contains__(self, resource):
for r in self._resources:
if r == resource:
for key, val in resource.metadata.items():
if val != r[key]:
r.needs_update = True
return True
return False
def add_resource(self, resource):
"""Add a resource to the cacher.
:param resource: resource to add
:type resource: GnocchiResource
"""
for r in self._resources:
if r == resource:
return
self._resources.append(resource)
def get(self, resource):
"""Returns the resource matching to the parameter.
:param resource: resource to get
:type resource: GnocchiResource
"""
for r in self._resources:
if r == resource:
return r
return None
def get_by_id(self, resource_id):
"""Returns the resource matching the given id.
:param resource_id: ID of the resource to get
:type resource: str
"""
for r in self._resources:
if r['id'] == resource_id:
return r
return None
class GnocchiStorage(BaseStorage):
default_op = ['aggregate', 'sum', ['metric', 'cost', 'sum'], ]
def _check_archive_policy(self):
try:
self._conn.archive_policy.get(ARCHIVE_POLICY_NAME)
except gexceptions.ArchivePolicyNotFound:
definition = [
{'granularity': str(CONF.collect.period) + 's',
'timespan': '{d} days'.format(d=self.get_retention().days)},
]
archive_policy = {
'name': ARCHIVE_POLICY_NAME,
'back_window': 0,
'aggregation_methods': [
'std', 'count', 'min', 'max', 'sum', 'mean'],
'definition': definition,
}
self._conn.archive_policy.create(archive_policy)
def __init__(self, *args, **kwargs):
super(GnocchiStorage, self).__init__(*args, **kwargs)
adapter_options = {'connect_retries': 3}
if CONF.storage_gnocchi.gnocchi_auth_type == 'keystone':
auth_plugin = ks_loading.load_auth_from_conf_options(
CONF,
'storage_gnocchi',
)
adapter_options['interface'] = CONF.storage_gnocchi.api_interface
else:
auth_plugin = gauth.GnocchiBasicPlugin(
user=CONF.storage_gnocchi.gnocchi_user,
endpoint=CONF.storage_gnocchi.gnocchi_endpoint,
)
self._conn = gclient.Client(
'1',
session_options={'auth': auth_plugin},
adapter_options=adapter_options,
)
self._cacher = GnocchiResourceCacher()
def init(self):
self._check_archive_policy()
def _check_resource(self, metric_name, metric, scope_id):
resource = GnocchiResource(metric_name, metric, self._conn, scope_id)
if resource in self._cacher:
return self._cacher.get(resource)
resource.create()
self._cacher.add_resource(resource)
return resource
def _push_measures_to_gnocchi(self, measures):
if measures:
try:
self._conn.metric.batch_metrics_measures(measures)
except gexceptions.BadRequest:
LOG.warning(
'An exception occurred while trying to push measures to '
'gnocchi. Retrying in 1 second. If this happens again, '
'set measure_chunk_size to a lower value.')
time.sleep(1)
self._conn.metric.batch_metrics_measures(measures)
def push(self, dataframes, scope_id):
if not isinstance(dataframes, list):
dataframes = [dataframes]
measures = {}
nb_measures = 0
for dataframe in dataframes:
timestamp = dataframe['period']['begin']
for metric_name, metrics in dataframe['usage'].items():
for metric in metrics:
resource = self._check_resource(
metric_name, metric, scope_id)
if resource.needs_update:
resource.update(metric)
if not resource.qty or not resource.cost:
LOG.warning('Unexpected continue')
continue
# resource.qty is the uuid of the qty metric
if not measures.get(resource.qty):
measures[resource.qty] = []
measures[resource.qty].append({
'timestamp': timestamp,
'value': metric['vol']['qty'],
})
if not measures.get(resource.cost):
measures[resource.cost] = []
measures[resource.cost].append({
'timestamp': timestamp,
'value': metric['rating']['price'],
})
nb_measures += 2
if nb_measures >= CONF.storage_gnocchi.measure_chunk_size:
LOG.debug('Pushing {} measures to gnocchi.'.format(
nb_measures))
self._push_measures_to_gnocchi(measures)
measures = {}
nb_measures = 0
LOG.debug('Pushing {} measures to gnocchi.'.format(nb_measures))
self._push_measures_to_gnocchi(measures)
def _get_ck_resource_types(self):
types = self._conn.resource_type.list()
return [gtype['name'] for gtype in types
if gtype['name'].startswith(RESOURCE_TYPE_NAME_ROOT)]
def _check_res_types(self, res_type=None):
if res_type is None:
output = self._get_ck_resource_types()
elif isinstance(res_type, Iterable):
output = res_type
else:
output = [res_type]
return sorted(output)
@staticmethod
def _check_begin_end(begin, end):
if not begin:
begin = ck_utils.get_month_start()
if not end:
end = ck_utils.get_next_month()
if isinstance(begin, six.text_type):
begin = ck_utils.iso2dt(begin)
if isinstance(begin, int):
begin = ck_utils.ts2dt(begin)
if isinstance(end, six.text_type):
end = ck_utils.iso2dt(end)
if isinstance(end, int):
end = ck_utils.ts2dt(end)
return begin, end
def _get_resource_frame(self,
cost_measure,
qty_measure,
resource,
scope_id):
# Getting price
price = decimal.Decimal(cost_measure[2])
price_dict = {'price': float(price)}
# Getting vol
vol_dict = {
'qty': decimal.Decimal(qty_measure[2]),
'unit': resource.get('unit'),
}
# Formatting
groupby = {
k.replace(GROUPBY_NAME_ROOT, ''): v
for k, v in resource.items() if k.startswith(GROUPBY_NAME_ROOT)
}
metadata = {
k.replace(META_NAME_ROOT, ''): v
for k, v in resource.items() if k.startswith(META_NAME_ROOT)
}
return {
'groupby': groupby,
'metadata': metadata,
'vol': vol_dict,
'rating': price_dict,
'scope_id': scope_id,
}
def _to_cloudkitty(self,
scope_id,
res_type,
resource,
cost_measure,
qty_measure):
start = cost_measure[0]
stop = start + datetime.timedelta(seconds=cost_measure[1])
# Period
period_dict = {
'begin': ck_utils.dt2iso(start),
'end': ck_utils.dt2iso(stop),
}
return {
'usage': {res_type: [
self._get_resource_frame(
cost_measure, qty_measure, resource, scope_id)]
},
'period': period_dict,
}
def _get_resource_info(self, resource_ids, start, stop):
search = {
'and': [
{
'or': [
{
'=': {'id': resource_id},
}
for resource_id in resource_ids
],
},
],
}
resources = []
marker = None
while True:
resource_chunk = self._conn.resource.search(query=search,
details=True,
marker=marker,
sorts=['id:asc'])
if len(resource_chunk) < 1:
break
marker = resource_chunk[-1]['id']
resources += resource_chunk
return {resource['id']: resource for resource in resources}
@staticmethod
def _dataframes_to_list(dataframes):
keys = sorted(dataframes.keys())
return [dataframes[key] for key in keys]
def _get_dataframes(self, measures, resource_info):
dataframes = {}
for measure in measures:
resource_type = measure['group']['type']
resource_id = measure['group']['id']
# Raw metrics do not contain all required attributes
resource = resource_info[resource_id]
scope_id = resource[GROUPBY_NAME_ROOT + 'ck_scope_id']
dataframe = dataframes.get(measure['cost'][0])
ck_resource_type_name = resource_type.replace(
RESOURCE_TYPE_NAME_ROOT, '')
if dataframe is None:
dataframes[measure['cost'][0]] = self._to_cloudkitty(
scope_id,
ck_resource_type_name,
resource,
measure['cost'],
measure['qty'])
elif dataframe['usage'].get(ck_resource_type_name) is None:
dataframe['usage'][ck_resource_type_name] = [
self._get_resource_frame(
measure['cost'], measure['qty'], resource, scope_id)]
else:
dataframe['usage'][ck_resource_type_name].append(
self._get_resource_frame(
measure['cost'], measure['qty'], resource, scope_id))
return self._dataframes_to_list(dataframes)
@staticmethod
def _create_filters(filters, group_filters):
output = {}
if filters:
for k, v in filters.items():
output[META_NAME_ROOT + k] = v
if group_filters:
for k, v in group_filters.items():
output[GROUPBY_NAME_ROOT + k] = v
return output
def _raw_metrics_to_distinct_measures(self,
raw_cost_metrics,
raw_qty_metrics):
output = []
for cost, qty in zip(raw_cost_metrics, raw_qty_metrics):
output += [{
'cost': cost_measure,
'qty': qty['measures']['measures']['aggregated'][idx],
'group': cost['group'],
} for idx, cost_measure in enumerate(
cost['measures']['measures']['aggregated'])
]
# Sorting by timestamp, metric type and resource ID
output.sort(key=lambda x: (
x['cost'][0], x['group']['type'], x['group']['id']))
return output
def retrieve(self, begin=None, end=None,
filters=None, group_filters=None,
metric_types=None,
offset=0, limit=100, paginate=True):
begin, end = self._check_begin_end(begin, end)
metric_types = self._check_res_types(metric_types)
# Getting a list of active gnocchi resources with measures
filters = self._create_filters(filters, group_filters)
# FIXME(lukapeschke): We query all resource types in order to get the
# total amount of dataframes, but this could be done in a better way;
# i.e. by not doing additional queries once the limit is reached
raw_cost_metrics = []
raw_qty_metrics = []
for mtype in metric_types:
cost_metrics, qty_metrics = self._single_resource_type_aggregates(
begin, end, mtype, ['type', 'id'], filters, fetch_qty=True)
raw_cost_metrics += cost_metrics
raw_qty_metrics += qty_metrics
measures = self._raw_metrics_to_distinct_measures(
raw_cost_metrics, raw_qty_metrics)
result = {'total': len(measures)}
if paginate:
measures = measures[offset:limit]
if len(measures) < 1:
return {
'total': 0,
'dataframes': [],
}
resource_ids = [measure['group']['id'] for measure in measures]
resource_info = self._get_resource_info(resource_ids, begin, end)
result['dataframes'] = self._get_dataframes(measures, resource_info)
return result
def _single_resource_type_aggregates(self,
start, stop,
metric_type,
groupby,
filters,
fetch_qty=False):
search = {
'and': [
{'=': {'type': metric_type}}
]
}
search['and'] += [{'=': {k: v}} for k, v in filters.items()]
cost_op = self.default_op
output = (
self._conn.aggregates.fetch(
cost_op,
search=search,
groupby=groupby,
resource_type=metric_type,
start=start, stop=stop),
None
)
if fetch_qty:
qty_op = copy.deepcopy(self.default_op)
qty_op[2][1] = 'qty'
output = (
output[0],
self._conn.aggregates.fetch(
qty_op,
search=search,
groupby=groupby,
resource_type=metric_type,
start=start, stop=stop)
)
return output
@staticmethod
def _ungroup_type(rated_resources):
output = []
for rated_resource in rated_resources:
rated_resource['group'].pop('type', None)
new_item = True
for elem in output:
if rated_resource['group'] == elem['group']:
elem['measures']['measures']['aggregated'] \
+= rated_resource['measures']['measures']['aggregated']
new_item = False
break
if new_item:
output.append(rated_resource)
return output
def total(self, groupby=None,
begin=None, end=None,
metric_types=None,
filters=None, group_filters=None):
begin, end = self._check_begin_end(begin, end)
if groupby is None:
groupby = []
request_groupby = [
GROUPBY_NAME_ROOT + elem for elem in groupby if elem != 'type']
# We need to have at least one attribute on which to group
request_groupby.append('type')
# NOTE(lukapeschke): For now, it isn't possible to group aggregates
# from different resource types using custom attributes, so we need
# to do one request per resource type.
rated_resources = []
metric_types = self._check_res_types(metric_types)
filters = self._create_filters(filters, group_filters)
for mtype in metric_types:
resources, _ = self._single_resource_type_aggregates(
begin, end, mtype, request_groupby, filters)
for resource in resources:
# If we have found something
if len(resource['measures']['measures']['aggregated']):
rated_resources.append(resource)
# NOTE(lukapeschke): We undo what has been done previously (grouping
# per type). This is not performant. Should be fixed as soon as
# previous note is supported in gnocchi
if 'type' not in groupby:
rated_resources = self._ungroup_type(rated_resources)
output = []
for rated_resource in rated_resources:
rate = sum(measure[2] for measure in
rated_resource['measures']['measures']['aggregated'])
output_elem = {
'begin': begin,
'end': end,
'rate': rate,
}
for group in groupby:
output_elem[group] = rated_resource['group'].get(
GROUPBY_NAME_ROOT + group, '')
# If we want to group per type
if 'type' in groupby:
output_elem['type'] = rated_resource['group'].get(
'type', '').replace(RESOURCE_TYPE_NAME_ROOT, '') or ''
output.append(output_elem)
return output


@@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from oslo_db.sqlalchemy import utils
from cloudkitty import db
from cloudkitty.storage_state import migration
from cloudkitty.storage_state import models
from cloudkitty import utils as ck_utils
class StateManager(object):
"""Class allowing state management in CloudKitty"""
model = models.IdentifierState
def _get_db_item(self, session, identifier):
q = utils.model_query(self.model, session)
return q.filter(self.model.identifier == identifier).first()
def set_state(self, identifier, state):
if isinstance(state, int):
state = ck_utils.ts2dt(state)
session = db.get_session()
session.begin()
r = self._get_db_item(session, identifier)
if r and r.state != state:
r.state = state
session.commit()
else:
state_object = self.model(
identifier=identifier,
state=state,
)
session.add(state_object)
session.commit()
session.close()
def get_state(self, identifier):
session = db.get_session()
session.begin()
r = self._get_db_item(session, identifier)
session.close()
return ck_utils.dt2ts(r.state) if r else None
def init(self):
migration.upgrade('head')
# This is kept for compatibility with legacy behavior, but
# shouldn't be used
def get_tenants(self, begin=None, end=None):
session = db.get_session()
session.begin()
q = utils.model_query(self.model, session)
session.close()
return [tenant.identifier for tenant in q]


@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from cloudkitty.common.db.alembic import env # noqa
from cloudkitty.storage_state import models
target_metadata = models.Base.metadata
version_table = 'storage_states_alembic'
env.run_migrations_online(target_metadata, version_table)

View File

@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial
Revision ID: c14eea9d3cc1
Revises:
Create Date: 2018-04-20 14:27:11.434366
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c14eea9d3cc1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'cloudkitty_storage_states',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('identifier',
sa.String(length=40),
nullable=False,
unique=True),
sa.Column('state', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB'
)
def downgrade():
op.drop_table('cloudkitty_storage_states')

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import os
from cloudkitty.common.db.alembic import migration
ALEMBIC_REPO = os.path.join(os.path.dirname(__file__), 'alembic')
def upgrade(revision):
config = migration.load_alembic_config(ALEMBIC_REPO)
return migration.upgrade(config, revision)
def version():
config = migration.load_alembic_config(ALEMBIC_REPO)
return migration.version(config)
def revision(message, autogenerate):
config = migration.load_alembic_config(ALEMBIC_REPO)
return migration.revision(config, message, autogenerate)
def stamp(revision):
config = migration.load_alembic_config(ALEMBIC_REPO)
return migration.stamp(config, revision)
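As a sketch, these helpers can be invoked directly to manage the storage-state schema, again assuming a configured database; the revision printed below is the initial one created in this change:

from cloudkitty.storage_state import migration

migration.upgrade('head')   # creates/upgrades 'cloudkitty_storage_states'
print(migration.version())  # 'c14eea9d3cc1' once the initial revision is applied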

View File

@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from oslo_db.sqlalchemy import models
import sqlalchemy
from sqlalchemy.ext import declarative
Base = declarative.declarative_base()
class IdentifierState(Base, models.ModelBase):
"""Represents the state of a given identifier."""
__table_args__ = {'mysql_charset': "utf8",
'mysql_engine': "InnoDB"}
__tablename__ = 'cloudkitty_storage_states'
id = sqlalchemy.Column(sqlalchemy.Integer,
primary_key=True)
# SHA1 of the identifier
identifier = sqlalchemy.Column(sqlalchemy.String(256),
nullable=False,
unique=True)
state = sqlalchemy.Column(sqlalchemy.DateTime,
nullable=False)

View File

@ -14,12 +14,15 @@
# under the License.
#
"""Test SummaryModel objects."""
import testtools
from oslotest import base
from cloudkitty.api.v1.datamodels import report
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TestSummary(base.BaseTestCase):
def setUp(self):

View File

@ -15,12 +15,16 @@
#
"""Test cloudkitty/api/v1/types."""
import testtools
from oslotest import base
from wsme import types as wtypes
from cloudkitty.api.v1 import types
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TestTypes(base.BaseTestCase):
def setUp(self):

View File

@ -14,11 +14,15 @@
# under the License.
#
#
import testtools
from cloudkitty.collector import gnocchi
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class GnocchiCollectorTest(tests.TestCase):
def setUp(self):
super(GnocchiCollectorTest, self).setUp()

View File

@ -17,14 +17,17 @@
#
from decimal import Decimal
import mock
import testtools
from cloudkitty import collector
from cloudkitty.collector import prometheus
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import transformer
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PrometheusCollectorTest(tests.TestCase):
def setUp(self):
super(PrometheusCollectorTest, self).setUp()
@ -130,6 +133,7 @@ class PrometheusCollectorTest(tests.TestCase):
)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PrometheusClientTest(tests.TestCase):
def setUp(self):
super(PrometheusClientTest, self).setUp()

View File

@ -18,6 +18,7 @@
import abc
import decimal
import os
from unittest.case import SkipTest
from gabbi import fixture
import mock
@ -41,9 +42,10 @@ from cloudkitty.db import api as ck_db_api
from cloudkitty import messaging
from cloudkitty import rating
from cloudkitty import storage
from cloudkitty.storage.sqlalchemy import models
from cloudkitty.storage.v1.sqlalchemy import models
from cloudkitty import tests
from cloudkitty.tests import test_utils
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils as ck_utils
@ -84,9 +86,10 @@ class BaseExtensionFixture(fixture.GabbiFixture):
self.patch.return_value = fake_mgr
def stop_fixture(self):
self.patch.assert_called_with(
self.namespace,
**self.assert_args)
if not is_functional_test():
self.patch.assert_called_with(
self.namespace,
**self.assert_args)
self.mock.stop()
@ -203,6 +206,7 @@ class ConfigFixture(fixture.GabbiFixture):
)
conf.import_group('storage', 'cloudkitty.storage')
conf.set_override('backend', 'sqlalchemy', 'storage')
conf.set_override('version', '1', 'storage')
self.conf = conf
self.conn = ck_db_api.get_instance()
migration = self.conn.get_migration()
@ -332,28 +336,19 @@ class StorageDataFixture(BaseStorageDataFixture):
nodata_duration = (24 * 3 + 12) * 3600
tenant_list = ['8f82cc70-e50c-466e-8624-24bdea811375',
'7606a24a-b8ad-4ae0-be6c-3d7a41334a2e']
for tenant in tenant_list:
for i in range(INITIAL_TIMESTAMP,
INITIAL_TIMESTAMP + nodata_duration,
3600):
self.storage.nodata(i, i + 3600, tenant)
data_ts = INITIAL_TIMESTAMP + nodata_duration + 3600
data_duration = (24 * 2 + 8) * 3600
for i in range(data_ts,
data_ts + data_duration,
3600):
data = self.create_fake_data(i, i + 3600)
self.storage.append(data, tenant_list[0])
self.storage.push(data, tenant_list[0])
half_duration = int(data_duration / 2)
for i in range(data_ts,
data_ts + half_duration,
3600):
data = self.create_fake_data(i, i + 3600)
self.storage.append(data, tenant_list[1])
for i in range(data_ts + half_duration + 3600,
data_ts + data_duration,
3600):
self.storage.nodata(i, i + 3600, tenant_list[1])
self.storage.push(data, tenant_list[1])
class NowStorageDataFixture(BaseStorageDataFixture):
@ -363,8 +358,8 @@ class NowStorageDataFixture(BaseStorageDataFixture):
begin + 3600 * 12,
3600):
data = self.create_fake_data(i, i + 3600)
self.storage.append(data,
'3d9a1b33-482f-42fd-aef9-b575a3da9369')
self.storage.push(data,
'3d9a1b33-482f-42fd-aef9-b575a3da9369')
class CORSConfigFixture(fixture.GabbiFixture):
@ -402,6 +397,13 @@ class MetricsConfFixture(fixture.GabbiFixture):
ck_utils.load_conf = self._original_function
class SkipIfFunctional(fixture.GabbiFixture):
def start_fixture(self):
if is_functional_test():
raise SkipTest
def setup_app():
messaging.setup()
# FIXME(sheeprine): Extension fixtures are interacting with transformers

View File

@ -2,6 +2,7 @@ fixtures:
- ConfigFixtureKeystoneAuth
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: Can't query api without token

View File

@ -1,6 +1,7 @@
fixtures:
- ConfigFixture
- CORSConfigFixture
- SkipIfFunctional
tests:

View File

@ -2,6 +2,7 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: Can query api without auth

View File

@ -1,5 +1,6 @@
fixtures:
- ConfigFixture
- SkipIfFunctional
tests:
- name: test if / is publicly available

View File

@ -1,158 +0,0 @@
fixtures:
- ConfigFixture
- RatingModulesFixture
- QuoteFakeRPC
tests:
- name: reload list of modules available
url: /v1/billing/reload_modules
status: 204
- name: list all modules available
url: /v1/billing/modules
status: 200
response_json_paths:
$.modules.`len`: 3
$.modules[0].priority: 3
$.modules[0].module_id: "fake1"
$.modules[0].enabled: false
$.modules[0].description: "fake rating module"
$.modules[0].hot-config: false
$.modules[1].priority: 1
$.modules[1].module_id: "fake2"
$.modules[1].enabled: false
$.modules[1].description: "fake rating module"
$.modules[1].hot-config: false
$.modules[2].priority: 2
$.modules[2].module_id: "fake3"
$.modules[2].enabled: false
$.modules[2].description: "fake rating module"
$.modules[2].hot-config: false
- name: get information of one module
url: /v1/billing/modules/fake2
status: 200
response_json_paths:
$.priority: 1
$.module_id: "fake2"
$.enabled: false
$.description: "fake rating module"
$.hot-config: false
- name: get information of an unknown module
url: /v1/billing/modules/fakb
status: 404
response_strings:
- "Module not found."
- name: change priority of a module
url: /v1/billing/modules/fake3
method: PUT
request_headers:
content-type: application/json
x-roles: admin
data:
module_id: "fake3"
priority: 5
status: 302
response_headers:
location: "$SCHEME://$NETLOC/v1/billing/modules/fake3"
- name: get information of the modified module (priority)
url: $LOCATION
status: 200
response_json_paths:
$.priority: 5
$.module_id: "fake3"
$.enabled: false
$.description: "fake rating module"
$.hot-config: false
- name: change enabled status of a module
url: /v1/billing/modules/fake3
method: PUT
request_headers:
content-type: application/json
x-roles: admin
data:
module_id: "fake3"
enabled: true
status: 302
response_headers:
location: "$SCHEME://$NETLOC/v1/billing/modules/fake3"
- name: get information of the modified module (status)
url: $LOCATION
status: 200
response_json_paths:
$.priority: 5
$.module_id: "fake3"
$.enabled: true
$.description: "fake rating module"
$.hot-config: false
- name: change status and priority of a module
url: /v1/billing/modules/fake3
method: PUT
request_headers:
content-type: application/json
x-roles: admin
data:
module_id: "fake3"
priority: 3
enabled: false
status: 302
response_headers:
location: "$SCHEME://$NETLOC/v1/billing/modules/fake3"
- name: get information of the modified module (both)
url: $LOCATION
status: 200
response_json_paths:
$.priority: 3
$.module_id: "fake3"
$.enabled: false
$.description: "fake rating module"
$.hot-config: false
- name: get a quote for a resource description
url: /v1/billing/quote
method: POST
request_headers:
content-type: application/json
x-roles: admin
data:
resources:
- service: "cpu"
volume: "1.0"
desc:
test: 1
status: 200
response_strings:
- "1.0"
- name: module without custom API should use notconfigurable controller (GET)
url: /v1/billing/module_config/fake1
status: 409
response_strings:
- "Module is not configurable"
- name: module without custom API should use notconfigurable controller (POST)
url: /v1/billing/module_config/fake1
method: POST
status: 409
response_strings:
- "Module is not configurable"
- name: module without custom API should use notconfigurable controller (PUT)
url: /v1/billing/module_config/fake1
method: PUT
status: 409
response_strings:
- "Module is not configurable"
- name: verify module exposes its custom API
url: /v1/billing/module_config/fake2/test
status: 200
response_strings:
- "OK"

View File

@ -1,5 +1,6 @@
fixtures:
- ConfigFixture
- SkipIfFunctional
tests:

View File

@ -1,6 +1,7 @@
fixtures:
- ConfigFixture
- MetricsConfFixture
- SkipIfFunctional
tests:
- name: get config

View File

@ -2,6 +2,7 @@ fixtures:
- ConfigFixture
- RatingModulesFixture
- QuoteFakeRPC
- SkipIfFunctional
tests:
- name: reload list of modules available

View File

@ -2,13 +2,14 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: get period with two tenants
url: /v1/report/tenants
query_parameters:
begin: "2015-01-01T00:00:00"
end: "2015-01-04T00:00:00"
begin: "2015-01-04T00:00:00"
end: "2015-01-05T00:00:00"
status: 200
response_strings:
- "8f82cc70-e50c-466e-8624-24bdea811375"
@ -46,7 +47,7 @@ tests:
end: "2015-02-04T00:00:00"
status: 200
response_strings:
- "121.014"
- "122.472"
- name: get total for a period filtering on first tenant
url: /v1/report/total
@ -56,7 +57,7 @@ tests:
tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
status: 200
response_strings:
- "80.19"
- "81.648"
- name: get total for a period filtering on second tenant
url: /v1/report/total
@ -76,7 +77,7 @@ tests:
service: "cpu"
status: 200
response_strings:
- "110.971"
- "112.308"
- name: get total for a period filtering on image service
url: /v1/report/total
@ -86,7 +87,7 @@ tests:
service: "image.size"
status: 200
response_strings:
- "10.043"
- "10.164"
- name: get total for a period filtering on compute service and tenant
url: /v1/report/total
@ -122,7 +123,7 @@ tests:
$.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "80.19"
$.summary[1].rate: "81.648"
$.summary[1].res_type: "ALL"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].begin: "2015-01-01T00:00:00"
@ -143,7 +144,7 @@ tests:
$.summary[0].res_type: "cpu"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "73.535"
$.summary[1].rate: "74.872"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].res_type: "cpu"
$.summary[1].begin: "2015-01-01T00:00:00"
@ -158,16 +159,16 @@ tests:
status: 200
response_json_paths:
$.summary.`len`: 2
$.summary[0].rate: "10.043"
$.summary[0].res_type: "image.size"
$.summary[0].tenant_id: "ALL"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "110.971"
$.summary[1].res_type: "cpu"
$.summary[1].tenant_id: "ALL"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
$.summary[/res_type][0].rate: "112.308"
$.summary[/res_type][0].res_type: "cpu"
$.summary[/res_type][0].tenant_id: "ALL"
$.summary[/res_type][0].begin: "2015-01-01T00:00:00"
$.summary[/res_type][0].end: "2015-02-04T00:00:00"
$.summary[/res_type][1].rate: "10.164"
$.summary[/res_type][1].res_type: "image.size"
$.summary[/res_type][1].tenant_id: "ALL"
$.summary[/res_type][1].begin: "2015-01-01T00:00:00"
$.summary[/res_type][1].end: "2015-02-04T00:00:00"
- name: get summary for a period of each service filtering on first tenant
url: /v1/report/summary
@ -179,16 +180,16 @@ tests:
status: 200
response_json_paths:
$.summary.`len`: 2
$.summary[0].rate: "6.655"
$.summary[0].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[0].res_type: "image.size"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "73.535"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].res_type: "cpu"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
$.summary[/res_type][0].rate: "74.872"
$.summary[/res_type][0].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[/res_type][0].res_type: "cpu"
$.summary[/res_type][0].begin: "2015-01-01T00:00:00"
$.summary[/res_type][0].end: "2015-02-04T00:00:00"
$.summary[/res_type][1].rate: "6.776"
$.summary[/res_type][1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[/res_type][1].res_type: "image.size"
$.summary[/res_type][1].begin: "2015-01-01T00:00:00"
$.summary[/res_type][1].end: "2015-02-04T00:00:00"
- name: get summary for a period of each service and tenant
url: /v1/report/summary
@ -204,7 +205,7 @@ tests:
$.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "6.655"
$.summary[1].rate: "6.776"
$.summary[1].res_type: "image.size"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].begin: "2015-01-01T00:00:00"
@ -214,7 +215,7 @@ tests:
$.summary[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[2].begin: "2015-01-01T00:00:00"
$.summary[2].end: "2015-02-04T00:00:00"
$.summary[3].rate: "73.535"
$.summary[3].rate: "74.872"
$.summary[3].res_type: "cpu"
$.summary[3].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[3].begin: "2015-01-01T00:00:00"

View File

@ -2,6 +2,7 @@ fixtures:
- ConfigFixture
- StorageDataFixture
- NowStorageDataFixture
- SkipIfFunctional
tests:
- name: fetch period with no data
@ -39,7 +40,7 @@ tests:
tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
status: 200
response_json_paths:
$.dataframes.`len`: 110
$.dataframes.`len`: 112
- name: fetch data for the first tenant without begin and end time
url: /v1/storage/dataframes
@ -47,7 +48,7 @@ tests:
tenant_id: "3d9a1b33-482f-42fd-aef9-b575a3da9369"
status: 200
response_json_paths:
$.dataframes.`len`: 22
$.dataframes.`len`: 24
- name: fetch data for the first tenant when begin time bigger than end time
url: /v1/storage/dataframes

View File

@ -1,5 +1,6 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:

View File

@ -1,5 +1,6 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:

View File

@ -1,6 +1,7 @@
fixtures:
- HashMapConfigFixture
- UUIDFixture
- SkipIfFunctional
tests:

View File

@ -1,5 +1,6 @@
fixtures:
- HashMapConfigFixture
- SkipIfFunctional
tests:

View File

@ -1,6 +1,7 @@
fixtures:
- PyScriptsConfigFixture
- UUIDFixture
- SkipIfFunctional
tests:

View File

@ -21,6 +21,7 @@ from gabbi import driver
from cloudkitty.tests.gabbi import fixtures
TESTS_DIR = 'gabbits'

View File

@ -18,6 +18,8 @@
import copy
import decimal
from oslo_utils import uuidutils
from cloudkitty import utils as ck_utils
TENANT = 'f266f30b11f246b589fd266f85eeec39'
@ -290,3 +292,119 @@ PROMETHEUS_EMPTY_RESP_INSTANT_QUERY = {
"result": [],
}
}
V2_STORAGE_SAMPLE = {
"instance": {
"vol": {
"unit": "instance",
"qty": 1.0,
},
"rating": {
"price": decimal.Decimal(2.5),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"flavor": "m1.nano",
"flavor_id": "42",
},
},
"image.size": {
"vol": {
"unit": "MiB",
"qty": 152.0,
},
"rating": {
"price": decimal.Decimal(0.152),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"disk_format": "qcow2",
},
},
"volume.size": {
"vol": {
"unit": "GiB",
"qty": 20.0,
},
"rating": {
"price": decimal.Decimal(1.2),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"volume_type": "ceph-region1"
},
},
"network.outgoing.bytes": {
"vol": {
"unit": "MB",
"qty": 12345.6,
},
"rating": {
"price": decimal.Decimal(0.00123456),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"instance_id": uuidutils.generate_uuid(),
},
},
"network.incoming.bytes": {
"vol": {
"unit": "MB",
"qty": 34567.8,
},
"rating": {
"price": decimal.Decimal(0.00345678),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"instance_id": uuidutils.generate_uuid(),
},
},
"ip.floating": {
"vol": {
"unit": "ip",
"qty": 1.0,
},
"rating": {
"price": decimal.Decimal(0.01),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"state": "attached",
},
},
"radosgw.objects.size": {
"vol": {
"unit": "GiB",
"qty": 3.0,
},
"rating": {
"price": decimal.Decimal(0.30),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_METADATA['project_id'],
},
"metadata": {
"object_id": uuidutils.generate_uuid(),
},
}
}
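To illustrate the new dataframe format, a short sketch consuming this sample: rating information lives under 'rating', while the former 'desc' field is split into 'groupby' and 'metadata':

import decimal

from cloudkitty.tests import samples

total = sum((point['rating']['price']
             for point in samples.V2_STORAGE_SAMPLE.values()),
            decimal.Decimal(0))
for name, point in samples.V2_STORAGE_SAMPLE.items():
    # 'groupby' holds aggregation keys, 'metadata' free-form details.
    print(name, point['vol']['qty'], point['vol']['unit'],
          point['groupby']['project_id'])
print(total)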

View File

@ -1,501 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import copy
import mock
import sqlalchemy
import testscenarios
from cloudkitty import storage
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import test_utils
from cloudkitty import utils as ck_utils
class StorageTest(tests.TestCase):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy')),
('hybrid', dict(storage_backend='hybrid'))]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls.scenarios,
cls.storage_scenarios)
@mock.patch('cloudkitty.storage.hybrid.backends.gnocchi.gclient')
@mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
def setUp(self, gclient_mock):
super(StorageTest, self).setUp()
self._tenant_id = samples.TENANT
self._other_tenant_id = '8d3ae50089ea4142-9c6e1269db6a0b64'
self.conf.set_override('backend', self.storage_backend, 'storage')
self.storage = storage.get_storage(conf=test_utils.load_conf())
self.storage.init()
def insert_data(self):
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._tenant_id)
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._other_tenant_id)
self.storage.commit(self._tenant_id)
self.storage.commit(self._other_tenant_id)
def insert_different_data_two_tenants(self):
working_data = copy.deepcopy(samples.RATED_DATA)
del working_data[1]
self.storage.append(working_data, self._tenant_id)
working_data = copy.deepcopy(samples.RATED_DATA)
del working_data[0]
self.storage.append(working_data, self._other_tenant_id)
self.storage.commit(self._tenant_id)
self.storage.commit(self._other_tenant_id)
# Filtering
def test_filter_period(self):
working_data = copy.deepcopy(samples.RATED_DATA)
usage_start, data = self.storage._filter_period(working_data)
self.assertEqual(samples.FIRST_PERIOD_BEGIN, usage_start)
self.assertEqual(samples.RATED_DATA[0]['usage'], data)
expected_remaining_data = [{
"period": samples.SECOND_PERIOD,
"usage": samples.RATED_DATA[1]['usage']}]
self.assertEqual(expected_remaining_data, working_data)
usage_start, data = self.storage._filter_period(working_data)
self.assertEqual(samples.SECOND_PERIOD_BEGIN, usage_start)
self.assertEqual(samples.RATED_DATA[1]['usage'], data)
self.assertEqual([], working_data)
# State
def test_get_state_when_nothing_in_storage(self):
state = self.storage.get_state()
self.assertIsNone(state)
def test_get_latest_global_state(self):
self.insert_different_data_two_tenants()
state = self.storage.get_state()
self.assertEqual(samples.SECOND_PERIOD_BEGIN, state)
def test_get_state_on_rated_tenant(self):
self.insert_different_data_two_tenants()
state = self.storage.get_state(self._tenant_id)
self.assertEqual(samples.FIRST_PERIOD_BEGIN, state)
state = self.storage.get_state(self._other_tenant_id)
self.assertEqual(samples.SECOND_PERIOD_BEGIN, state)
def test_get_state_on_no_data_frame(self):
self.storage.nodata(
samples.FIRST_PERIOD_BEGIN,
samples.FIRST_PERIOD_END,
self._tenant_id)
self.storage.commit(self._tenant_id)
state = self.storage.get_state(self._tenant_id)
self.assertEqual(samples.FIRST_PERIOD_BEGIN, state)
class StorageDataframeTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Queries
# Data
def test_get_no_frame_when_nothing_in_storage(self):
self.assertRaises(
storage.NoTimeFrame,
self.storage.get_time_frame,
begin=samples.FIRST_PERIOD_BEGIN - 3600,
end=samples.FIRST_PERIOD_BEGIN)
def test_get_frame_filter_outside_data(self):
self.insert_different_data_two_tenants()
self.assertRaises(
storage.NoTimeFrame,
self.storage.get_time_frame,
begin=samples.FIRST_PERIOD_BEGIN - 3600,
end=samples.FIRST_PERIOD_BEGIN)
def test_get_frame_without_filter_but_timestamp(self):
self.insert_different_data_two_tenants()
data = self.storage.get_time_frame(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END)
self.assertEqual(3, len(data))
def test_get_frame_on_one_period(self):
self.insert_different_data_two_tenants()
data = self.storage.get_time_frame(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END)
self.assertEqual(2, len(data))
def test_get_frame_on_one_period_and_one_tenant(self):
self.insert_different_data_two_tenants()
data = self.storage.get_time_frame(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END,
tenant_id=self._tenant_id)
self.assertEqual(2, len(data))
def test_get_frame_on_one_period_and_one_tenant_outside_data(self):
self.insert_different_data_two_tenants()
self.assertRaises(
storage.NoTimeFrame,
self.storage.get_time_frame,
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END,
tenant_id=self._other_tenant_id)
def test_get_frame_on_two_periods(self):
self.insert_different_data_two_tenants()
data = self.storage.get_time_frame(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END)
self.assertEqual(3, len(data))
class StorageTotalTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Total
def test_get_empty_total(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN - 3600)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end)
self.assertEqual(1, len(total))
self.assertIsNone(total[0]["rate"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_without_filter_but_timestamp(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end)
# FIXME(sheeprine): floating point error (transition to decimal)
self.assertEqual(1, len(total))
self.assertEqual(1.9473999999999998, total[0]["rate"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_one_period(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end)
self.assertEqual(1, len(total))
self.assertEqual(1.1074, total[0]["rate"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_one_period_and_one_tenant(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end,
tenant_id=self._tenant_id)
self.assertEqual(1, len(total))
self.assertEqual(0.5537, total[0]["rate"])
self.assertEqual(self._tenant_id, total[0]["tenant_id"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_service(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end,
service='instance')
self.assertEqual(1, len(total))
self.assertEqual(0.84, total[0]["rate"])
self.assertEqual('instance', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_groupby_tenant(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end,
groupby="tenant_id")
self.assertEqual(2, len(total))
self.assertEqual(0.9737, total[0]["rate"])
self.assertEqual(self._other_tenant_id, total[0]["tenant_id"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(0.9737, total[1]["rate"])
self.assertEqual(self._tenant_id, total[1]["tenant_id"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
def test_get_total_groupby_restype(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end,
groupby="res_type")
self.assertEqual(2, len(total))
self.assertEqual(0.2674, total[0]["rate"])
self.assertEqual('image.size', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(1.68, total[1]["rate"])
self.assertEqual('instance', total[1]["res_type"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
def test_get_total_groupby_tenant_and_restype(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.get_total(
begin=begin,
end=end,
groupby="tenant_id,res_type")
self.assertEqual(4, len(total))
self.assertEqual(0.1337, total[0]["rate"])
self.assertEqual(self._other_tenant_id, total[0]["tenant_id"])
self.assertEqual('image.size', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(0.1337, total[1]["rate"])
self.assertEqual(self._tenant_id, total[1]["tenant_id"])
self.assertEqual('image.size', total[1]["res_type"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
self.assertEqual(0.84, total[2]["rate"])
self.assertEqual(self._other_tenant_id, total[2]["tenant_id"])
self.assertEqual('instance', total[2]["res_type"])
self.assertEqual(begin, total[2]["begin"])
self.assertEqual(end, total[2]["end"])
self.assertEqual(0.84, total[3]["rate"])
self.assertEqual(self._tenant_id, total[3]["tenant_id"])
self.assertEqual('instance', total[3]["res_type"])
self.assertEqual(begin, total[3]["begin"])
self.assertEqual(end, total[3]["end"])
class StorageTenantTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Tenants
def test_get_empty_tenant_with_nothing_in_storage(self):
tenants = self.storage.get_tenants(
begin=ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN),
end=ck_utils.ts2dt(samples.SECOND_PERIOD_BEGIN))
self.assertEqual([], tenants)
def test_get_empty_tenant_list(self):
self.insert_data()
tenants = self.storage.get_tenants(
begin=ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN - 3600),
end=ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN))
self.assertEqual([], tenants)
def test_get_tenants_filtering_on_period(self):
self.insert_different_data_two_tenants()
tenants = self.storage.get_tenants(
begin=ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN),
end=ck_utils.ts2dt(samples.SECOND_PERIOD_END))
self.assertListEqual(
[self._tenant_id, self._other_tenant_id],
tenants)
tenants = self.storage.get_tenants(
begin=ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN),
end=ck_utils.ts2dt(samples.FIRST_PERIOD_END))
self.assertListEqual(
[self._tenant_id],
tenants)
tenants = self.storage.get_tenants(
begin=ck_utils.ts2dt(samples.SECOND_PERIOD_BEGIN),
end=ck_utils.ts2dt(samples.SECOND_PERIOD_END))
self.assertListEqual(
[self._other_tenant_id],
tenants)
class StorageDataIntegrityTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Data integrity
def test_has_data_flag_behaviour(self):
self.assertNotIn(self._tenant_id, self.storage._has_data)
self.storage.nodata(
samples.FIRST_PERIOD_BEGIN,
samples.FIRST_PERIOD_END,
self._tenant_id)
self.assertNotIn(self._tenant_id, self.storage._has_data)
working_data = copy.deepcopy(samples.RATED_DATA)
working_data = [working_data[1]]
self.storage.append(working_data, self._tenant_id)
self.assertTrue(self.storage._has_data[self._tenant_id])
self.storage.commit(self._tenant_id)
self.assertNotIn(self._tenant_id, self.storage._has_data)
def test_notify_no_data(self):
self.storage.nodata(
samples.FIRST_PERIOD_BEGIN,
samples.FIRST_PERIOD_END,
self._tenant_id)
working_data = copy.deepcopy(samples.RATED_DATA)
working_data = [working_data[1]]
self.storage.append(working_data, self._tenant_id)
kwargs = {
'begin': samples.FIRST_PERIOD_BEGIN,
'end': samples.FIRST_PERIOD_END,
'tenant_id': self._tenant_id}
self.assertRaises(
storage.NoTimeFrame,
self.storage.get_time_frame,
**kwargs)
kwargs['res_type'] = '_NO_DATA_'
stored_data = self.storage.get_time_frame(**kwargs)
self.assertEqual(1, len(stored_data))
self.assertEqual(1, len(stored_data[0]['usage']))
self.assertIn('_NO_DATA_', stored_data[0]['usage'])
def test_send_nodata_between_data(self):
working_data = copy.deepcopy(samples.RATED_DATA)
for period in working_data:
for service, data in sorted(period['usage'].items()):
sub_data = [{
'period': period['period'],
'usage': {
service: data}}]
self.storage.append(sub_data, self._tenant_id)
if service == 'compute':
self.storage.nodata(
period['period']['begin'],
period['period']['end'],
self._tenant_id)
self.storage.commit(self._tenant_id)
self.assertRaises(
storage.NoTimeFrame,
self.storage.get_time_frame,
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END,
res_type='_NO_DATA_')
def test_auto_commit_on_period_change(self):
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._tenant_id)
stored_data = self.storage.get_time_frame(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END)
self.assertEqual(2, len(stored_data))
expected_data = copy.deepcopy(samples.STORED_DATA)
# We only stored the first timeframe, the second one is waiting for a
# commit or an append with the next timeframe.
del expected_data[2]
# NOTE(sheeprine): Quick and dirty sort (ensure result consistency,
# order is not significant to the test result)
if 'image.size' in stored_data[0]['usage']:
stored_data[0]['usage'], stored_data[1]['usage'] = (
stored_data[1]['usage'], stored_data[0]['usage'])
if 'image.size' in expected_data[0]['usage']:
expected_data[0]['usage'], expected_data[1]['usage'] = (
expected_data[1]['usage'], expected_data[0]['usage'])
self.assertEqual(
expected_data,
stored_data)
def test_create_session_on_append(self):
self.assertNotIn(self._tenant_id, self.storage._session)
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._tenant_id)
self.assertIn(self._tenant_id, self.storage._session)
self.assertIsInstance(
self.storage._session[self._tenant_id],
sqlalchemy.orm.session.Session)
def test_delete_session_on_commit(self):
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._tenant_id)
self.storage.commit(self._tenant_id)
self.assertNotIn(self._tenant_id, self.storage._session)
def test_update_period_on_append(self):
self.assertNotIn(self._tenant_id, self.storage.usage_start)
self.assertNotIn(self._tenant_id, self.storage.usage_start_dt)
self.assertNotIn(self._tenant_id, self.storage.usage_end)
self.assertNotIn(self._tenant_id, self.storage.usage_end_dt)
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append([working_data[0]], self._tenant_id)
self.assertEqual(
self.storage.usage_start[self._tenant_id],
samples.FIRST_PERIOD_BEGIN)
self.assertEqual(
self.storage.usage_start_dt[self._tenant_id],
ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN))
self.assertEqual(
self.storage.usage_end[self._tenant_id],
samples.FIRST_PERIOD_END)
self.assertEqual(
self.storage.usage_end_dt[self._tenant_id],
ck_utils.ts2dt(samples.FIRST_PERIOD_END))
self.storage.append([working_data[1]], self._tenant_id)
self.assertEqual(
self.storage.usage_start[self._tenant_id],
samples.SECOND_PERIOD_BEGIN)
self.assertEqual(
self.storage.usage_start_dt[self._tenant_id],
ck_utils.ts2dt(samples.SECOND_PERIOD_BEGIN))
self.assertEqual(
self.storage.usage_end[self._tenant_id],
samples.SECOND_PERIOD_END)
self.assertEqual(
self.storage.usage_end_dt[self._tenant_id],
ck_utils.ts2dt(samples.SECOND_PERIOD_END))
def test_clear_period_info_on_commit(self):
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.append(working_data, self._tenant_id)
self.storage.commit(self._tenant_id)
self.assertNotIn(self._tenant_id, self.storage.usage_start)
self.assertNotIn(self._tenant_id, self.storage.usage_start_dt)
self.assertNotIn(self._tenant_id, self.storage.usage_end)
self.assertNotIn(self._tenant_id, self.storage.usage_end_dt)
StorageTest.generate_scenarios()
StorageTotalTest.generate_scenarios()
StorageTenantTest.generate_scenarios()
StorageDataframeTest.generate_scenarios()
StorageDataIntegrityTest.generate_scenarios()

View File


@ -15,14 +15,15 @@
#
# @author: Luka Peschke
#
import mock
import testtools
from gnocchiclient import exceptions as gexc
from cloudkitty import storage
from cloudkitty import tests
from cloudkitty.tests import test_utils
from cloudkitty.tests.utils import is_functional_test
class BaseHybridStorageTest(tests.TestCase):
@ -31,9 +32,10 @@ class BaseHybridStorageTest(tests.TestCase):
def setUp(self):
super(BaseHybridStorageTest, self).setUp()
self.conf.set_override('backend', 'hybrid', 'storage')
self.conf.set_override('version', '1', 'storage')
self.storage = storage.get_storage(conf=test_utils.load_conf())
with mock.patch.object(
self.storage._hybrid_backend, 'init'):
self.storage.storage._hybrid_backend, 'init'):
self.storage.init()
@ -54,6 +56,7 @@ class PermissiveDict(object):
return self.value == other.get(self.key)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class HybridStorageTestGnocchi(BaseHybridStorageTest):
def setUp(self):
@ -63,14 +66,14 @@ class HybridStorageTestGnocchi(BaseHybridStorageTest):
super(HybridStorageTestGnocchi, self).tearDown()
def _init_storage(self, archive_policy=False, res_type=False):
with mock.patch.object(self.storage._hybrid_backend._conn,
with mock.patch.object(self.storage.storage._hybrid_backend._conn,
'archive_policy',
spec=['get', 'create']) as pol_mock:
if not archive_policy:
pol_mock.get.side_effect = gexc.ArchivePolicyNotFound
else:
pol_mock.create.side_effect = gexc.ArchivePolicyAlreadyExists
with mock.patch.object(self.storage._hybrid_backend._conn,
with mock.patch.object(self.storage.storage._hybrid_backend._conn,
'resource_type',
spec=['get', 'create']) as rtype_mock:
if not res_type:
@ -80,7 +83,8 @@ class HybridStorageTestGnocchi(BaseHybridStorageTest):
= gexc.ResourceTypeAlreadyExists
self.storage.init()
rtype_data = self.storage._hybrid_backend._resource_type_data
rtype_data = (self.storage.storage
._hybrid_backend._resource_type_data)
rtype_calls = list()
for val in rtype_data.values():
rtype_calls.append(
@ -91,18 +95,20 @@ class HybridStorageTestGnocchi(BaseHybridStorageTest):
rtype_mock.create.assert_has_calls(
rtype_calls, any_order=True)
pol_mock.get.assert_called_once_with(
self.storage._hybrid_backend._archive_policy_name)
self.storage.storage._hybrid_backend._archive_policy_name)
if archive_policy:
pol_mock.create.assert_not_called()
else:
apolicy = {
'name': self.storage._hybrid_backend._archive_policy_name,
'name': (self.storage.storage
._hybrid_backend._archive_policy_name),
'back_window': 0,
'aggregation_methods':
['std', 'count', 'min', 'max', 'sum', 'mean'],
}
apolicy['definition'] = \
self.storage._hybrid_backend._archive_policy_definition
apolicy['definition'] = (self.storage.storage
._hybrid_backend
._archive_policy_definition)
pol_mock.create.assert_called_once_with(apolicy)
def test_init_no_res_type_no_policy(self):

View File

@ -0,0 +1,276 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import copy
import testtools
import mock
import testscenarios
from cloudkitty import storage
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import test_utils
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils as ck_utils
class StorageTest(tests.TestCase):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy')),
('hybrid', dict(storage_backend='hybrid'))]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls.scenarios,
cls.storage_scenarios)
@mock.patch('cloudkitty.storage.v1.hybrid.backends.gnocchi.gclient')
@mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
def setUp(self, gclient_mock):
super(StorageTest, self).setUp()
self._tenant_id = samples.TENANT
self._other_tenant_id = '8d3ae50089ea4142-9c6e1269db6a0b64'
self.conf.set_override('backend', self.storage_backend, 'storage')
self.conf.set_override('version', '1', 'storage')
self.storage = storage.get_storage(conf=test_utils.load_conf())
self.storage.init()
def insert_data(self):
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.push(working_data, self._tenant_id)
working_data = copy.deepcopy(samples.RATED_DATA)
self.storage.push(working_data, self._other_tenant_id)
def insert_different_data_two_tenants(self):
working_data = copy.deepcopy(samples.RATED_DATA)
del working_data[1]
self.storage.push(working_data, self._tenant_id)
working_data = copy.deepcopy(samples.RATED_DATA)
del working_data[0]
self.storage.push(working_data, self._other_tenant_id)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class StorageDataframeTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Queries
# Data
def test_get_no_frame_when_nothing_in_storage(self):
self.assertRaises(
storage.NoTimeFrame,
self.storage.retrieve,
begin=samples.FIRST_PERIOD_BEGIN - 3600,
end=samples.FIRST_PERIOD_BEGIN)
def test_get_frame_filter_outside_data(self):
self.insert_different_data_two_tenants()
self.assertRaises(
storage.NoTimeFrame,
self.storage.retrieve,
begin=samples.FIRST_PERIOD_BEGIN - 3600,
end=samples.FIRST_PERIOD_BEGIN)
def test_get_frame_without_filter_but_timestamp(self):
self.insert_different_data_two_tenants()
data = self.storage.retrieve(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END)['dataframes']
self.assertEqual(3, len(data))
def test_get_frame_on_one_period(self):
self.insert_different_data_two_tenants()
data = self.storage.retrieve(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END)['dataframes']
self.assertEqual(2, len(data))
def test_get_frame_on_one_period_and_one_tenant(self):
self.insert_different_data_two_tenants()
group_filters = {'project_id': self._tenant_id}
data = self.storage.retrieve(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END,
group_filters=group_filters)['dataframes']
self.assertEqual(2, len(data))
def test_get_frame_on_one_period_and_one_tenant_outside_data(self):
self.insert_different_data_two_tenants()
group_filters = {'project_id': self._other_tenant_id}
self.assertRaises(
storage.NoTimeFrame,
self.storage.retrieve,
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.FIRST_PERIOD_END,
group_filters=group_filters)
def test_get_frame_on_two_periods(self):
self.insert_different_data_two_tenants()
data = self.storage.retrieve(
begin=samples.FIRST_PERIOD_BEGIN,
end=samples.SECOND_PERIOD_END)['dataframes']
self.assertEqual(3, len(data))
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class StorageTotalTest(StorageTest):
storage_scenarios = [
('sqlalchemy', dict(storage_backend='sqlalchemy'))]
# Total
def test_get_empty_total(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN - 3600)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end)
self.assertEqual(1, len(total))
self.assertEqual(total[0]["rate"], 0)
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_without_filter_but_timestamp(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end)
# FIXME(sheeprine): floating point error (transition to decimal)
self.assertEqual(1, len(total))
self.assertEqual(1.9473999999999998, total[0]["rate"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_one_period(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end)
self.assertEqual(1, len(total))
self.assertEqual(1.1074, total[0]["rate"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_one_period_and_one_tenant(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
group_filters = {'project_id': self._tenant_id}
total = self.storage.total(
begin=begin,
end=end,
group_filters=group_filters)
self.assertEqual(1, len(total))
self.assertEqual(0.5537, total[0]["rate"])
self.assertEqual(self._tenant_id, total[0]["tenant_id"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_service(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end,
metric_types='instance')
self.assertEqual(1, len(total))
self.assertEqual(0.84, total[0]["rate"])
self.assertEqual('instance', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
def test_get_total_groupby_tenant(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end,
groupby=['project_id'])
self.assertEqual(2, len(total))
self.assertEqual(0.9737, total[0]["rate"])
self.assertEqual(self._other_tenant_id, total[0]["tenant_id"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(0.9737, total[1]["rate"])
self.assertEqual(self._tenant_id, total[1]["tenant_id"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
def test_get_total_groupby_restype(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end,
groupby=['type'])
self.assertEqual(2, len(total))
self.assertEqual(0.2674, total[0]["rate"])
self.assertEqual('image.size', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(1.68, total[1]["rate"])
self.assertEqual('instance', total[1]["res_type"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
def test_get_total_groupby_tenant_and_restype(self):
begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
self.insert_data()
total = self.storage.total(
begin=begin,
end=end,
groupby=['project_id', 'type'])
self.assertEqual(4, len(total))
self.assertEqual(0.1337, total[0]["rate"])
self.assertEqual(self._other_tenant_id, total[0]["tenant_id"])
self.assertEqual('image.size', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(0.1337, total[1]["rate"])
self.assertEqual(self._tenant_id, total[1]["tenant_id"])
self.assertEqual('image.size', total[1]["res_type"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
self.assertEqual(0.84, total[2]["rate"])
self.assertEqual(self._other_tenant_id, total[2]["tenant_id"])
self.assertEqual('instance', total[2]["res_type"])
self.assertEqual(begin, total[2]["begin"])
self.assertEqual(end, total[2]["end"])
self.assertEqual(0.84, total[3]["rate"])
self.assertEqual(self._tenant_id, total[3]["tenant_id"])
self.assertEqual('instance', total[3]["res_type"])
self.assertEqual(begin, total[3]["begin"])
self.assertEqual(end, total[3]["end"])
if not is_functional_test():
StorageTest.generate_scenarios()
StorageTotalTest.generate_scenarios()
StorageDataframeTest.generate_scenarios()

View File


@ -0,0 +1,387 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
import copy
from datetime import datetime
import decimal
import fixtures
import testtools
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import uuidutils
from cloudkitty import storage
from cloudkitty.tests import samples
from cloudkitty import utils as ck_utils
CONF = None
def _init_conf():
global CONF
if not CONF:
CONF = cfg.CONF
CONF(args=[], project='cloudkitty',
validate_default_values=True,
default_config_files=['/etc/cloudkitty/cloudkitty.conf'])
def get_storage_data(min_length=10,
nb_projects=2,
project_ids=None,
start=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1)):
if isinstance(start, datetime):
start = ck_utils.dt2ts(start)
if isinstance(end, datetime):
end = ck_utils.dt2ts(end)
if not project_ids:
project_ids = [uuidutils.generate_uuid() for i in range(nb_projects)]
elif not isinstance(project_ids, list):
project_ids = [project_ids]
usage = {}
for metric_name, sample in samples.V2_STORAGE_SAMPLE.items():
dataframes = []
for project_id in project_ids:
data = [copy.deepcopy(sample)
# for i in range(min_length + random.randint(1, 10))]
for i in range(1)]
for elem in data:
elem['groupby']['id'] = uuidutils.generate_uuid()
elem['groupby']['project_id'] = project_id
dataframes += data
usage[metric_name] = dataframes
return {
'usage': usage,
'period': {
'begin': start,
'end': end
}
}
class BaseFunctionalStorageTest(testtools.TestCase):
# Name of the storage backend to test
storage_backend = None
storage_version = 0
@classmethod
def setUpClass(cls):
_init_conf()
cls._conf_fixture = config_fixture.Config(conf=CONF)
cls._conf_fixture.set_config_files(
['/etc/cloudkitty/cloudkitty.conf'])
cls.conf = cls._conf_fixture.conf
cls.conf.set_override('version', cls.storage_version, 'storage')
cls.conf.set_override('backend', cls.storage_backend, 'storage')
cls.storage = storage.get_storage()
cls.storage.init()
cls.project_ids, cls.data = cls.gen_data_separate_projects(3)
for i, project_data in enumerate(cls.data):
cls.storage.push(project_data, cls.project_ids[i])
# Appending a second period of data for the first project
data_next_period = copy.deepcopy(cls.data[0])
data_next_period['period']['begin'] += 3600
data_next_period['period']['end'] += 3600
cls.storage.push(data_next_period, cls.project_ids[0])
cls.project_ids.append(cls.project_ids[0])
cls.data.append(data_next_period)
cls.wait_for_backend()
@classmethod
def tearDownClass(cls):
cls.cleanup_backend()
# cls._conf_fixture.cleanUp()
# pass
def setUp(self):
super(BaseFunctionalStorageTest, self).setUp()
self.useFixture(fixtures.FakeLogger())
self.useFixture(self._conf_fixture)
def cleanUp(self):
super(BaseFunctionalStorageTest, self).cleanUp()
@classmethod
def wait_for_backend(cls):
"""Function waiting for the storage backend to be ready.
Ex: wait for gnocchi to have processed all metrics
"""
@classmethod
def cleanup_backend(cls):
"""Function deleting everything from the storage backend"""
@staticmethod
def gen_data_separate_projects(nb_projects):
project_ids = [uuidutils.generate_uuid() for i in range(nb_projects)]
data = [get_storage_data(
project_ids=project_ids[i], nb_projects=1)
for i in range(nb_projects)]
return project_ids, data
def test_get_retention(self):
retention = self.storage.get_retention().days * 24
self.assertEqual(retention, self.conf.storage.retention_period)
@staticmethod
def _validate_filters(comp, filters=None, group_filters=None):
if group_filters:
for k, v in group_filters.items():
if comp['groupby'].get(k) != v:
return False
if filters:
for k, v in filters.items():
if comp['metadata'].get(k) != v:
return False
return True
def _get_expected_total(self, begin=None, end=None,
filters=None, group_filters=None):
total = decimal.Decimal(0)
for dataframes in self.data:
if (ck_utils.ts2dt(dataframes['period']['begin']) >= end
or ck_utils.ts2dt(dataframes['period']['end']) <= begin):
continue
for df in dataframes['usage'].values():
for elem in df:
if self._validate_filters(elem, filters, group_filters):
total += elem['rating']['price']
return total
def _compare_totals(self, expected_total, total):
self.assertEqual(len(total), len(expected_total))
for i in range(len(total)):
self.assertEqual(
round(expected_total[i], 5),
round(decimal.Decimal(total[i]['rate']), 5),
)
def test_get_total_all_projects_on_time_window_with_data_no_grouping(self):
expected_total = self._get_expected_total(begin=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1))
total = self.storage.total(begin=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1))
self.assertEqual(len(total), 1)
self.assertEqual(
round(expected_total, 5),
round(decimal.Decimal(total[0]['rate']), 5),
)
def test_get_total_one_project_on_time_window_with_data_no_grouping(self):
group_filters = {'project_id': self.project_ids[0]}
expected_total = self._get_expected_total(
begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 1),
group_filters=group_filters)
total = self.storage.total(begin=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1),
group_filters=group_filters)
self.assertEqual(len(total), 1)
self.assertEqual(
round(expected_total, 5),
round(decimal.Decimal(total[0]['rate']), 5),
)
def test_get_total_all_projects_window_with_data_group_by_project_id(self):
expected_total = []
for project_id in sorted(self.project_ids[:-1]):
group_filters = {'project_id': project_id}
expected_total.append(self._get_expected_total(
begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 1),
group_filters=group_filters))
total = self.storage.total(begin=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1),
groupby=['project_id'])
total = sorted(total, key=lambda k: k['project_id'])
self._compare_totals(expected_total, total)
def test_get_total_one_project_window_with_data_group_by_resource_id(self):
expected_total = []
for df in self.data[0]['usage'].values():
expected_total += copy.deepcopy(df)
for df in self.data[-1]['usage'].values():
for df_elem in df:
for elem in expected_total:
if elem['groupby'] == df_elem['groupby']:
elem['rating']['price'] += df_elem['rating']['price']
expected_total = sorted(
expected_total, key=lambda k: k['groupby']['id'])
expected_total = [i['rating']['price'] for i in expected_total]
total = self.storage.total(
begin=datetime(2018, 1, 1), end=datetime(2018, 1, 1, 2),
group_filters={'project_id': self.project_ids[0]},
groupby=['id'])
total = sorted(total, key=lambda k: k['id'])
self._compare_totals(expected_total, total)
def test_get_total_all_projects_group_by_resource_id_project_id(self):
expected_total = []
for data in self.data[:-1]:
for df in data['usage'].values():
expected_total += copy.deepcopy(df)
for df in self.data[-1]['usage'].values():
for elem in df:
for total_elem in expected_total:
if total_elem['groupby'] == elem['groupby']:
total_elem['rating']['price'] \
+= elem['rating']['price']
expected_total = sorted(
expected_total, key=lambda k: k['groupby']['id'])
expected_total = [i['rating']['price'] for i in expected_total]
total = self.storage.total(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
groupby=['id', 'project_id'])
total = sorted(total, key=lambda k: k['id'])
self._compare_totals(expected_total, total)
def test_get_total_all_projects_group_by_resource_type(self):
expected_total = {}
for data in self.data:
for res_type, df in data['usage'].items():
if expected_total.get(res_type):
expected_total[res_type] += sum(
elem['rating']['price'] for elem in df)
else:
expected_total[res_type] = sum(
elem['rating']['price'] for elem in df)
expected_total = [
expected_total[key] for key in sorted(expected_total.keys())]
total = self.storage.total(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
groupby=['type'])
total = sorted(total, key=lambda k: k['type'])
self._compare_totals(expected_total, total)
def test_get_total_one_project_group_by_resource_type(self):
expected_total = {}
for res_type, df in self.data[0]['usage'].items():
expected_total[res_type] = sum(
elem['rating']['price'] for elem in df)
expected_total = [
expected_total[key] for key in sorted(expected_total.keys())]
group_filters = {'project_id': self.project_ids[0]}
total = self.storage.total(
begin=datetime(2018, 1, 1),
end=datetime(2018, 1, 1, 1),
group_filters=group_filters,
groupby=['type'])
total = sorted(total, key=lambda k: k['type'])
self._compare_totals(expected_total, total)
def test_get_total_no_data_period(self):
total = self.storage.total(
begin=datetime(2018, 2, 1), end=datetime(2018, 2, 1, 1))
self.assertEqual(0, len(total))
def test_retrieve_all_projects_with_data(self):
expected_length = sum(
len(data['usage'].values()) for data in self.data)
frames = self.storage.retrieve(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
limit=1000)
self.assertEqual(expected_length, frames['total'])
self.assertEqual(2, len(frames['dataframes']))
def test_retrieve_one_project_with_data(self):
expected_length = len(self.data[0]['usage'].values()) \
+ len(self.data[-1]['usage'].values())
group_filters = {'project_id': self.project_ids[0]}
frames = self.storage.retrieve(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
group_filters=group_filters,
limit=1000)
self.assertEqual(expected_length, frames['total'])
self.assertEqual(2, len(frames['dataframes']))
for metric_type in self.data[0]['usage'].keys():
self.assertEqual(
len(frames['dataframes'][0]['usage'][metric_type]),
len(self.data[0]['usage'][metric_type]))
for metric_type in self.data[-1]['usage'].keys():
self.assertEqual(
len(frames['dataframes'][1]['usage'][metric_type]),
len(self.data[-1]['usage'][metric_type]))
def test_retrieve_pagination_one_project(self):
expected_length = len(self.data[0]['usage'].values()) \
+ len(self.data[-1]['usage'].values())
group_filters = {'project_id': self.project_ids[0]}
first_frames = self.storage.retrieve(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
group_filters=group_filters,
limit=5)
last_frames = self.storage.retrieve(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
group_filters=group_filters,
offset=5,
limit=1000)
all_frames = self.storage.retrieve(
begin=datetime(2018, 1, 1),
end=datetime(2018, 2, 1),
group_filters=group_filters,
paginate=False)
self.assertEqual(expected_length, first_frames['total'])
self.assertEqual(expected_length, last_frames['total'])
real_length = 0
paginated_measures = []
for frame in first_frames['dataframes'] + last_frames['dataframes']:
for measures in frame['usage'].values():
real_length += len(measures)
paginated_measures += measures
paginated_measures = sorted(
paginated_measures, key=lambda x: x['groupby']['id'])
all_measures = []
for frame in all_frames['dataframes']:
for measures in frame['usage'].values():
all_measures += measures
all_measures = sorted(
all_measures, key=lambda x: x['groupby']['id'])
self.assertEqual(expected_length, real_length)
self.assertEqual(paginated_measures, all_measures)

View File

@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
import testtools
from time import sleep
from gnocchiclient import exceptions as gexceptions
from oslo_log import log
from cloudkitty.tests.storage.v2 import base_functional
from cloudkitty.tests.utils import is_functional_test
LOG = log.getLogger(__name__)
@testtools.skipUnless(is_functional_test(), 'Test is not a functional test')
class GnocchiBaseFunctionalStorageTest(
base_functional.BaseFunctionalStorageTest):
storage_backend = 'gnocchi'
storage_version = 2
def setUp(self):
super(GnocchiBaseFunctionalStorageTest, self).setUp()
self.conf.import_group(
'storage_gnocchi', 'cloudkitty.storage.v2.gnocchi')
    @classmethod
    def _get_status(cls):
        # Number of measures gnocchi has not processed yet
        status = cls.storage._conn.status.get()
        return status['storage']['summary']['measures']
@classmethod
def wait_for_backend(cls):
while True:
status = cls._get_status()
if status == 0:
break
LOG.info('Waiting for gnocchi to have processed all measures, {} '
'left.'.format(status))
sleep(1)
@classmethod
def cleanup_backend(cls):
for res_type in cls.storage._get_ck_resource_types():
batch_query = {">=": {"started_at": "1970-01-01T01:00:00"}}
cls.storage._conn.resource.batch_delete(
batch_query, resource_type=res_type)
try:
cls.storage._conn.resource_type.delete(res_type)
except gexceptions.BadRequest:
pass
try:
cls.storage._conn.archive_policy.delete(
'cloudkitty_archive_policy')
except gexceptions.BadRequest:
pass

View File

@ -13,11 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import testtools
from cloudkitty.common import config as ck_config
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class ConfigTest(tests.TestCase):
def test_config(self):
ck_config.list_opts()

View File

@ -13,6 +13,7 @@
# under the License.
import sys
import testtools
import textwrap
import ddt
@ -21,8 +22,10 @@ import pep8
from cloudkitty.hacking import checks
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
@ddt.ddt
class HackingTestCase(tests.TestCase):
"""Hacking test cases

View File

@ -17,6 +17,7 @@
#
import copy
import decimal
import testtools
import mock
from oslo_utils import uuidutils
@ -24,6 +25,8 @@ from oslo_utils import uuidutils
from cloudkitty.rating import hash
from cloudkitty.rating.hash.db import api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
TEST_TS = 1388577600
FAKE_UUID = '6c1b8a30-797f-4b7e-ad66-9879b79059fb'
@ -81,6 +84,7 @@ CK_RESOURCES_DATA = [{
"unit": "instance"}}]}}]
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class HashMapRatingTest(tests.TestCase):
def setUp(self):
super(HashMapRatingTest, self).setUp()

View File

@ -15,6 +15,7 @@
#
# @author: Stéphane Albert
#
import testtools
import unittest
import mock
@ -22,6 +23,7 @@ from oslo_utils import uuidutils
from cloudkitty.fetcher import keystone
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeRole(object):
@ -67,6 +69,7 @@ def Client(**kwargs):
return FakeKeystoneClient(**kwargs)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class KeystoneFetcherTest(tests.TestCase):
def setUp(self):
super(KeystoneFetcherTest, self).setUp()

View File

@ -15,12 +15,15 @@
#
# @author: Stéphane Albert
#
import testtools
import mock
from oslo_messaging import conffixture
from stevedore import extension
from cloudkitty import orchestrator
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeKeystoneClient(object):
@ -33,11 +36,13 @@ class FakeKeystoneClient(object):
tenants = FakeTenants()
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class OrchestratorTest(tests.TestCase):
def setUp(self):
super(OrchestratorTest, self).setUp()
messaging_conf = self.useFixture(conffixture.ConfFixture(self.conf))
messaging_conf.transport_url = 'fake:/'
self.conf.set_override('backend', 'keystone', 'fetcher')
self.conf.import_group('keystone_fetcher',
'cloudkitty.fetcher.keystone')

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import testtools
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@ -21,11 +22,14 @@ from oslo_policy import policy as oslo_policy
from cloudkitty.common import policy
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils
CONF = cfg.CONF
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PolicyFileTestCase(tests.TestCase):
def setUp(self):
@ -57,6 +61,7 @@ class PolicyFileTestCase(tests.TestCase):
self.context, action, self.target)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PolicyTestCase(tests.TestCase):
def setUp(self):

View File

@ -18,6 +18,7 @@
import copy
import decimal
import hashlib
import testtools
import zlib
import mock
@ -27,6 +28,8 @@ import six
from cloudkitty.rating import pyscripts
from cloudkitty.rating.pyscripts.db import api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
FAKE_UUID = '6c1b8a30-797f-4b7e-ad66-9879b79059fb'
CK_RESOURCES_DATA = [{
@ -103,6 +106,7 @@ for period in data:
""".encode('utf-8')
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class PyScriptsRatingTest(tests.TestCase):
def setUp(self):
super(PyScriptsRatingTest, self).setUp()

View File

@ -15,10 +15,13 @@
#
# @author: Stéphane Albert
#
import testtools
import mock
from cloudkitty.db import api as ck_db_api
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
class FakeRPCClient(object):
@ -39,6 +42,7 @@ class FakeRPCClient(object):
self._queue.append(cast_data)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class RatingTest(tests.TestCase):
def setUp(self):
super(RatingTest, self).setUp()

View File

@ -16,11 +16,14 @@
# @author: Gauvain Pocentek
#
import datetime
import testtools
from cloudkitty import state
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class DBStateManagerTest(tests.TestCase):
def setUp(self):
super(DBStateManagerTest, self).setUp()

View File

@ -19,12 +19,14 @@ import datetime
import decimal
import fractions
import itertools
import testtools
import unittest
import mock
from oslo_utils import timeutils
from cloudkitty.tests.samples import DEFAULT_METRICS_CONF
from cloudkitty.tests.utils import is_functional_test
from cloudkitty import utils as ck_utils
@ -32,6 +34,7 @@ def iso2dt(iso_str):
return timeutils.parse_isotime(iso_str)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class UtilsTimeCalculationsTest(unittest.TestCase):
def setUp(self):
self.date_ts = 1416219015
@ -142,6 +145,7 @@ class UtilsTimeCalculationsTest(unittest.TestCase):
self.assertEqual(calc_dt, check_dt)
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class ConvertUnitTest(unittest.TestCase):
"""Class testing the convert_unit and num2decimal function"""
possible_args = [

View File

@ -16,10 +16,12 @@
# @author: Stéphane Albert
#
import copy
import testtools
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import transformers as t_transformers
from cloudkitty.tests.utils import is_functional_test
TRANS_METADATA = {
'availability_zone': 'nova',
@ -30,6 +32,7 @@ TRANS_METADATA = {
'vcpus': '1'}
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class TransformerBaseTest(tests.TestCase):
def test_strip_resource_on_dict(self):
metadata = copy.deepcopy(samples.COMPUTE_METADATA)

View File

@ -16,10 +16,13 @@
# @author: Stéphane Albert
#
import copy
import testtools
from cloudkitty import tests
from cloudkitty.tests.utils import is_functional_test
from cloudkitty.transformer import gnocchi
GNOCCHI_COMPUTE = {
'id': '2f58a438-3169-11e6-b36c-bfe1fa3241fe',
'project_id': '4480c638-3169-11e6-91de-a3bd3a7d3afb',
@ -86,6 +89,7 @@ TRANS_NETWORK = {
'metrics': {}}
@testtools.skipIf(is_functional_test(), 'Not a functional test')
class GnocchiTransformerTest(tests.TestCase):
def test_strip_gnocchi_compute(self):
resource = copy.deepcopy(GNOCCHI_COMPUTE)

22
cloudkitty/tests/utils.py Normal file
View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Luka Peschke
#
from os import getenv
def is_functional_test():
return getenv('TEST_FUNCTIONAL', False)

View File

@ -15,12 +15,15 @@
#
# @author: Stéphane Albert
#
import copy
from oslo_config import cfg
from oslo_utils import fileutils
from stevedore import named
from cloudkitty import state
from cloudkitty import storage
from cloudkitty import storage_state
from cloudkitty import utils as ck_utils
CONF = cfg.CONF
@ -42,6 +45,7 @@ class WriteOrchestrator(object):
self._backend = backend
self._tenant_id = tenant_id
self._storage = storage
self._storage_state = storage_state.StateManager()
self._basepath = basepath
if self._basepath:
fileutils.ensure_tree(self._basepath)
@ -99,9 +103,16 @@ class WriteOrchestrator(object):
if not timeframe_end:
timeframe_end = timeframe + self._period
try:
data = self._storage.get_time_frame(timeframe,
timeframe_end,
tenant_id=self._tenant_id)
group_filters = {'project_id': self._tenant_id}
data = self._storage.retrieve(begin=timeframe,
end=timeframe_end,
group_filters=group_filters,
paginate=False)
            # Rebuild the v1 'desc' field from the v2 'metadata' and 'groupby'
            for df in data['dataframes']:
for service, resources in df['usage'].items():
for resource in resources:
resource['desc'] = copy.deepcopy(resource['metadata'])
resource['desc'].update(resource['groupby'])
except storage.NoTimeFrame:
return None
return data
@ -112,8 +123,8 @@ class WriteOrchestrator(object):
def _push_data(self):
data = self.get_timeframe(self.usage_start, self.usage_end)
if data:
for timeframe in data:
if data and data['total'] > 0:
for timeframe in data['dataframes']:
self._dispatch(timeframe['usage'])
return True
else:
@ -125,7 +136,7 @@ class WriteOrchestrator(object):
def reset_state(self):
self._load_state_manager_data()
self.usage_end = self._storage.get_state()
self.usage_end = self._storage_state.get_state()
self._update_state_manager_data()
def restart_month(self):
@ -136,7 +147,7 @@ class WriteOrchestrator(object):
def process(self):
self._load_state_manager_data()
storage_state = self._storage.get_state()
storage_state = self._storage_state.get_state(self._tenant_id)
if not self.usage_start:
self.usage_start = storage_state
self.usage_end = self.usage_start + self._period
@ -145,5 +156,5 @@ class WriteOrchestrator(object):
self._commit_data()
self._update_state_manager_data()
self._load_state_manager_data()
storage_state = self._storage.get_state()
storage_state = self._storage_state.get_state(self._tenant_id)
self.close()

View File

@ -119,7 +119,7 @@ class CSVMapped(csv_base.BaseCSVBackend):
"""Context dependent product name translation.
"""
if context == 'compute':
if context == 'compute' or context == 'instance':
return 'Nova Computing'
else:
return context
@ -128,7 +128,7 @@ class CSVMapped(csv_base.BaseCSVBackend):
"""Context dependent operation translation.
"""
if context == 'compute':
if context == 'compute' or context == 'instance':
return 'RunInstances'
def _trans_res_id(self, context, report_data):

View File

@ -175,6 +175,7 @@ function configure_cloudkitty {
# storage
iniset $CLOUDKITTY_CONF storage backend $CLOUDKITTY_STORAGE_BACKEND
iniset $CLOUDKITTY_CONF storage version $CLOUDKITTY_STORAGE_VERSION
if [ "$CLOUDKITTY_STORAGE_BACKEND" != "sqlalchemy" ]; then
iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} auth_section authinfos
fi

View File

@ -47,6 +47,7 @@ CLOUDKITTY_METRICS_CONF=metrics.yml
# Set CloudKitty storage info
CLOUDKITTY_STORAGE_BACKEND=${CLOUDKITTY_STORAGE_BACKEND:-"sqlalchemy"}
CLOUDKITTY_STORAGE_VERSION=${CLOUDKITTY_STORAGE_VERSION:-"1"}
# Set CloudKitty output info
CLOUDKITTY_OUTPUT_BACKEND=${CLOUDKITTY_OUTPUT_BACKEND:-"cloudkitty.backend.file.FileBackend"}

View File

@ -0,0 +1,86 @@
#
#"context_is_admin": "role:admin"
#
#"admin_or_owner": "is_admin:True or tenant:%(tenant_id)s"
#
#"default": ""
# Return the list of all services mapped to a collector.
# LIST /v1/collector/mappings
#"collector:list_mappings": "role:admin"
# Return a service to collector mapping.
# GET /v1/collector/mappings/{service_id}
#"collector:get_mapping": "role:admin"
# Manage a service to collector mapping.
# POST /v1/collector/mappings
# DELETE /v1/collector/mappings/{service_id}
#"collector:manage_mapping": "role:admin"
# Query the enable state of a collector.
# GET /v1/collector/states/{collector_id}
#"collector:get_state": "role:admin"
# Set the enable state of a collector.
# PUT /v1/collector/states/{collector_id}
#"collector:update_state": "role:admin"
# List available services information in Cloudkitty.
# LIST /v1/info/services
#"info:list_services_info": ""
# Get specified service information.
# GET /v1/info/services/{metric_id}
#"info:get_service_info": ""
# List available metrics information in Cloudkitty.
# LIST /v1/info/metrics
#"info:list_metrics_info": ""
# Get specified metric information.
# GET /v1/info/metrics/{metric_id}
#"info:get_metric_info": ""
# Get current configuration in Cloudkitty.
# GET /v1/info/config
#"info:get_config": ""
# Return the list of loaded modules in Cloudkitty.
# LIST /v1/rating/modules
#"rating:list_modules": "role:admin"
# Get specified module.
# GET /v1/rating/modules/{module_id}
#"rating:get_module": "role:admin"
# Change the state and priority of a module.
# PUT /v1/rating/modules/{module_id}
#"rating:update_module": "role:admin"
# Get an instant quote based on multiple resource descriptions.
# POST /v1/rating/quote
#"rating:quote": ""
# Trigger a rating module list reload.
# GET /v1/rating/reload_modules
#"rating:module_config": "role:admin"
# Return the list of rated tenants.
# GET /v1/report/tenants
#"report:list_tenants": "role:admin"
# Return the summary to pay for a given period.
# GET /v1/report/summary
#"report:get_summary": "rule:admin_or_owner"
# Return the amount to pay for a given period.
# GET /v1/report/total
#"report:get_total": "rule:admin_or_owner"
# Return a list of rated resources for a time period and a tenant.
# GET /v1/storage/dataframes
#"storage:list_data_frames": ""

View File

@ -81,13 +81,17 @@ The following shows the basic configuration items:
The tenant named ``service`` is also commonly called ``services``
It is now time to configure the storage backend. Two storage backends are
available: ``sqlalchemy`` and ``hybrid`` (which will soon become the v2
storage).
available: ``sqlalchemy`` and ``hybrid`` (SQLAlchemy being the recommended one).
.. warning:: A v2 storage backend is also available. Neither its implementation
             nor its API is considered stable yet, and both will evolve during
             the Stein cycle. It is available for development purposes only.
.. code-block:: ini
[storage]
backend = hybrid
backend = sqlalchemy
version = 1
As you will see in the following example, collector and storage backends
sometimes need additional configuration sections. (The tenant fetcher works the

View File

@ -0,0 +1,8 @@
=======================
Developer Documentation
=======================
.. toctree::
:glob:
storage

View File

@ -0,0 +1,61 @@
====================
Storage backend (v2)
====================
.. warning:: This backend is considered unstable and should be used for upstream
development only.
In order to add a storage backend to cloudkitty, you'll have to implement the
following abstract class:
.. autoclass:: cloudkitty.storage.v2.BaseStorage
:members:
You'll then need to register an entrypoint corresponding to your storage backend
in the ``cloudkitty.storage.v2.backends`` section of the ``setup.cfg`` file.
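As an illustration, here is what the skeleton of a hypothetical ``mybackend``
module could look like. Module and class names are made up for this example,
and the method list is indicative only: refer to the ``BaseStorage`` autoclass
above for the authoritative signatures.

.. code:: python

    from cloudkitty.storage import v2 as v2_storage


    class MyStorage(v2_storage.BaseStorage):
        """Skeleton of a v2 storage backend (illustrative only)."""

        def init(self):
            # Set up everything the backend needs (schema, archive
            # policies...).
            pass

        def push(self, dataframes, scope_id):
            # Store the given dataframes for the given scope/project ID.
            pass

        def retrieve(self, begin=None, end=None,
                     filters=None, group_filters=None,
                     metric_types=None,
                     offset=0, limit=100, paginate=True):
            # Return a dict of the form
            # {'total': <nb of matching dataframes>, 'dataframes': [...]}.
            pass

        def total(self, groupby=None, begin=None, end=None,
                  metric_types=None, filters=None, group_filters=None):
            # Return a list of dicts, each with a 'rate' key plus one key
            # per 'groupby' attribute.
            pass

        def get_retention(self):
            # Return the retention period (a timedelta in this sketch).
            pass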
Testing
=======
There is a generic test class for v2 storage backends. It makes it possible to
run a functional test suite against a new v2 storage backend.
.. code:: shell
$ tree cloudkitty/tests/storage/v2
cloudkitty/tests/storage/v2
├── base_functional.py
├── __init__.py
└── test_gnocchi_functional.py
In order to use the class, add a file called ``test_mybackend_functional.py``
to the ``cloudkitty/tests/storage/v2`` directory. You will then need to write a
class inheriting from ``BaseFunctionalStorageTest``. Specify the storage version
and the backend name as class attributes.
Example:
.. code:: python
import testtools
from cloudkitty.tests.storage.v2 import base_functional
from cloudkitty.tests.utils import is_functional_test
@testtools.skipUnless(is_functional_test(), 'Test is not a functional test')
class GnocchiBaseFunctionalStorageTest(
base_functional.BaseFunctionalStorageTest):
storage_backend = 'gnocchi'
storage_version = 2
Two methods need to be implemented:

* ``wait_for_backend``: This method is called once dataframes have been pushed
  to the storage backend (in gnocchi's case, it waits for all measures to have
  been processed). It is a classmethod.

* ``cleanup_backend``: This method is called at the end of the test suite in
  order to delete all data from the storage backend. It is a classmethod.

A sketch of what these two methods could look like is given below.
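Continuing with the hypothetical backend above, if pushed dataframes are
immediately queryable, the two methods could be as simple as the following
(``purge_everything`` is a made-up helper of the imaginary backend):

.. code:: python

    @classmethod
    def wait_for_backend(cls):
        # Writes are synchronous in this backend: nothing to poll.
        pass

    @classmethod
    def cleanup_backend(cls):
        # Hypothetical helper removing everything the test suite pushed
        cls.storage.purge_everything()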

View File

@ -16,6 +16,7 @@ to prices.
install/index
configuration/index
admin/index
developer/index
devstack
arch
api

View File

@ -65,12 +65,15 @@ cloudkitty.rating.processors =
hashmap = cloudkitty.rating.hash:HashMap
pyscripts = cloudkitty.rating.pyscripts:PyScripts
cloudkitty.storage.backends =
sqlalchemy = cloudkitty.storage.sqlalchemy:SQLAlchemyStorage
hybrid = cloudkitty.storage.hybrid:HybridStorage
cloudkitty.storage.v1.backends =
sqlalchemy = cloudkitty.storage.v1.sqlalchemy:SQLAlchemyStorage
hybrid = cloudkitty.storage.v1.hybrid:HybridStorage
cloudkitty.storage.v2.backends =
gnocchi = cloudkitty.storage.v2.gnocchi:GnocchiStorage
cloudkitty.storage.hybrid.backends =
gnocchi = cloudkitty.storage.hybrid.backends.gnocchi:GnocchiStorage
gnocchi = cloudkitty.storage.v1.hybrid.backends.gnocchi:GnocchiStorage
cloudkitty.output.writers =
osrf = cloudkitty.writer.osrf:OSRFBackend

View File

@ -68,3 +68,10 @@ local-check-factory = cloudkitty.hacking.checks.factory
[testenv:releasenotes]
basepython = python3
commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:functional]
basepython = python3
setenv = TEST_FUNCTIONAL = 1
# Some tests do push and remove data from the storage backend, so this is done
# in order to keep data consistency
commands = stestr run --concurrency 1 {posargs}
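# With this environment in place, the suite can be run with
# `tox -e functional` (TEST_FUNCTIONAL is set by the testenv itself).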