Improve metrics configuration

This patch refactors the metric configuration
model (and file format) to improve genericity,
maintainability, and usability for non-OpenStack
deployments.

The new metrics YAML format is defined in the
attached story task and is validated on load with
voluptuous.

A processor is now dedicated to a single
collector and a single storage backend, so
collector and storage configuration move back to
the CloudKitty oslo config.

Collectors have been refactored to keep their code as similar as
possible, in order to ease comprehension for new contributors.

Story: 2001883
Task: 14354
Task: 14355
Task: 14431

Change-Id: I948dd9cd5c113bdaa4e49c532354938ffb45f0e7
Luka Peschke 2018-04-17 14:49:34 +02:00 committed by Maxime Cottret
parent 5035de30a8
commit 059a940392
34 changed files with 894 additions and 808 deletions

View File

@ -20,6 +20,7 @@ from oslo_log import log as logging
import pecan
from pecan import rest
import six
import voluptuous
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
@ -36,37 +37,45 @@ CONF = cfg.CONF
def get_all_metrics():
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
METADATA = collector.get_metrics_based_collector_metadata()
if 'metrics' not in METRICS_CONF:
try:
metrics_conf = collector.validate_conf(
ck_utils.load_conf(CONF.collect.metrics_conf))
except (voluptuous.Invalid, voluptuous.MultipleInvalid):
msg = 'Invalid endpoint: no metrics in current configuration.'
pecan.abort(405, msg)
policy.authorize(pecan.request.context, 'info:list_metrics_info', {})
metrics_info_list = []
for metric, metadata in METADATA.items():
info = metadata.copy()
info['metric_id'] = metric
for metric_name, metric in metrics_conf.items():
info = metric.copy()
info['metric_id'] = info['alt_name']
metrics_info_list.append(
info_models.CloudkittyMetricInfo(**info))
return info_models.CloudkittyMetricInfoCollection(
metrics=metrics_info_list)
def _find_metric(name, conf):
for metric_name, metric in conf.items():
if metric['alt_name'] == name:
return metric
def get_one_metric(metric_name):
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
METADATA = collector.get_metrics_based_collector_metadata()
if 'metrics' not in METRICS_CONF:
try:
metrics_conf = collector.validate_conf(
ck_utils.load_conf(CONF.collect.metrics_conf))
except (voluptuous.Invalid, voluptuous.MultipleInvalid):
msg = 'Invalid endpoint: no metrics in current configuration.'
pecan.abort(405, msg)
policy.authorize(pecan.request.context, 'info:get_metric_info', {})
try:
info = METADATA[metric_name].copy()
info['metric_id'] = metric_name
return info_models.CloudkittyMetricInfo(**info)
except KeyError:
metric = _find_metric(metric_name, metrics_conf)
if not metric:
pecan.abort(404, six.text_type(metric_name))
info = metric.copy()
info['metric_id'] = info['alt_name']
return info_models.CloudkittyMetricInfo(**info)
class MetricInfoController(rest.RestController):
@ -131,4 +140,4 @@ class InfoController(rest.RestController):
def config(self):
"""Return current configuration."""
policy.authorize(pecan.request.context, 'info:get_config', {})
return ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
return ck_utils.load_conf(CONF.collect.metrics_conf)

View File

@ -18,29 +18,14 @@
from oslo_config import cfg
from wsme import types as wtypes
from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
from cloudkitty import utils as ck_utils
CONF = cfg.CONF
def get_metrics_list():
metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
try:
metrics = list(metrics_conf['metrics'].keys())
cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)
except KeyError:
metrics = list(DEFAULT_METRICS_CONF['metrics'].keys())
cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)
return cloudkitty_metrics
class CloudkittyMetricInfo(wtypes.Base):
"""Type describing a metric info in CloudKitty."""
metric_id = get_metrics_list()
metric_id = wtypes.text
"""Name of the metric."""
metadata = [wtypes.text]

View File

@ -28,16 +28,7 @@ LOG = log.getLogger(__name__)
CONF = cfg.CONF
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
try:
SERVICE_NAMES = list(METRICS_CONF['metrics'].keys())
except KeyError:
LOG.error("No metrics specified in YAML configuration, "
"CloudKitty won't work as expected")
SERVICE_NAMES = ['compute', 'image']
CLOUDKITTY_SERVICES = wtypes.Enum(wtypes.text, *SERVICE_NAMES)
METRICS_CONF = ck_utils.load_conf(CONF.collect.metrics_conf)
class CloudkittyResource(wtypes.Base):
@ -45,7 +36,7 @@ class CloudkittyResource(wtypes.Base):
"""
service = CLOUDKITTY_SERVICES
service = wtypes.text
"""Name of the service."""
# FIXME(sheeprine): values should be dynamic

View File

@ -16,42 +16,105 @@
# @author: Stéphane Albert
#
import abc
import fractions
from oslo_config import cfg
import six
from stevedore import driver
from voluptuous import All
from voluptuous import Any
from voluptuous import Coerce
from voluptuous import In
from voluptuous import Invalid
from voluptuous import Length
from voluptuous import Optional
from voluptuous import Required
from voluptuous import Schema
from cloudkitty import transformer
from cloudkitty import utils as ck_utils
collect_opts = [
cfg.StrOpt('collector',
default='gnocchi',
help='Data collector.'),
cfg.IntOpt('period',
default=3600,
help='Rating period in seconds.'),
cfg.IntOpt('wait_periods',
default=2,
help='Wait for N periods before collecting new data.'),
cfg.StrOpt('metrics_conf',
default='/etc/cloudkitty/metrics.yml',
help='Metrology configuration file.'),
]
CONF = cfg.CONF
CONF.register_opts(collect_opts, 'collect')
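# Illustrative usage of the options registered above (values shown are
# the declared defaults):
#
#   CONF.collect.collector     -> 'gnocchi'
#   CONF.collect.period        -> 3600
#   CONF.collect.wait_periods  -> 2
#   CONF.collect.metrics_conf  -> '/etc/cloudkitty/metrics.yml'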
COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
def MetricDict(value):
if isinstance(value, dict) and len(value.keys()) > 0:
return value
raise Invalid("Not a dict with at least one key")
CONF_BASE_SCHEMA = {Required('metrics'): MetricDict}
METRIC_BASE_SCHEMA = {
# Display unit
Required('unit'): All(str, Length(min=1)),
# Factor for unit conversion
Required('factor', default=1):
Any(int, float, Coerce(fractions.Fraction)),
# Offset for unit conversion
Required('offset', default=0):
Any(int, float, Coerce(fractions.Fraction)),
# Name to be used in dataframes, and for service creation in the
# hashmap module. Defaults to the name of the metric
Optional('alt_name'): All(str, Length(min=1)),
# This is what metrics are grouped by on collection.
Required('groupby', default=list): [
All(str, Length(min=1))
],
# Available in HashMap
Required('metadata', default=list): [
All(str, Length(min=1))
],
# Mutate collected value. May be any of (NONE, NUMBOOL, FLOOR, CEIL).
# Defaults to NONE
Required('mutate', default='NONE'):
In(['NONE', 'NUMBOOL', 'FLOOR', 'CEIL']),
# Collector-specific args. Should be overridden by the schema provided for
# the given collector
Optional('extra_args'): dict,
}
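# Illustrative check (a sketch, not part of the module): voluptuous
# fills the declared defaults when a single metric entry is validated
# against the base schema:
#
#   Schema(METRIC_BASE_SCHEMA)({'unit': 'GiB'})
#   -> {'unit': 'GiB', 'factor': 1, 'offset': 0,
#       'groupby': [], 'metadata': [], 'mutate': 'NONE'}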
def get_collector(transformers=None):
metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
metrics_conf = ck_utils.load_conf(CONF.collect.metrics_conf)
if not transformers:
transformers = transformer.get_transformers()
collector_args = {
'period': metrics_conf.get('period', 3600),
'period': CONF.collect.period,
'transformers': transformers,
}
collector_args.update({'conf': metrics_conf})
return driver.DriverManager(
COLLECTORS_NAMESPACE,
metrics_conf.get('collector', 'gnocchi'),
CONF.collect.collector,
invoke_on_load=True,
invoke_kwds=collector_args).driver
def get_collector_without_invoke():
"""Return the collector without invoke it."""
metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
return driver.DriverManager(
COLLECTORS_NAMESPACE,
metrics_conf.get('collector', 'gnocchi'),
CONF.collect.collector,
invoke_on_load=False
).driver
@ -61,14 +124,15 @@ def get_metrics_based_collector_metadata():
Results are based on enabled collector and metrics in CONF.
"""
metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
metrics_conf = ck_utils.load_conf(CONF.collect.metrics_conf)
transformers = transformer.get_transformers()
collector = get_collector_without_invoke()
metadata = {}
if 'metrics' in metrics_conf:
for metric in metrics_conf.get('metrics', {}):
metadata[metric] = collector.get_metadata(
metric,
for metric_name, metric in metrics_conf.get('metrics', {}).items():
alt_name = metric.get('alt_name', metric_name)
metadata[alt_name] = collector.get_metadata(
metric_name,
transformers,
metrics_conf,
)
@ -102,17 +166,18 @@ class NoDataCollected(Exception):
@six.add_metaclass(abc.ABCMeta)
class BaseCollector(object):
collector_name = None
dependencies = []
dependencies = ['CloudKittyFormatTransformer']
def __init__(self, transformers, **kwargs):
try:
self.transformers = transformers
self.period = kwargs['period']
self.conf = kwargs['conf']
except IndexError as e:
self.conf = self.check_configuration(kwargs['conf'])
except KeyError as e:
raise ValueError("Missing argument (%s)" % e)
self._check_transformers()
self.t_cloudkitty = self.transformers['CloudKittyFormatTransformer']
def _check_transformers(self):
"""Check for transformer prerequisites
@ -123,6 +188,13 @@ class BaseCollector(object):
raise TransformerDependencyError(self.collector_name,
dependency)
@staticmethod
def check_configuration(conf):
"""Check metrics configuration
"""
return Schema(METRIC_BASE_SCHEMA)(conf)
@staticmethod
def last_month():
month_start = ck_utils.get_month_start()
@ -152,16 +224,35 @@ class BaseCollector(object):
"""
return {"metadata": [], "unit": "undefined"}
def retrieve(self,
resource,
start,
end=None,
project_id=None,
q_filter=None):
trans_resource = self._res_to_func(resource)
if not hasattr(self, trans_resource):
raise NotImplementedError(
"No method found in collector '%s' for resource '%s'."
% (self.collector_name, resource))
func = getattr(self, trans_resource)
return func(resource, start, end, project_id, q_filter)
@abc.abstractmethod
def fetch_all(self, metric_name, start, end,
project_id=None, q_filter=None):
pass
def retrieve(self, metric_name, start, end,
project_id=None, q_filter=None):
data = self.fetch_all(
metric_name,
start,
end,
project_id,
q_filter=q_filter,
)
name = self.conf[metric_name].get('alt_name', metric_name)
if data:
data = self.t_cloudkitty.format_service(name, data)
if not data:
raise NoDataCollected(self.collector_name, name)
return data
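# The collector contract is now: subclasses implement fetch_all() and
# the base retrieve() handles alt_name resolution and CloudKitty
# formatting. A minimal hypothetical subclass for illustration:
#
#   class DummyCollector(BaseCollector):
#       collector_name = 'dummy'
#
#       def fetch_all(self, metric_name, start, end,
#                     project_id=None, q_filter=None):
#           return []   # empty -> retrieve() raises NoDataCollected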
def validate_conf(conf):
"""Validates the provided configuration."""
collector = get_collector_without_invoke()
output = collector.check_configuration(conf)
for metric_name, metric in output.items():
if 'alt_name' not in metric.keys():
metric['alt_name'] = metric_name
return output
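# Net effect (sketch, assuming the configured collector is gnocchi):
# each metric in the validated output carries an explicit alt_name,
# defaulting to the metric's own name:
#
#   conf = {'metrics': {'image.size': {
#       'unit': 'MiB',
#       'extra_args': {'resource_type': 'image'}}}}
#   validate_conf(conf)['image.size']['alt_name']  # -> 'image.size'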

View File

@ -13,13 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import decimal
from gnocchiclient import auth as gauth
from gnocchiclient import client as gclient
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from voluptuous import All
from voluptuous import In
from voluptuous import Length
from voluptuous import Required
from voluptuous import Schema
from cloudkitty import collector
from cloudkitty import utils as ck_utils
@ -65,10 +68,25 @@ ks_loading.register_auth_conf_options(
CONF = cfg.CONF
GNOCCHI_EXTRA_SCHEMA = {
Required('extra_args'): {
Required('resource_type'): All(str, Length(min=1)),
# In the Gnocchi model, metrics are grouped by resource.
# This parameter allows adapting the key of the resource identifier
Required('resource_key', default='id'): All(str, Length(min=1)),
# This is needed to allow filtering on the project for the OpenStack
# use case.
# NOTE(MCO): may be removed in future releases
Required('scope_key', default='project_id'): All(str, Length(min=1)),
Required('aggregation_method', default='max'):
In(['max', 'mean', 'min']),
},
}
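# Illustrative extra_args for a gnocchi-collected metric (defaults as
# declared above; resource_type is the only key without a default):
#
#   {'resource_type': 'instance',
#    'resource_key': 'id',              # default
#    'scope_key': 'project_id',         # default
#    'aggregation_method': 'max'}       # one of max / mean / min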
class GnocchiCollector(collector.BaseCollector):
collector_name = 'gnocchi'
dependencies = ('GnocchiTransformer',
'CloudKittyFormatTransformer')
def __init__(self, transformers, **kwargs):
super(GnocchiCollector, self).__init__(transformers, **kwargs)
@ -94,14 +112,34 @@ class GnocchiCollector(collector.BaseCollector):
adapter_options=adapter_options,
)
@staticmethod
def check_configuration(conf):
"""Check metrics configuration
"""
conf = Schema(collector.CONF_BASE_SCHEMA)(conf)
metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
GNOCCHI_EXTRA_SCHEMA)
output = dict()
for metric_name, metric in conf['metrics'].items():
output[metric_name] = metric_schema(metric)
output[metric_name]['groupby'].append(
output[metric_name]['extra_args']['resource_key']
)
return output
@classmethod
def get_metadata(cls, resource_name, transformers, conf):
info = super(GnocchiCollector, cls).get_metadata(resource_name,
transformers)
try:
info["metadata"].extend(transformers['GnocchiTransformer']
.get_metadata(resource_name))
info['unit'] = conf['metrics'][resource_name]['unit']
info["metadata"].extend(
conf[resource_name]['groupby']
).extend(
conf[resource_name]['metadata']
)
info['unit'] = conf[resource_name]['unit']
except KeyError:
pass
return info
@ -154,38 +192,43 @@ class GnocchiCollector(collector.BaseCollector):
self.gen_filter(cop="<=", started_at=end))
return time_filter
def _expand(self, metrics, resource, name, aggregate, start, end):
try:
values = self._conn.metric.get_measures(
metric=metrics[name],
start=ck_utils.ts2dt(start),
stop=ck_utils.ts2dt(end),
aggregation=aggregate)
# NOTE(sheeprine): Get the list of values for the current
# metric and get the first result value.
# [point_date, granularity, value]
# ["2015-11-24T00:00:00+00:00", 86400.0, 64.0]
resource[name] = values[0][2]
except (IndexError, KeyError):
resource[name] = 0
def _expand_metrics(self, resources, mappings, start, end, resource_name):
for resource in resources:
metrics = resource.get('metrics', {})
self._expand(
metrics,
resource,
resource_name,
mappings,
start,
end,
)
def get_resources(self, resource_name, start, end,
project_id, q_filter=None):
def _fetch_resources(self, metric_name, start, end,
project_id=None, q_filter=None):
"""Get resources during the timeframe.
:param metric_name: Metric name to filter on.
:type metric_name: str
:param start: Start of the timeframe.
:param end: End of the timeframe if needed.
:param project_id: Filter on a specific tenant/project.
:type project_id: str
:param q_filter: Append a custom filter.
:type q_filter: list
"""
# Get gnocchi specific conf
extra_args = self.conf[metric_name]['extra_args']
# Build query
query_parameters = self._generate_time_filter(start, end)
resource_type = extra_args['resource_type']
query_parameters.append(
self.gen_filter(cop="=", type=resource_type))
if project_id:
kwargs = {extra_args['scope_key']: project_id}
query_parameters.append(self.gen_filter(**kwargs))
if q_filter:
query_parameters.append(q_filter)
resources = self._conn.resource.search(
resource_type=resource_type,
query=self.extend_filter(*query_parameters))
return {res[extra_args['resource_key']]: res for res in resources}
def _fetch_metric(self, metric_name, start, end,
project_id=None, q_filter=None):
"""Get metric during the timeframe.
:param metric_name: metric name to filter on.
:type metric_name: str
:param start: Start of the timeframe.
:param end: End of the timeframe if needed.
@ -194,89 +237,94 @@ class GnocchiCollector(collector.BaseCollector):
:param q_filter: Append a custom filter.
:type q_filter: list
"""
# NOTE(sheeprine): We first get the list of every resource running
# without any details or history.
# Then we get information about the resource getting details and
# history.
# Translating the resource name if needed
query_parameters = self._generate_time_filter(start, end)
# Get gnocchi specific conf
extra_args = self.conf[metric_name]['extra_args']
resource_type = self.conf['metrics'][resource_name]['resource']
# get resource type
resource_type = extra_args['resource_type']
# build search query using resource type and project_id if provided
query_parameters = list()
query_parameters.append(
self.gen_filter(cop="=", type=resource_type))
query_parameters.append(
self.gen_filter(project_id=project_id))
if project_id:
kwargs = {extra_args['scope_key']: project_id}
query_parameters.append(self.gen_filter(**kwargs))
if q_filter:
query_parameters.append(q_filter)
resources = self._conn.resource.search(
# build aggregation operation
op = ["aggregate", extra_args['aggregation_method'],
["metric", metric_name, extra_args['aggregation_method']]]
# get groupby
groupby = self.conf[metric_name]['groupby']
return self._conn.aggregates.fetch(
op,
resource_type=resource_type,
query=self.extend_filter(*query_parameters))
return resources
start=ck_utils.ts2dt(start),
stop=ck_utils.ts2dt(end),
groupby=groupby,
search=self.extend_filter(*query_parameters))
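# For example, with metric 'cpu' and aggregation_method 'max', the
# operation built above would be (illustrative):
#
#   ['aggregate', 'max', ['metric', 'cpu', 'max']]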
def resource_info(self, resource_name, start, end,
project_id, q_filter=None):
met = self.conf['metrics'][resource_name]
unit = met['unit']
qty = 1 if met.get('countable_unit') else met['resource']
def _format_data(self, metconf, data, resources_info=None):
"""Formats gnocchi data to CK data.
resources = self.get_resources(
resource_name,
Returns metadata, groupby and qty
"""
groupby = data['group']
# if resource info is provided, add additional
# metadata as defined in the conf
metadata = dict()
if resources_info:
resource = resources_info[
groupby[metconf['extra_args']['resource_key']]]
for i in metconf['metadata']:
metadata[i] = resource.get(i, '')
qty = data['measures']['measures']['aggregated'][0][2]
converted_qty = ck_utils.convert_unit(
qty, metconf['factor'], metconf['offset'])
mutated_qty = ck_utils.mutate(converted_qty, metconf['mutate'])
return metadata, groupby, mutated_qty
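# Assumed semantics of the conversion helpers (a sketch; see
# cloudkitty/utils.py for the actual implementation):
#
#   convert_unit(qty, factor, offset) ~ qty * factor + offset
#   mutate(qty, 'NUMBOOL')            ~ 1 if qty else 0
#   mutate(qty, 'FLOOR' / 'CEIL')     ~ math.floor(qty) / math.ceil(qty)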
def fetch_all(self, metric_name, start, end,
project_id=None, q_filter=None):
met = self.conf[metric_name]
data = self._fetch_metric(
metric_name,
start,
end,
project_id=project_id,
q_filter=q_filter,
)
formated_resources = list()
for resource in resources:
resource_data = self.t_gnocchi.strip_resource_data(
resource_name, resource)
mapp = self.conf['metrics'][resource_name]['aggregation_method']
self._expand_metrics(
[resource_data],
mapp,
resources_info = None
if met['metadata']:
resources_info = self._fetch_resources(
metric_name,
start,
end,
resource_name,
project_id=project_id,
q_filter=q_filter
)
resource_data.pop('metrics', None)
# Unit conversion
if isinstance(qty, str):
resource_data[resource_name] = ck_utils.convert_unit(
resource_data[resource_name],
self.conf['metrics'][resource_name].get('factor', 1),
self.conf['metrics'][resource_name].get('offset', 0),
formated_resources = list()
for d in data:
# Only if aggregates have been found
if d['measures']['measures']['aggregated']:
metadata, groupby, qty = self._format_data(
met, d, resources_info)
data = self.t_cloudkitty.format_item(
groupby,
metadata,
met['unit'],
qty=qty,
)
val = qty if isinstance(qty, int) else resource_data[resource_name]
data = self.t_cloudkitty.format_item(
resource_data,
unit,
decimal.Decimal(val)
)
# NOTE(sheeprine): Reference to gnocchi resource used by storage
data['resource_id'] = data['desc']['resource_id']
formated_resources.append(data)
formated_resources.append(data)
return formated_resources
def retrieve(self, resource_name, start, end,
project_id, q_filter=None):
resources = self.resource_info(
resource_name,
start,
end,
project_id,
q_filter=q_filter,
)
if not resources:
raise collector.NoDataCollected(self.collector_name, resource_name)
return self.t_cloudkitty.format_service(resource_name, resources)

View File

@ -15,13 +15,16 @@
#
# @author: Luka Peschke
#
import decimal
from keystoneauth1 import loading as ks_loading
from keystoneclient.v3 import client as ks_client
from monascaclient import client as mclient
from oslo_config import cfg
from oslo_log import log as logging
from voluptuous import All
from voluptuous import In
from voluptuous import Length
from voluptuous import Required
from voluptuous import Schema
from cloudkitty import collector
from cloudkitty import transformer
@ -43,7 +46,22 @@ ks_loading.register_auth_conf_options(
COLLECTOR_MONASCA_OPTS)
CONF = cfg.CONF
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
METRICS_CONF = ck_utils.load_conf(CONF.collect.metrics_conf)
MONASCA_EXTRA_SCHEMA = {
Required('extra_args'): {
# Key corresponding to the resource id in a metric's dimensions
# Allows adapting the resource identifier. Should not need to be
# modified in a standard OpenStack installation
Required('resource_key', default='resource_id'):
All(str, Length(min=1)),
# This is needed to allow filtering on the project for the OpenStack
# use case. May be removed in future releases
Required('scope_key', default='project_id'): All(str, Length(min=1)),
Required('aggregation_method', default='max'):
In(['max', 'mean', 'min']),
},
}
class EndpointNotFound(Exception):
@ -53,13 +71,24 @@ class EndpointNotFound(Exception):
class MonascaCollector(collector.BaseCollector):
collector_name = 'monasca'
dependencies = ['CloudKittyFormatTransformer']
@staticmethod
def check_configuration(conf):
"""Check metrics configuration
"""
conf = Schema(collector.CONF_BASE_SCHEMA)(conf)
metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
MONASCA_EXTRA_SCHEMA)
output = dict()
for metric_name, metric in conf['metrics'].items():
output[metric_name] = metric_schema(metric)
return output
def __init__(self, transformers, **kwargs):
super(MonascaCollector, self).__init__(transformers, **kwargs)
self.t_cloudkitty = self.transformers['CloudKittyFormatTransformer']
self.auth = ks_loading.load_auth_from_conf_options(
CONF,
COLLECTOR_MONASCA_OPTS)
@ -90,28 +119,13 @@ class MonascaCollector(collector.BaseCollector):
return endpoint.url
return None
def _get_metadata(self, resource_type, transformers, conf):
def _get_metadata(self, metric_name, transformers, conf):
info = {}
info['unit'] = conf['metrics'][resource_type]['unit']
info['unit'] = conf['metrics'][metric_name]['unit']
start = ck_utils.dt2ts(ck_utils.get_month_start())
end = ck_utils.dt2ts(ck_utils.get_month_end())
try:
resource = self.active_resources(
resource_type,
start,
end,
None,
)[0]
except IndexError:
resource = {}
info['metadata'] = resource.get('dimensions', {}).keys()
service_metrics = METRICS_CONF['services_metrics'][resource_type]
for service_metric in service_metrics:
metric, statistics = list(service_metric.items())[0]
info['metadata'].append(metric)
dimension_names = self._conn.metric.list_dimension_names(
metric_name=metric_name)
info['metadata'] = [d['dimension_name'] for d in dimension_names]
return info
# NOTE(lukapeschke) if anyone sees a better way to do this,
@ -124,144 +138,124 @@ class MonascaCollector(collector.BaseCollector):
tmp = cls(**args)
return tmp._get_metadata(resource_type, transformers, conf)
def _get_resource_metadata(self, resource_type, start,
end, resource_id, conf):
meter = conf['metrics'][resource_type]['resource']
if not meter:
return {}
measurements = self._conn.metrics.list_measurements(
name=meter,
start_time=ck_utils.ts2dt(start),
end_time=ck_utils.ts2dt(end),
merge_metrics=True,
dimensions={'resource_id': resource_id},
)
try:
# Getting the last measurement of given period
metadata = measurements[-1]['measurements'][-1][2]
except (KeyError, IndexError):
metadata = {}
return metadata
def _get_resource_qty(self, meter, start, end, resource_id, statistics):
# NOTE(lukapeschke) the period trick is used to aggregate
# the measurements
period = end - start
statistics = self._conn.metrics.list_statistics(
name=meter,
start_time=ck_utils.ts2dt(start),
end_time=ck_utils.ts2dt(end),
dimensions={'resource_id': resource_id},
statistics=statistics,
period=period,
merge_metrics=True,
)
try:
# If several statistics are returned (should not happen),
# use the latest
qty = decimal.Decimal(statistics[-1]['statistics'][-1][1])
except (KeyError, IndexError):
qty = decimal.Decimal(0)
return qty
def _is_resource_active(self, meter, resource_id, start, end):
measurements = self._conn.metrics.list_measurements(
name=meter,
start_time=ck_utils.ts2dt(start),
end_time=ck_utils.ts2dt(end),
group_by='resource_id',
merge_metrics=True,
dimensions={'resource_id': resource_id},
)
return len(measurements) > 0
def active_resources(self, resource_type, start,
end, project_id, conf, **kwargs):
meter = conf['metrics'][resource_type]['resource']
if not meter:
return {}
def _get_dimensions(self, metric_name, project_id, q_filter):
extra_args = self.conf[metric_name]['extra_args']
dimensions = {}
if project_id:
dimensions['project_id'] = project_id
dimensions.update(kwargs)
resources = self._conn.metrics.list(name=meter, dimensions=dimensions)
output = []
for resource in resources:
try:
resource_id = resource['dimensions']['resource_id']
if (resource_id not in
[item['dimensions']['resource_id'] for item in output]
and self._is_resource_active(meter, resource_id,
start, end)):
output.append(resource)
except KeyError:
continue
return output
dimensions[extra_args['scope_key']] = project_id
if q_filter:
dimensions.update(q_filter)
return dimensions
def _expand_metrics(self, resource, resource_id,
mappings, start, end, resource_type):
for mapping in mappings:
name, statistics = list(mapping.items())[0]
qty = self._get_resource_qty(
name,
start,
end,
resource_id,
statistics,
)
def _fetch_measures(self, metric_name, start, end,
project_id=None, q_filter=None):
"""Get measures for given metric during the timeframe.
conv_data = METRICS_CONF['metrics'][resource_type].get(name)
if conv_data:
resource[name] = ck_utils.convert_unit(
qty,
conv_data.get('factor', 1),
conv_data.get('offset', 0),
)
:param metric_name: metric name to filter on.
:type metric_name: str
:param start: Start of the timeframe.
:param end: End of the timeframe if needed.
:param project_id: Filter on a specific tenant/project.
:type project_id: str
:param q_filter: Append a custom filter.
:type q_filter: list
"""
def resource_info(self, resource_type, start, end,
project_id, q_filter=None):
met = self.conf['metrics'][resource_type]
unit = met['unit']
qty = 1 if met.get('countable_unit') else met['resource']
dimensions = self._get_dimensions(metric_name, project_id, q_filter)
group_by = self.conf[metric_name]['groupby']
# NOTE(lpeschke): One aggregated measure per collect period
period = end - start
active_resources = self.active_resources(
resource_type, start, end, project_id
extra_args = self.conf[metric_name]['extra_args']
return self._conn.metrics.list_statistics(
name=metric_name,
merge_metrics=True,
dimensions=dimensions,
start_time=ck_utils.ts2dt(start),
end_time=ck_utils.ts2dt(end),
period=period,
statistics=extra_args['aggregation_method'],
group_by=group_by)
def _fetch_metrics(self, metric_name, start, end,
project_id=None, q_filter=None):
"""List active metrics during the timeframe.
:param metric_name: metric name to filter on.
:type metric_name: str
:param start: Start of the timeframe.
:param end: End of the timeframe if needed.
:param project_id: Filter on a specific tenant/project.
:type project_id: str
:param q_filter: Append a custom filter.
:type q_filter: list
"""
dimensions = self._get_dimensions(metric_name, project_id, q_filter)
metrics = self._conn.metrics.list(
name=metric_name,
dimensions=dimensions,
start_time=ck_utils.ts2dt(start),
end_time=ck_utils.ts2dt(end),
)
resource_data = []
for resource in active_resources:
resource_id = resource['dimensions']['resource_id']
data = resource['dimensions']
mappings = (
resource_type,
METRICS_CONF['metrics'][resource_type]['aggregation_method'],
)
resource_key = self.conf[metric_name]['extra_args']['resource_key']
self._expand_metrics(
data,
resource_id,
mappings,
return {metric['dimensions'][resource_key]:
metric['dimensions'] for metric in metrics}
def _format_data(self, metconf, data, resources_info=None):
"""Formats Monasca data to CK data.
Returns metadata, groupby and qty
"""
groupby = data['dimensions']
resource_key = metconf['extra_args']['resource_key']
metadata = dict()
if resources_info:
resource = resources_info[groupby[resource_key]]
for i in metconf['metadata']:
metadata[i] = resource.get(i, '')
qty = data['statistics'][0][1]
converted_qty = ck_utils.convert_unit(
qty, metconf['factor'], metconf['offset'])
mutated_qty = ck_utils.mutate(converted_qty, metconf['mutate'])
return metadata, groupby, mutated_qty
def fetch_all(self, metric_name, start, end,
project_id=None, q_filter=None):
met = self.conf[metric_name]
data = self._fetch_measures(
metric_name,
start,
end,
project_id=project_id,
q_filter=q_filter,
)
resources_info = None
if met['metadata']:
resources_info = self._fetch_metrics(
metric_name,
start,
end,
resource_type,
project_id=project_id,
q_filter=q_filter,
)
resource_qty = qty
if not (isinstance(qty, int) or isinstance(qty, decimal.Decimal)):
resource_qty = METRICS_CONF['services_objects'][resource_type]
resource_qty = data[resource_qty]
resource = self.t_cloudkitty.format_item(data, unit, resource_qty)
resource['desc']['resource_id'] = resource_id
resource['resource_id'] = resource_id
resource_data.append(resource)
return resource_data
def retrieve(self, resource_type, start, end, project_id, q_filter=None):
resources = self.resource_info(resource_type, start, end,
project_id=project_id,
q_filter=q_filter)
if not resources:
raise collector.NoDataCollected(self.collector_name, resource_type)
return self.t_cloudkitty.format_service(resource_type, resources)
formated_resources = list()
for d in data:
if len(d['statistics']):
metadata, groupby, qty = self._format_data(
met, d, resources_info)
data = self.t_cloudkitty.format_item(
groupby,
metadata,
met['unit'],
qty=qty,
)
formated_resources.append(data)
return formated_resources

View File

@ -22,6 +22,7 @@ import cloudkitty.collector.monasca
import cloudkitty.config
import cloudkitty.fetcher
import cloudkitty.fetcher.keystone
import cloudkitty.fetcher.source
import cloudkitty.orchestrator
import cloudkitty.service
import cloudkitty.storage
@ -34,7 +35,7 @@ _opts = [
('api', list(itertools.chain(
cloudkitty.api.app.api_opts,))),
('collect', list(itertools.chain(
cloudkitty.utils.collect_opts))),
cloudkitty.collector.collect_opts))),
('collector_monasca', list(itertools.chain(
cloudkitty.collector.monasca.collector_monasca_opts))),
('gnocchi_collector', list(itertools.chain(
@ -43,6 +44,8 @@ _opts = [
('keystone_fetcher', list(itertools.chain(
cloudkitty.fetcher.keystone.keystone_fetcher_opts,
cloudkitty.fetcher.keystone.keystone_common_opts))),
('source_fetcher', list(itertools.chain(
cloudkitty.fetcher.source.source_fetcher_opts))),
('orchestrator', list(itertools.chain(
cloudkitty.orchestrator.orchestrator_opts))),
('output', list(itertools.chain(
@ -50,7 +53,7 @@ _opts = [
('state', list(itertools.chain(
cloudkitty.config.state_opts))),
('storage', list(itertools.chain(
cloudkitty.utils.storage_opts))),
cloudkitty.storage.storage_opts))),
('storage_gnocchi', list(itertools.chain(
cloudkitty.storage.hybrid.backends.gnocchi.gnocchi_storage_opts))),
('fetcher', list(itertools.chain(

View File

@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Martin CAMEY
#
DEFAULT_METRICS_CONF = {
'name': 'OpenStack',
'fetcher': 'keystone',
'collector': 'gnocchi',
'period': 3600,
'wait_periods': 2,
'window': 1800,
'services_objects': {
'compute': 'instance',
'volume': 'volume',
'network.bw.out': 'instance_network_interface',
'network.bw.in': 'instance_network_interface',
'network.floating': 'network',
'image': 'image',
'radosgw.usage': 'ceph_account',
},
'metrics': {
'vcpus': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'memory': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'cpu': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'disk.root.size': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'disk.ephemeral.size': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'image.size': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'image.download': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'image.serve': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'volume.size': {
'resource': 'volume',
'unit': 'GiB',
'factor': 1,
'aggregation_method': 'max',
},
'network.outgoing.bytes': {
'resource': 'instance_network_interface',
'unit': 'MB',
'factor': 1 / 1000000,
'aggregation_method': 'max',
},
'network.incoming.bytes': {
'resource': 'instance_network_interface',
'unit': 'MB',
'factor': 1 / 1000000,
'aggregation_method': 'max',
},
'ip.floating': {
'resource': 'network',
'unit': 'ip',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'radosgw.objects.size': {
'resource': 'ceph_account',
'unit': 'GiB',
'factor': 1 / 1073741824,
'aggregation_method': 'max',
},
},
}

View File

@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -21,13 +20,15 @@ import abc
from oslo_config import cfg
import six
FETCHER_OPTS = 'fetcher'
DEPRECATED_FETCHER_OPTS = 'tenant_fetcher'
fetchers_opts = [
cfg.StrOpt('backend',
default='keystone',
help='Driver used to fetch tenant list.')
help='Driver used to fetch tenant list.',
deprecated_group=DEPRECATED_FETCHER_OPTS)
]
cfg.CONF.register_opts(fetchers_opts, 'tenant_fetcher')
cfg.CONF.register_opts(fetchers_opts, 'fetcher')
@six.add_metaclass(abc.ABCMeta)

View File

@ -61,18 +61,18 @@ class KeystoneFetcher(fetcher.BaseFetcher):
session=self.session,
auth_url=self.auth.auth_url)
def get_tenants(self, conf=None):
def get_tenants(self):
keystone_version = discover.normalize_version_number(
CONF.keystone_fetcher.keystone_version)
auth_dispatch = {(3,): ('project', 'projects', 'list'),
(2,): ('tenant', 'tenants', 'roles_for_user')}
for auth_version, auth_version_mapping in auth_dispatch.items():
if discover.version_match(auth_version, keystone_version):
return self._do_get_tenants(auth_version_mapping, conf)
return self._do_get_tenants(auth_version_mapping)
msg = "Keystone version you've specified is not supported"
raise exceptions.VersionNotAvailable(msg)
def _do_get_tenants(self, auth_version_mapping, conf):
def _do_get_tenants(self, auth_version_mapping):
tenant_attr, tenants_attr, role_func = auth_version_mapping
tenant_list = getattr(self.admin_ks, tenants_attr).list()
my_user_id = self.session.get_user_id()
@ -82,8 +82,4 @@ class KeystoneFetcher(fetcher.BaseFetcher):
tenant_attr: tenant})
if 'rating' not in [role.name for role in roles]:
tenant_list.remove(tenant)
if conf:
res = [{'tenant_id': tenant.id} for tenant in tenant_list]
for tenant in res:
tenant.update(conf)
return res
return [tenant.id for tenant in tenant_list]

View File

@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -16,22 +15,25 @@
#
# @author: Martin CAMEY
#
import hashlib
from oslo_config import cfg
from cloudkitty import fetcher
SOURCE_FETCHER_OPTS = 'source_fetcher'
source_fetcher_opts = [
cfg.ListOpt('sources',
default=list(),
help='list of source identifiers'), ]
cfg.CONF.register_opts(source_fetcher_opts, SOURCE_FETCHER_OPTS)
CONF = cfg.CONF
class SourceFetcher(fetcher.BaseFetcher):
"""Source projects fetcher."""
name = 'source'
def get_projects(self, conf=None):
if conf:
tmp = hashlib.md5()
tmp.update(conf['name'])
conf['tenant_id'] = tmp.hexdigest()
return [conf]
def get_tenants(self, conf=None):
return self.get_projects(conf=conf)
def get_tenants(self):
return CONF.source_fetcher.sources
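# Illustrative behaviour, with hypothetical values in cloudkitty.conf:
#
#   [source_fetcher]
#   sources = tenant_a,tenant_b
#
#   SourceFetcher().get_tenants()  # -> ['tenant_a', 'tenant_b']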

View File

@ -41,7 +41,6 @@ eventlet.monkey_patch()
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('backend', 'cloudkitty.fetcher', 'tenant_fetcher')
orchestrator_opts = [
cfg.StrOpt('coordination_url',
@ -51,9 +50,9 @@ orchestrator_opts = [
]
CONF.register_opts(orchestrator_opts, group='orchestrator')
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
CONF.import_opt('backend', 'cloudkitty.fetcher', 'fetcher')
FETCHERS_NAMESPACE = 'cloudkitty.tenant.fetchers'
FETCHERS_NAMESPACE = 'cloudkitty.fetchers'
PROCESSORS_NAMESPACE = 'cloudkitty.rating.processors'
COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
@ -153,13 +152,13 @@ class APIWorker(BaseWorker):
class Worker(BaseWorker):
def __init__(self, collector, storage, tenant):
def __init__(self, collector, storage, tenant_id):
self._collector = collector
self._storage = storage
self._period = tenant['period']
self._wait_time = tenant['wait_periods'] * self._period
self._tenant_id = tenant['tenant_id']
self.conf = tenant
self._period = CONF.collect.period
self._wait_time = CONF.collect.wait_periods * self._period
self._tenant_id = tenant_id
self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)
super(Worker, self).__init__(self._tenant_id)
@ -182,7 +181,7 @@ class Worker(BaseWorker):
timestamp = self._storage.get_state(self._tenant_id)
return ck_utils.check_time_state(timestamp,
self._period,
self._wait_time)
CONF.collect.wait_periods)
def run(self):
while True:
@ -190,7 +189,7 @@ class Worker(BaseWorker):
if not timestamp:
break
metrics = list(self.conf['metrics'].keys())
metrics = list(self._conf['metrics'].keys())
for metric in metrics:
try:
@ -225,8 +224,8 @@ class Orchestrator(object):
def __init__(self):
self.fetcher = driver.DriverManager(
FETCHERS_NAMESPACE,
METRICS_CONF['fetcher'],
invoke_on_load=True
CONF.fetcher.backend,
invoke_on_load=True,
).driver
transformers = transformer.get_transformers()
@ -258,11 +257,11 @@ class Orchestrator(object):
self.server = messaging.get_server(target, endpoints)
self.server.start()
def _check_state(self, tenant_id, period, wait_time):
def _check_state(self, tenant_id):
timestamp = self.storage.get_state(tenant_id)
return ck_utils.check_time_state(timestamp,
period,
wait_time)
CONF.collect.period,
CONF.collect.wait_periods)
def process_messages(self):
# TODO(sheeprine): Code kept to handle threading and asynchronous
@ -273,36 +272,31 @@ class Orchestrator(object):
def process(self):
while True:
self.tenants = self.fetcher.get_tenants(METRICS_CONF)
self.tenants = self.fetcher.get_tenants()
random.shuffle(self.tenants)
LOG.info('Tenants loaded for fetcher %s', self.fetcher.name)
for tenant in self.tenants:
lock = self._lock(tenant['tenant_id'])
for tenant_id in self.tenants:
lock = self._lock(tenant_id)
if lock.acquire(blocking=False):
state = self._check_state(
tenant['tenant_id'],
tenant['period'],
tenant['wait_periods'],
)
if not state:
self.tenants.remove(tenant)
else:
state = self._check_state(tenant_id)
if state:
worker = Worker(
self.collector,
self.storage,
tenant,
tenant_id,
)
worker.run()
lock.release()
self.coord.heartbeat()
# NOTE(sheeprine): Slow down looping if all tenants are
# being processed
eventlet.sleep(1)
# FIXME(sheeprine): We may cause a drift here
eventlet.sleep(tenant['period'])
eventlet.sleep(CONF.collect.period)
def terminate(self):
self.coord.stop()

View File

@ -25,21 +25,28 @@ from stevedore import driver
from cloudkitty import utils as ck_utils
storage_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
help='Name of the storage backend driver.')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# NOTE(mc): This hack is possible because only
# one OpenStack configuration is allowed.
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
CONF.import_opt('period', 'cloudkitty.collector', 'collect')
CONF.register_opts(storage_opts, 'storage')
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
def get_storage():
def get_storage(**kwargs):
storage_args = {
'period': METRICS_CONF.get('period', 3600),
'period': CONF.collect.period,
}
storage_args.update(kwargs)
backend = driver.DriverManager(
STORAGES_NAMESPACE,
cfg.CONF.storage.backend,

View File

@ -27,8 +27,8 @@ from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from cloudkitty.collector import validate_conf
from cloudkitty.storage.hybrid.backends import BaseHybridBackend
from cloudkitty.transformer import gnocchi as gtransformer
import cloudkitty.utils as ck_utils
@ -36,10 +36,6 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# NOTE(mc): This hack is possible because only
# one OpenStack configuration is allowed.
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
CONF.import_opt('period', 'cloudkitty.collector', 'collect')
GNOCCHI_STORAGE_OPTS = 'storage_gnocchi'
@ -53,7 +49,7 @@ gnocchi_storage_opts = [
# The archive policy definition MUST include the collect period granularity
cfg.StrOpt('archive_policy_definition',
default='[{"granularity": '
+ six.text_type(METRICS_CONF.get('period', 3600)) +
+ six.text_type(CONF.collect.period) +
', "timespan": "90 days"}, '
'{"granularity": 86400, "timespan": "360 days"}, '
'{"granularity": 2592000, "timespan": "1800 days"}]',
@ -68,6 +64,7 @@ ks_loading.register_auth_conf_options(
GNOCCHI_STORAGE_OPTS)
RESOURCE_TYPE_NAME_ROOT = 'rating_service_'
METADATA_NAME_ROOT = 'ckmeta_'
class DecimalJSONEncoder(json.JSONEncoder):
@ -92,34 +89,27 @@ class GnocchiStorage(BaseHybridBackend):
"""
# NOTE(lukapeschke): List taken directly from gnocchi code
invalid_attribute_names = [
"id", "type", "metrics",
"revision", "revision_start", "revision_end",
"started_at", "ended_at",
"user_id", "project_id",
"created_by_user_id", "created_by_project_id", "get_metric",
"creator",
]
groupby_keys = ['res_type', 'tenant_id']
groupby_values = ['type', 'project_id']
def _init_resource_types(self):
transformer = gtransformer.GnocchiTransformer()
for metric in list(self.conf['metrics'].keys()):
for metric_name, metric in self.conf.items():
metric_dict = dict()
metric_dict['attributes'] = list()
for attribute in transformer.get_metadata(metric):
if attribute not in self.invalid_attribute_names:
metric_dict['attributes'].append(attribute)
metric_dict['required_attributes'] = [
'resource_id',
'unit',
]
metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric
metric_dict['qty_metric'] = 1
if self.conf['metrics'][metric].get('countable_unit'):
resource = self.conf['metrics'][metric]['resource']
metric_dict['qty_metric'] = resource
self._resource_type_data[metric] = metric_dict
for attribute in metric.get('metadata', {}):
metric_dict['attributes'].append(
METADATA_NAME_ROOT + attribute)
metric_dict['required_attributes'] = ['unit', 'resource_id']
for attribute in metric['groupby']:
metric_dict['required_attributes'].append(
METADATA_NAME_ROOT + attribute)
metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric['alt_name']
if metric['mutate'] == 'NUMBOOL':
metric_dict['qty_metric'] = 1
else:
metric_dict['qty_metric'] = metric_name
self._resource_type_data[metric['alt_name']] = metric_dict
def _get_res_type_dict(self, res_type):
res_type_data = self._resource_type_data.get(res_type, None)
@ -148,16 +138,19 @@ class GnocchiStorage(BaseHybridBackend):
"Unknown resource type '{}'".format(res_type))
res_dict = {
'id': data['resource_id'],
'resource_id': data['resource_id'],
'id': data['id'],
'resource_id': data['id'],
'project_id': tenant_id,
'user_id': data['user_id'],
'user_id': 'cloudkitty',
'unit': data['unit'],
}
for attr in res_type_data['attributes']:
res_dict[attr] = data.get(attr, None) or 'None'
if isinstance(res_dict[attr], decimal.Decimal):
res_dict[attr] = float(res_dict[attr])
for key in ['attributes', 'required_attributes']:
for attr in res_type_data[key]:
if METADATA_NAME_ROOT in attr:
res_dict[attr] = data.get(
attr.replace(METADATA_NAME_ROOT, ''), None) or ''
if isinstance(res_dict[attr], decimal.Decimal):
res_dict[attr] = float(res_dict[attr])
created_metrics = [
self._conn.metric.create({
@ -224,7 +217,9 @@ class GnocchiStorage(BaseHybridBackend):
def __init__(self, **kwargs):
super(GnocchiStorage, self).__init__(**kwargs)
self.conf = kwargs['conf'] if 'conf' in kwargs else METRICS_CONF
conf = kwargs.get('conf') or ck_utils.load_conf(
CONF.collect.metrics_conf)
self.conf = validate_conf(conf)
self.auth = ks_loading.load_auth_from_conf_options(
CONF,
GNOCCHI_STORAGE_OPTS)
@ -241,9 +236,7 @@ class GnocchiStorage(BaseHybridBackend):
CONF.storage_gnocchi.archive_policy_name)
self._archive_policy_definition = json.loads(
CONF.storage_gnocchi.archive_policy_definition)
self._period = self.conf['period']
if "period" in kwargs:
self._period = kwargs["period"]
self._period = kwargs.get('period') or CONF.collect.period
self._measurements = dict()
self._resource_type_data = dict()
self._init_resource_types()
@ -288,21 +281,57 @@ class GnocchiStorage(BaseHybridBackend):
def get_total(self, begin=None, end=None, tenant_id=None,
service=None, groupby=None):
# Query can't be None if we don't specify a resource_id
query = {}
query = {'and': [{
'like': {'type': RESOURCE_TYPE_NAME_ROOT + '%'},
}]}
if tenant_id:
query['='] = {"project_id": tenant_id}
measures = self._conn.metric.aggregation(
metrics='price', query=query,
start=begin, stop=end,
aggregation='sum',
granularity=self._period,
needed_overlap=0)
rate = sum(measure[2] for measure in measures) if len(measures) else 0
return [{
'begin': begin,
'end': end,
'rate': rate,
}]
query['and'].append({'=': {'project_id': tenant_id}})
gb = []
if groupby:
for elem in groupby.split(','):
if elem in self.groupby_keys:
gb.append(self.groupby_values[
self.groupby_keys.index(elem)])
# Setting gb to None instead of an empty list
gb = gb if len(gb) > 0 else None
# build aggregation operation
op = ['aggregate', 'sum', ['metric', 'price', 'sum']]
try:
aggregates = self._conn.aggregates.fetch(
op,
start=begin,
stop=end,
groupby=gb,
search=query)
# No 'price' metric found
except gexceptions.BadRequest:
return [dict(begin=begin, end=end, rate=0)]
# In case no group_by was specified
if not isinstance(aggregates, list):
aggregates = [aggregates]
total_list = list()
for aggregate in aggregates:
if groupby:
measures = aggregate['measures']['measures']['aggregated']
else:
measures = aggregate['measures']['aggregated']
if len(measures) > 0:
rate = sum(measure[2] for measure in measures
if (measure[1] == self._period))
total = dict(begin=begin, end=end, rate=rate)
if gb:
for value in gb:
key = self.groupby_keys[
self.groupby_values.index(value)]
total[key] = aggregate['group'][value].replace(
RESOURCE_TYPE_NAME_ROOT, '')
total_list.append(total)
return total_list
def _append_measurements(self, resource, data, tenant_id):
if not self._measurements.get(tenant_id, None):
@ -322,7 +351,7 @@ class GnocchiStorage(BaseHybridBackend):
def append_time_frame(self, res_type, frame, tenant_id):
flat_frame = ck_utils.flat_dict(frame)
resource = self._find_resource(res_type, flat_frame['resource_id'])
resource = self._find_resource(res_type, flat_frame['id'])
if not resource:
resource = self._create_resource(res_type, tenant_id, flat_frame)
self._append_measurements(resource, flat_frame, tenant_id)
@ -441,7 +470,8 @@ class GnocchiStorage(BaseHybridBackend):
resource_type, resource_measures['group']['id'])
if not resource:
continue
desc = {a: resource.get(a, None) for a in attributes}
desc = {attr.replace(METADATA_NAME_ROOT, ''):
resource.get(attr, None) for attr in attributes}
formatted_frame = self._format_frame(
resource_type, resource, desc, measure, tenant_id)
output.append(formatted_frame)

View File

@ -43,6 +43,7 @@ from cloudkitty import rating
from cloudkitty import storage
from cloudkitty.storage.sqlalchemy import models
from cloudkitty import tests
from cloudkitty.tests import test_utils
from cloudkitty import utils as ck_utils
@ -278,7 +279,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
"begin": begin,
"end": end},
"usage": {
"compute": [
"cpu": [
{
"desc": {
"dummy": True,
@ -292,7 +293,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
"begin": begin,
"end": end},
"usage": {
"image": [
"image.size": [
{
"desc": {
"dummy": True,
@ -313,7 +314,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
return_value=dict())
with auth:
with session:
self.storage = storage.get_storage()
self.storage = storage.get_storage(conf=test_utils.load_conf())
self.storage.init()
self.initialize_data()
@ -391,14 +392,14 @@ class MetricsConfFixture(fixture.GabbiFixture):
"""Inject Metrics configuration mock to the get_metrics_conf() function"""
def start_fixture(self):
self._original_function = ck_utils.get_metrics_conf
ck_utils.get_metrics_conf = mock.Mock(
self._original_function = ck_utils.load_conf
ck_utils.load_conf = mock.Mock(
return_value=tests.samples.METRICS_CONF,
)
def stop_fixture(self):
"""Remove the get_metrics_conf() monkeypatch."""
ck_utils.get_metrics_conf = self._original_function
ck_utils.load_conf = self._original_function
def setup_app():

View File

@ -123,7 +123,7 @@ tests:
x-roles: admin
data:
resources:
- service: "compute"
- service: "cpu"
volume: "1.0"
desc:
test: 1

View File

@ -7,20 +7,9 @@ tests:
url: /v1/info/config
status: 200
response_json_paths:
$.collector: gnocchi
$.window: 1800
$.wait_periods: 2
$.period: 3600
$.metrics.`len`: 13
$.metrics.vcpus.unit: instance
$.metrics.memory.unit: instance
$.metrics.cpu.unit: instance
$.metrics['disk.root.size'].unit: instance
$.metrics['disk.ephemeral.size'].unit: instance
$.metrics.`len`: 7
$.metrics['cpu'].unit: instance
$.metrics['image.size'].unit: MiB
$.metrics['image.download'].unit: MiB
$.metrics['image.serve'].unit: MiB
$.metrics['volume.size'].unit: GiB
$.metrics['network.incoming.bytes'].unit: MB
$.metrics['network.outgoing.bytes'].unit: MB
@ -31,38 +20,25 @@ tests:
url: /v1/info/metrics
status: 200
response_json_paths:
$.metrics.`len`: 13
$.metrics[/metric_id][0].metric_id: cpu
$.metrics[/metric_id][0].unit: instance
$.metrics[/metric_id][1].metric_id: disk.ephemeral.size
$.metrics.`len`: 7
$.metrics[/metric_id][0].metric_id: image.size
$.metrics[/metric_id][0].unit: MiB
$.metrics[/metric_id][1].metric_id: instance
$.metrics[/metric_id][1].unit: instance
$.metrics[/metric_id][2].metric_id: disk.root.size
$.metrics[/metric_id][2].unit: instance
$.metrics[/metric_id][3].metric_id: image.download
$.metrics[/metric_id][3].unit: MiB
$.metrics[/metric_id][4].metric_id: image.serve
$.metrics[/metric_id][4].unit: MiB
$.metrics[/metric_id][5].metric_id: image.size
$.metrics[/metric_id][5].unit: MiB
$.metrics[/metric_id][6].metric_id: ip.floating
$.metrics[/metric_id][6].unit: ip
$.metrics[/metric_id][7].metric_id: memory
$.metrics[/metric_id][7].unit: instance
$.metrics[/metric_id][8].metric_id: network.incoming.bytes
$.metrics[/metric_id][8].unit: MB
$.metrics[/metric_id][9].metric_id: network.outgoing.bytes
$.metrics[/metric_id][9].unit: MB
$.metrics[/metric_id][10].metric_id: radosgw.objects.size
$.metrics[/metric_id][10].unit: GiB
$.metrics[/metric_id][11].metric_id: vcpus
$.metrics[/metric_id][11].unit: instance
$.metrics[/metric_id][12].metric_id: volume.size
$.metrics[/metric_id][12].unit: GiB
$.metrics[/metric_id][2].metric_id: ip.floating
$.metrics[/metric_id][2].unit: ip
$.metrics[/metric_id][3].metric_id: network.incoming.bytes
$.metrics[/metric_id][3].unit: MB
$.metrics[/metric_id][4].metric_id: network.outgoing.bytes
$.metrics[/metric_id][4].unit: MB
$.metrics[/metric_id][5].metric_id: radosgw.objects.size
$.metrics[/metric_id][5].unit: GiB
$.metrics[/metric_id][6].metric_id: volume.size
$.metrics[/metric_id][6].unit: GiB
- name: get cpu metric info
url: /v1/info/metrics/cpu
url: /v1/info/metrics/instance
status: 200
response_json_paths:
$.metric_id: cpu
$.metric_id: instance
$.unit: instance
$.metadata.`len`: 4

View File

@ -123,7 +123,7 @@ tests:
x-roles: admin
data:
resources:
- service: "compute"
- service: "cpu"
volume: "1.0"
desc:
test: 1

View File

@ -73,7 +73,7 @@ tests:
query_parameters:
begin: "2015-01-01T00:00:00"
end: "2015-02-04T00:00:00"
service: "compute"
service: "cpu"
status: 200
response_strings:
- "110.971"
@ -83,7 +83,7 @@ tests:
query_parameters:
begin: "2015-01-01T00:00:00"
end: "2015-02-04T00:00:00"
service: "image"
service: "image.size"
status: 200
response_strings:
- "10.043"
@ -94,7 +94,7 @@ tests:
begin: "2015-01-01T00:00:00"
end: "2015-02-04T00:00:00"
tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
service: "compute"
service: "cpu"
status: 200
response_strings:
- "37.436"
@ -133,19 +133,19 @@ tests:
query_parameters:
begin: "2015-01-01T00:00:00"
end: "2015-02-04T00:00:00"
service: "compute"
service: "cpu"
groupby: "tenant_id"
status: 200
response_json_paths:
$.summary.`len`: 2
$.summary[0].rate: "37.436"
$.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[0].res_type: "compute"
$.summary[0].res_type: "cpu"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "73.535"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].res_type: "compute"
$.summary[1].res_type: "cpu"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
@ -159,12 +159,12 @@ tests:
response_json_paths:
$.summary.`len`: 2
$.summary[0].rate: "10.043"
$.summary[0].res_type: "image"
$.summary[0].res_type: "image.size"
$.summary[0].tenant_id: "ALL"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "110.971"
$.summary[1].res_type: "compute"
$.summary[1].res_type: "cpu"
$.summary[1].tenant_id: "ALL"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
@ -181,12 +181,12 @@ tests:
$.summary.`len`: 2
$.summary[0].rate: "6.655"
$.summary[0].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[0].res_type: "image"
$.summary[0].res_type: "image.size"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "73.535"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].res_type: "compute"
$.summary[1].res_type: "cpu"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
@ -200,22 +200,22 @@ tests:
response_json_paths:
$.summary.`len`: 4
$.summary[0].rate: "3.388"
$.summary[0].res_type: "image"
$.summary[0].res_type: "image.size"
$.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[0].begin: "2015-01-01T00:00:00"
$.summary[0].end: "2015-02-04T00:00:00"
$.summary[1].rate: "6.655"
$.summary[1].res_type: "image"
$.summary[1].res_type: "image.size"
$.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[1].begin: "2015-01-01T00:00:00"
$.summary[1].end: "2015-02-04T00:00:00"
$.summary[2].rate: "37.436"
$.summary[2].res_type: "compute"
$.summary[2].res_type: "cpu"
$.summary[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
$.summary[2].begin: "2015-01-01T00:00:00"
$.summary[2].end: "2015-02-04T00:00:00"
$.summary[3].rate: "73.535"
$.summary[3].res_type: "compute"
$.summary[3].res_type: "cpu"
$.summary[3].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
$.summary[3].begin: "2015-01-01T00:00:00"
$.summary[3].end: "2015-02-04T00:00:00"

View File

@ -74,7 +74,7 @@ tests:
$.dataframes[0].resources.`len`: 1
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "compute"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: true
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
@ -83,7 +83,7 @@ tests:
$.dataframes[1].resources.`len`: 1
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "0.121"
$.dataframes[1].resources[0].service: "image"
$.dataframes[1].resources[0].service: "image.size"
$.dataframes[1].resources[0].desc.dummy: true
$.dataframes[1].resources[0].desc.fake_meta: 1.0
@ -102,7 +102,7 @@ tests:
$.dataframes[0].resources.`len`: 1
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "compute"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: true
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@ -111,7 +111,7 @@ tests:
$.dataframes[1].resources.`len`: 1
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "0.121"
$.dataframes[1].resources[0].service: "image"
$.dataframes[1].resources[0].service: "image.size"
$.dataframes[1].resources[0].desc.dummy: true
$.dataframes[1].resources[0].desc.fake_meta: 1.0
@ -129,7 +129,7 @@ tests:
$.dataframes[0].resources.`len`: 1
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "compute"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: true
$.dataframes[0].resources[0].desc.fake_meta: 1.0
$.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
@ -138,7 +138,7 @@ tests:
$.dataframes[1].resources.`len`: 1
$.dataframes[1].resources[0].volume: "1"
$.dataframes[1].resources[0].rating: "0.121"
$.dataframes[1].resources[0].service: "image"
$.dataframes[1].resources[0].service: "image.size"
$.dataframes[1].resources[0].desc.dummy: true
$.dataframes[1].resources[0].desc.fake_meta: 1.0
$.dataframes[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@ -147,7 +147,7 @@ tests:
$.dataframes[2].resources.`len`: 1
$.dataframes[2].resources[0].volume: "1"
$.dataframes[2].resources[0].rating: "1.337"
$.dataframes[2].resources[0].service: "compute"
$.dataframes[2].resources[0].service: "cpu"
$.dataframes[2].resources[0].desc.dummy: true
$.dataframes[2].resources[0].desc.fake_meta: 1.0
$.dataframes[3].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@ -156,16 +156,16 @@ tests:
$.dataframes[3].resources.`len`: 1
$.dataframes[3].resources[0].volume: "1"
$.dataframes[3].resources[0].rating: "0.121"
$.dataframes[3].resources[0].service: "image"
$.dataframes[3].resources[0].service: "image.size"
$.dataframes[3].resources[0].desc.dummy: true
$.dataframes[3].resources[0].desc.fake_meta: 1.0
- name: fetch data filtering on compute service and tenant
- name: fetch data filtering on cpu service and tenant
url: /v1/storage/dataframes
query_parameters:
begin: "2015-01-04T13:00:00"
end: "2015-01-04T14:00:00"
resource_type: "compute"
resource_type: "cpu"
tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
status: 200
response_json_paths:
@ -176,7 +176,7 @@ tests:
$.dataframes[0].resources.`len`: 1
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "1.337"
$.dataframes[0].resources[0].service: "compute"
$.dataframes[0].resources[0].service: "cpu"
$.dataframes[0].resources[0].desc.dummy: true
$.dataframes[0].resources[0].desc.fake_meta: 1.0
@ -185,7 +185,7 @@ tests:
query_parameters:
begin: "2015-01-04T13:00:00"
end: "2015-01-04T14:00:00"
resource_type: "image"
resource_type: "image.size"
tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
status: 200
response_json_paths:
@ -196,7 +196,7 @@ tests:
$.dataframes[0].resources.`len`: 1
$.dataframes[0].resources[0].volume: "1"
$.dataframes[0].resources[0].rating: "0.121"
$.dataframes[0].resources[0].service: "image"
$.dataframes[0].resources[0].service: "image.size"
$.dataframes[0].resources[0].desc.dummy: true
$.dataframes[0].resources[0].desc.fake_meta: 1.0

View File

@ -40,10 +40,10 @@ tests:
content-type: application/json
x-roles: admin
data:
name: "compute"
name: "cpu"
status: 201
response_json_paths:
$.name: "compute"
$.name: "cpu"
response_store_environ:
hash_error_service_id: $.service_id
@ -54,10 +54,10 @@ tests:
content-type: application/json
x-roles: admin
data:
name: "compute"
name: "cpu"
status: 409
response_strings:
- "Service compute already exists (UUID: $RESPONSE['$.service_id'])"
- "Service cpu already exists (UUID: $RESPONSE['$.service_id'])"
- name: create a service mapping with an invalid type
url: /v1/rating/module_config/hashmap/mappings

View File

@ -11,11 +11,11 @@ tests:
content-type: application/json
x-roles: admin
data:
name: "compute"
name: "cpu"
status: 201
response_json_paths:
$.service_id: "6c1b8a30-797f-4b7e-ad66-9879b79059fb"
$.name: "compute"
$.name: "cpu"
response_headers:
location: $SCHEME://$NETLOC/v1/rating/module_config/hashmap/services/6c1b8a30-797f-4b7e-ad66-9879b79059fb

View File

@ -20,10 +20,10 @@ tests:
content-type: application/json
x-roles: admin
data:
name: "compute"
name: "cpu"
status: 201
response_json_paths:
$.name: "compute"
$.name: "cpu"
response_store_environ:
hash_service_id: $.service_id
@ -32,7 +32,7 @@ tests:
status: 200
response_json_paths:
$.service_id: $RESPONSE['$.service_id']
$.name: "compute"
$.name: "cpu"
- name: create a flat service mapping
url: /v1/rating/module_config/hashmap/mappings
@ -60,7 +60,7 @@ tests:
status: 200
response_json_paths:
$.services.`len`: 1
$.services[0].name: "compute"
$.services[0].name: "cpu"
- name: create a rate service mapping
url: /v1/rating/module_config/hashmap/mappings

View File

@ -18,7 +18,6 @@
import copy
import decimal
from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
from cloudkitty import utils as ck_utils
TENANT = 'f266f30b11f246b589fd266f85eeec39'
@ -37,6 +36,7 @@ COMPUTE_METADATA = {
'flavor': 'm1.nano',
'image_id': 'f5600101-8fa2-4864-899e-ebcb7ed6b568',
'instance_id': '26c084e1-b8f1-4cbc-a7ec-e8b356788a17',
'id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
'resource_id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
'memory': '64',
'metadata': {
@ -50,6 +50,7 @@ COMPUTE_METADATA = {
IMAGE_METADATA = {
'checksum': '836c69cbcd1dc4f225daedbab6edc7c7',
'resource_id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
'id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
'container_format': 'aki',
'created_at': '2014-06-04T16:26:01',
'deleted': 'False',
@ -75,7 +76,7 @@ SECOND_PERIOD = {
COLLECTED_DATA = [{
'period': FIRST_PERIOD,
'usage': {
'cpu': [{
'instance': [{
'desc': COMPUTE_METADATA,
'vol': {
'qty': decimal.Decimal(1.0),
@ -88,22 +89,134 @@ COLLECTED_DATA = [{
}}, {
'period': SECOND_PERIOD,
'usage': {
'cpu': [{
'instance': [{
'desc': COMPUTE_METADATA,
'vol': {
'qty': decimal.Decimal(1.0),
'unit': 'instance'}}]
}}]
},
}]
RATED_DATA = copy.deepcopy(COLLECTED_DATA)
RATED_DATA[0]['usage']['cpu'][0]['rating'] = {
RATED_DATA[0]['usage']['instance'][0]['rating'] = {
'price': decimal.Decimal('0.42')}
RATED_DATA[0]['usage']['image.size'][0]['rating'] = {
'price': decimal.Decimal('0.1337')}
RATED_DATA[1]['usage']['cpu'][0]['rating'] = {
RATED_DATA[1]['usage']['instance'][0]['rating'] = {
'price': decimal.Decimal('0.42')}
DEFAULT_METRICS_CONF = {
"metrics": {
"cpu": {
"unit": "instance",
"alt_name": "instance",
"groupby": [
"id",
"project_id"
],
"metadata": [
"flavor",
"flavor_id",
"vcpus"
],
"mutate": "NUMBOOL",
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance"
}
},
"image.size": {
"unit": "MiB",
"factor": "1/1048576",
"groupby": [
"id",
"project_id"
],
"metadata": [
"container_format",
"disk_format"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "image"
}
},
"volume.size": {
"unit": "GiB",
"groupby": [
"id",
"project_id"
],
"metadata": [
"volume_type"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "volume"
}
},
"network.outgoing.bytes": {
"unit": "MB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1000000",
"metadata": [
"instance_id"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance_network_interface"
}
},
"network.incoming.bytes": {
"unit": "MB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1000000",
"metadata": [
"instance_id"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance_network_interface"
}
},
"ip.floating": {
"unit": "ip",
"groupby": [
"id",
"project_id"
],
"metadata": [
"state"
],
"mutate": "NUMBOOL",
"extra_args": {
"aggregation_method": "max",
"resource_type": "network"
}
},
"radosgw.objects.size": {
"unit": "GiB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1073741824",
"extra_args": {
"aggregation_method": "max",
"resource_type": "ceph_account"
}
}
}
}
def split_storage_data(raw_data):
final_data = []
for frame in raw_data:
@ -122,11 +235,11 @@ def split_storage_data(raw_data):
# FIXME(sheeprine): storage is not using decimal for rates, we need to
# transition to decimal.
STORED_DATA = copy.deepcopy(COLLECTED_DATA)
STORED_DATA[0]['usage']['cpu'][0]['rating'] = {
STORED_DATA[0]['usage']['instance'][0]['rating'] = {
'price': 0.42}
STORED_DATA[0]['usage']['image.size'][0]['rating'] = {
'price': 0.1337}
STORED_DATA[1]['usage']['cpu'][0]['rating'] = {
STORED_DATA[1]['usage']['instance'][0]['rating'] = {
'price': 0.42}
STORED_DATA = split_storage_data(STORED_DATA)

View File

@ -21,18 +21,17 @@ import mock
from gnocchiclient import exceptions as gexc
from cloudkitty import storage
from cloudkitty.storage.hybrid.backends import gnocchi as hgnocchi
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import test_utils
class BaseHybridStorageTest(tests.TestCase):
@mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
def setUp(self):
super(BaseHybridStorageTest, self).setUp()
self.conf.set_override('backend', 'hybrid', 'storage')
hgnocchi.METRICS_CONF = samples.METRICS_CONF
self.storage = storage.get_storage()
self.storage = storage.get_storage(conf=test_utils.load_conf())
with mock.patch.object(
self.storage._hybrid_backend, 'init'):
self.storage.init()

View File

@ -22,9 +22,9 @@ import sqlalchemy
import testscenarios
from cloudkitty import storage
from cloudkitty.storage.hybrid.backends import gnocchi as hgnocchi
from cloudkitty import tests
from cloudkitty.tests import samples
from cloudkitty.tests import test_utils
from cloudkitty import utils as ck_utils
@ -40,13 +40,13 @@ class StorageTest(tests.TestCase):
cls.storage_scenarios)
@mock.patch('cloudkitty.storage.hybrid.backends.gnocchi.gclient')
@mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
def setUp(self, gclient_mock):
super(StorageTest, self).setUp()
hgnocchi.METRICS_CONF = samples.METRICS_CONF
self._tenant_id = samples.TENANT
self._other_tenant_id = '8d3ae50089ea4142-9c6e1269db6a0b64'
self.conf.set_override('backend', self.storage_backend, 'storage')
self.storage = storage.get_storage()
self.storage = storage.get_storage(conf=test_utils.load_conf())
self.storage.init()
def insert_data(self):
@ -234,10 +234,10 @@ class StorageTotalTest(StorageTest):
total = self.storage.get_total(
begin=begin,
end=end,
service='cpu')
service='instance')
self.assertEqual(1, len(total))
self.assertEqual(0.84, total[0]["rate"])
self.assertEqual('cpu', total[0]["res_type"])
self.assertEqual('instance', total[0]["res_type"])
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
@ -273,7 +273,7 @@ class StorageTotalTest(StorageTest):
self.assertEqual(begin, total[0]["begin"])
self.assertEqual(end, total[0]["end"])
self.assertEqual(1.68, total[1]["rate"])
self.assertEqual('cpu', total[1]["res_type"])
self.assertEqual('instance', total[1]["res_type"])
self.assertEqual(begin, total[1]["begin"])
self.assertEqual(end, total[1]["end"])
@ -298,12 +298,12 @@ class StorageTotalTest(StorageTest):
self.assertEqual(end, total[1]["end"])
self.assertEqual(0.84, total[2]["rate"])
self.assertEqual(self._other_tenant_id, total[2]["tenant_id"])
self.assertEqual('cpu', total[2]["res_type"])
self.assertEqual('instance', total[2]["res_type"])
self.assertEqual(begin, total[2]["begin"])
self.assertEqual(end, total[2]["end"])
self.assertEqual(0.84, total[3]["rate"])
self.assertEqual(self._tenant_id, total[3]["tenant_id"])
self.assertEqual('cpu', total[3]["res_type"])
self.assertEqual('instance', total[3]["res_type"])
self.assertEqual(begin, total[3]["begin"])
self.assertEqual(end, total[3]["end"])
@ -429,6 +429,9 @@ class StorageDataIntegrityTest(StorageTest):
if 'image.size' in stored_data[0]['usage']:
stored_data[0]['usage'], stored_data[1]['usage'] = (
stored_data[1]['usage'], stored_data[0]['usage'])
if 'image.size' in expected_data[0]['usage']:
expected_data[0]['usage'], expected_data[1]['usage'] = (
expected_data[1]['usage'], expected_data[0]['usage'])
self.assertEqual(
expected_data,
stored_data)

View File

@ -38,7 +38,6 @@ class OrchestratorTest(tests.TestCase):
super(OrchestratorTest, self).setUp()
messaging_conf = self.useFixture(conffixture.ConfFixture(self.conf))
messaging_conf.transport_url = 'fake:/'
self.conf.set_override('backend', 'keystone', 'tenant_fetcher')
self.conf.import_group('keystone_fetcher',
'cloudkitty.fetcher.keystone')

View File

@ -24,6 +24,7 @@ import unittest
import mock
from oslo_utils import timeutils
from cloudkitty.tests.samples import DEFAULT_METRICS_CONF
from cloudkitty import utils as ck_utils
@ -195,3 +196,7 @@ class ConvertUnitTest(unittest.TestCase):
def test_convert_decimal(self):
result = ck_utils.num2decimal(decimal.Decimal(2))
self.assertEqual(result, decimal.Decimal(2))
def load_conf(*args):
return DEFAULT_METRICS_CONF

View File

@ -15,13 +15,23 @@
#
# @author: Stéphane Albert
#
from oslo_log import log
from cloudkitty import transformer
LOG = log.getLogger(__name__)
class CloudKittyFormatTransformer(transformer.BaseTransformer):
def format_item(self, desc, unit, qty=1.0):
def format_item(self, groupby, metadata, unit, qty=1.0):
data = {}
data['desc'] = desc
data['groupby'] = groupby
data['metadata'] = metadata
# For backward compatibility.
data['desc'] = data['groupby'].copy()
data['desc'].update(data['metadata'])
data['vol'] = {'unit': unit, 'qty': qty}
return data
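For clarity, a short sketch of what the new format_item() contract produces; the field values below are invented for illustration, but the assertions follow directly from the code above:

    # Sketch: 'desc' stays as the merge of groupby and metadata, so
    # existing consumers of the old format keep working.
    transformer = CloudKittyFormatTransformer()
    item = transformer.format_item(
        groupby={'id': 'c8f1f23e-5639-4fd2-9173-1558f911b55a',
                 'project_id': 'f266f30b11f246b589fd266f85eeec39'},
        metadata={'flavor': 'm1.nano'},
        unit='instance',
        qty=1.0,
    )
    assert item['groupby']['id'] == 'c8f1f23e-5639-4fd2-9173-1558f911b55a'
    assert item['desc']['flavor'] == 'm1.nano'   # merged for compatibility
    assert item['vol'] == {'unit': 'instance', 'qty': 1.0}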

View File

@ -26,74 +26,24 @@ import contextlib
import datetime
import decimal
import fractions
import math
import shutil
import six
import sys
import tempfile
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from six import moves
from stevedore import extension
COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
LOG = logging.getLogger(__name__)
collect_opts = [
cfg.StrOpt('fetcher',
default='keystone',
deprecated_for_removal=True,
help='Project fetcher.'),
cfg.StrOpt('collector',
default='gnocchi',
deprecated_for_removal=True,
help='Data collector.'),
cfg.IntOpt('window',
default=1800,
deprecated_for_removal=True,
help='Number of samples to collect per call.'),
cfg.IntOpt('period',
default=3600,
deprecated_for_removal=True,
help='Rating period in seconds.'),
cfg.IntOpt('wait_periods',
default=2,
deprecated_for_removal=True,
help='Wait for N periods before collecting new data.'),
cfg.ListOpt('services',
default=[
'compute',
'volume',
'network.bw.in',
'network.bw.out',
'network.floating',
'image',
],
deprecated_for_removal=True,
help='Services to monitor.'),
cfg.StrOpt('metrics_conf',
default='/etc/cloudkitty/metrics.yml',
help='Metrology configuration file.'),
]
storage_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
help='Name of the storage backend driver.')
]
CONF = cfg.CONF
CONF.register_opts(collect_opts, 'collect')
CONF.register_opts(storage_opts, 'storage')
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
@ -263,21 +213,22 @@ def refresh_stevedore(namespace=None):
cache.clear()
def check_time_state(timestamp=None, period=0, wait_time=0):
def check_time_state(timestamp=None, period=0, wait_periods=0):
if not timestamp:
return get_month_start_timestamp()
now = utcnow_ts()
next_timestamp = timestamp + period
wait_time = wait_periods * period
if next_timestamp + wait_time < now:
return next_timestamp
return 0
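To illustrate the renamed wait_periods parameter, a small worked example with hypothetical values (utcnow_ts() is the module helper used above):

    # Hourly period, wait two full periods, last collection four hours ago.
    period = 3600
    wait_periods = 2
    now = utcnow_ts()
    timestamp = now - 4 * period

    next_timestamp = timestamp + period
    # next_timestamp + 2 * period is still in the past, so collection runs:
    assert check_time_state(timestamp, period, wait_periods) == next_timestamp
    # With only one elapsed period the same call would return 0 and wait.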
def get_metrics_conf(conf_path):
"""Return loaded yaml metrology configuration.
def load_conf(conf_path):
"""Return loaded yaml configuration.
In case not found metrics.yml file,
If the yaml file is not found,
return an empty dict.
"""
# NOTE(mc): We cannot raise any exception in this function, as it is called
@ -286,10 +237,9 @@ def get_metrics_conf(conf_path):
try:
with open(conf_path) as conf:
res = yaml.safe_load(conf)
res.update({'storage': CONF.storage.backend})
return res or {}
except Exception:
LOG.warning('Error when trying to retrieve yaml metrology conf file.')
LOG.warning("Error when trying to retrieve {} file.".format(conf_path))
return {}
@ -306,6 +256,21 @@ def tempdir(**kwargs):
six.text_type(e))
def mutate(value, mode='NONE'):
"""Mutate value according provided mode."""
if mode == 'NUMBOOL':
return float(value != 0.0)
if mode == 'FLOOR':
return math.floor(value)
if mode == 'CEIL':
return math.ceil(value)
return value
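A few sanity checks for the mutate() helper defined above:

    assert mutate(12.5, 'NUMBOOL') == 1.0   # any non-zero value becomes 1.0
    assert mutate(0.0, 'NUMBOOL') == 0.0
    assert mutate(2.7, 'FLOOR') == 2
    assert mutate(2.2, 'CEIL') == 3
    assert mutate(42) == 42                 # default 'NONE' leaves values as-is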
def num2decimal(num):
"""Converts a number into a decimal.Decimal.
@ -322,7 +287,7 @@ def num2decimal(num):
return decimal.Decimal(num)
def convert_unit(value, factor=1, offset=0):
def convert_unit(value, factor, offset):
"""Return converted value depending on the provided factor and offset."""
return num2decimal(value) * num2decimal(factor) + num2decimal(offset)
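A quick example of the now-mandatory factor and offset arguments; this assumes num2decimal() accepts fraction strings such as '1/1048576', as the fractions import above suggests:

    import decimal

    # image.size is reported in bytes; with factor '1/1048576' the rated
    # quantity comes out in MiB.
    assert convert_unit(2097152, '1/1048576', 0) == decimal.Decimal('2')
    assert convert_unit(1, 1, 0) == decimal.Decimal('1')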

View File

@ -1,102 +1,85 @@
name: OpenStack
fetcher: keystone
collector: gnocchi
period: 3600
wait_periods: 2
window: 1800
services_objects:
compute: instance
volume: volume
network.bw.out: instance_network_interface
network.bw.in: instance_network_interface
network.floating: network
image: image
radosgw.usage: ceph_account
metrics:
vcpus:
resource: instance
unit: instance
factor: 1
aggregation_method: max
countable_unit: true
memory:
resource: instance
unit: instance
factor: 1
aggregation_method: max
countable_unit: true
cpu:
resource: instance
unit: instance
factor: 1
aggregation_method: max
countable_unit: true
disk.root.size:
resource: instance
unit: instance
factor: 1
aggregation_method: max
countable_unit: true
disk.ephemeral.size:
resource: instance
unit: instance
factor: 1
aggregation_method: max
countable_unit: true
alt_name: instance
groupby:
- id
- project_id
metadata:
- flavor
- flavor_id
- vcpus
mutate: NUMBOOL
extra_args:
aggregation_method: max
resource_type: instance
image.size:
resource: image
unit: MiB
factor: 1/1048576
aggregation_method: max
image.download:
resource: image
unit: MiB
factor: 1/1048576
aggregation_method: max
image.serve:
resource: image
unit: MiB
factor: 1/1048576
aggregation_method: max
groupby:
- id
- project_id
metadata:
- container_format
- disk_format
extra_args:
aggregation_method: max
resource_type: image
volume.size:
resource: volume
unit: GiB
factor: 1
aggregation_method: max
groupby:
- id
- project_id
metadata:
- volume_type
extra_args:
aggregation_method: max
resource_type: volume
network.outgoing.bytes:
resource: instance_network_interface
unit: MB
groupby:
- id
- project_id
factor: 1/1000000
aggregation_method: max
metadata:
- instance_id
extra_args:
aggregation_method: max
resource_type: instance_network_interface
network.incoming.bytes:
resource: instance_network_interface
unit: MB
groupby:
- id
- project_id
factor: 1/1000000
aggregation_method: max
metadata:
- instance_id
extra_args:
aggregation_method: max
resource_type: instance_network_interface
ip.floating:
resource: network
unit: ip
factor: 1
aggregation_method: max
countable_unit: true
groupby:
- id
- project_id
metadata:
- state
mutate: NUMBOOL
extra_args:
aggregation_method: max
resource_type: network
radosgw.objects.size:
resource: ceph_account
unit: GiB
groupby:
- id
- project_id
factor: 1/1073741824
aggregation_method: max
extra_args:
aggregation_method: max
resource_type: ceph_account
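Since the new format no longer hardcodes OpenStack services, a non-OpenStack metric can be declared with the same keys; a hypothetical entry (the metric name, unit and resource_type are invented for illustration):

    import yaml

    # Hypothetical custom metric reusing the keys shown above.
    custom = yaml.safe_load("""
    metrics:
      request.count:
        unit: request
        groupby:
          - id
          - project_id
        metadata:
          - http_status
        mutate: FLOOR
        extra_args:
          aggregation_method: max
          resource_type: api_endpoint
    """)
    assert 'request.count' in custom['metrics']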

View File

@ -0,0 +1,4 @@
features:
- |
The format of the 'metrics.yml' configuration file has been improved,
and is now considered stable.

View File

@ -27,3 +27,4 @@ SQLAlchemy<1.1.0,>=1.0.10 # MIT
six>=1.9.0 # MIT
stevedore>=1.5.0 # Apache-2.0
tooz>=1.28.0 # Apache-2.0
voluptuous>=0.11.1,<1.0.0 # BSD-3
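voluptuous is pulled in to validate the new metric format on load; a rough, illustrative schema for a single metric entry follows (this is a sketch, not the schema actually shipped in the collector package):

    import voluptuous

    # Illustrative only -- the real schema lives with the collector code.
    METRIC_SCHEMA = voluptuous.Schema({
        voluptuous.Required('unit'): str,
        voluptuous.Optional('alt_name'): str,
        voluptuous.Optional('factor', default=1): voluptuous.Any(int, str),
        voluptuous.Optional('mutate', default='NONE'):
            voluptuous.In(['NONE', 'NUMBOOL', 'FLOOR', 'CEIL']),
        voluptuous.Required('groupby'): [str],
        voluptuous.Optional('metadata', default=[]): [str],
        voluptuous.Optional('extra_args'): dict,
    })

    METRIC_SCHEMA({'unit': 'GiB', 'groupby': ['id', 'project_id']})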

View File

@ -50,7 +50,7 @@ cloudkitty.collector.backends =
monasca = cloudkitty.collector.monasca:MonascaCollector
meta = cloudkitty.collector.meta:MetaCollector
cloudkitty.tenant.fetchers =
cloudkitty.fetchers =
fake = cloudkitty.fetcher.fake:FakeFetcher
keystone = cloudkitty.fetcher.keystone:KeystoneFetcher
source = cloudkitty.fetcher.source:SourceFetcher
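With the entry-point namespace renamed to cloudkitty.fetchers, fetchers resolve through stevedore as usual; a minimal sketch:

    from stevedore import driver

    # Resolve the keystone fetcher declared above from the renamed namespace.
    mgr = driver.DriverManager(
        namespace='cloudkitty.fetchers',
        name='keystone',
        invoke_on_load=False,
    )
    fetcher_cls = mgr.driver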