Merge "gnocchi: configure archive policies on Ceilo side"

Zuul 2018-02-28 14:47:45 +00:00 committed by Gerrit Code Review
commit 9d8ce8b77c
5 changed files with 362 additions and 237 deletions

View File

@ -1,71 +1,95 @@
---
archive_policy_default: ceilometer-low
archive_policies:
# NOTE(sileht): We keep "mean" for now so as not to break the gating that
# uses the current tempest scenario.
- name: ceilometer-low
aggregation_methods:
- mean
back_window: 0
definition:
- granularity: 5 minutes
timespan: 30 days
- name: ceilometer-low-rate
aggregation_methods:
- mean
- rate:mean
back_window: 0
definition:
- granularity: 5 minutes
timespan: 30 days
resources:
- resource_type: identity
metrics:
- 'identity.authenticate.success'
- 'identity.authenticate.pending'
- 'identity.authenticate.failure'
- 'identity.user.created'
- 'identity.user.deleted'
- 'identity.user.updated'
- 'identity.group.created'
- 'identity.group.deleted'
- 'identity.group.updated'
- 'identity.role.created'
- 'identity.role.deleted'
- 'identity.role.updated'
- 'identity.project.created'
- 'identity.project.deleted'
- 'identity.project.updated'
- 'identity.trust.created'
- 'identity.trust.deleted'
- 'identity.role_assignment.created'
- 'identity.role_assignment.deleted'
identity.authenticate.success:
identity.authenticate.pending:
identity.authenticate.failure:
identity.user.created:
identity.user.deleted:
identity.user.updated:
identity.group.created:
identity.group.deleted:
identity.group.updated:
identity.role.created:
identity.role.deleted:
identity.role.updated:
identity.project.created:
identity.project.deleted:
identity.project.updated:
identity.trust.created:
identity.trust.deleted:
identity.role_assignment.created:
identity.role_assignment.deleted:
- resource_type: ceph_account
metrics:
- 'radosgw.objects'
- 'radosgw.objects.size'
- 'radosgw.objects.containers'
- 'radosgw.api.request'
- 'radosgw.containers.objects'
- 'radosgw.containers.objects.size'
radosgw.objects:
radosgw.objects.size:
radosgw.objects.containers:
radosgw.api.request:
radosgw.containers.objects:
radosgw.containers.objects.size:
- resource_type: instance
metrics:
- 'memory'
- 'memory.usage'
- 'memory.resident'
- 'memory.swap.in'
- 'memory.swap.out'
- 'memory.bandwidth.total'
- 'memory.bandwidth.local'
- 'vcpus'
- 'cpu'
- 'cpu.delta'
- 'cpu_util'
- 'cpu_l3_cache'
- 'disk.root.size'
- 'disk.ephemeral.size'
- 'disk.read.requests'
- 'disk.read.requests.rate'
- 'disk.write.requests'
- 'disk.write.requests.rate'
- 'disk.read.bytes'
- 'disk.read.bytes.rate'
- 'disk.write.bytes'
- 'disk.write.bytes.rate'
- 'disk.latency'
- 'disk.iops'
- 'disk.capacity'
- 'disk.allocation'
- 'disk.usage'
- 'compute.instance.booting.time'
- 'perf.cpu.cycles'
- 'perf.instructions'
- 'perf.cache.references'
- 'perf.cache.misses'
memory:
memory.usage:
memory.resident:
memory.swap.in:
memory.swap.out:
memory.bandwidth.total:
memory.bandwidth.local:
vcpus:
cpu:
archive_policy_name: ceilometer-low-rate
cpu.delta:
cpu_util:
cpu_l3_cache:
disk.root.size:
disk.ephemeral.size:
disk.read.requests:
archive_policy_name: ceilometer-low-rate
disk.read.requests.rate:
disk.write.requests:
archive_policy_name: ceilometer-low-rate
disk.write.requests.rate:
disk.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.read.bytes.rate:
disk.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.write.bytes.rate:
disk.latency:
disk.iops:
disk.capacity:
disk.allocation:
disk.usage:
compute.instance.booting.time:
perf.cpu.cycles:
perf.instructions:
perf.cache.references:
perf.cache.misses:
attributes:
host: resource_metadata.(instance_host|host)
image_ref: resource_metadata.image_ref
@ -85,48 +109,60 @@ resources:
- resource_type: instance_network_interface
metrics:
- 'network.outgoing.packets.rate'
- 'network.incoming.packets.rate'
- 'network.outgoing.packets'
- 'network.incoming.packets'
- 'network.outgoing.packets.drop'
- 'network.incoming.packets.drop'
- 'network.outgoing.packets.error'
- 'network.incoming.packets.error'
- 'network.outgoing.bytes.rate'
- 'network.incoming.bytes.rate'
- 'network.outgoing.bytes'
- 'network.incoming.bytes'
network.outgoing.packets.rate:
network.incoming.packets.rate:
network.outgoing.packets:
archive_policy_name: ceilometer-low-rate
network.incoming.packets:
archive_policy_name: ceilometer-low-rate
network.outgoing.packets.drop:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.drop:
archive_policy_name: ceilometer-low-rate
network.outgoing.packets.error:
archive_policy_name: ceilometer-low-rate
network.incoming.packets.error:
archive_policy_name: ceilometer-low-rate
network.outgoing.bytes.rate:
network.incoming.bytes.rate:
network.outgoing.bytes:
archive_policy_name: ceilometer-low-rate
network.incoming.bytes:
archive_policy_name: ceilometer-low-rate
attributes:
name: resource_metadata.vnic_name
instance_id: resource_metadata.instance_id
- resource_type: instance_disk
metrics:
- 'disk.device.read.requests'
- 'disk.device.read.requests.rate'
- 'disk.device.write.requests'
- 'disk.device.write.requests.rate'
- 'disk.device.read.bytes'
- 'disk.device.read.bytes.rate'
- 'disk.device.write.bytes'
- 'disk.device.write.bytes.rate'
- 'disk.device.latency'
- 'disk.device.read.latency'
- 'disk.device.write.latency'
- 'disk.device.iops'
- 'disk.device.capacity'
- 'disk.device.allocation'
- 'disk.device.usage'
disk.device.read.requests:
archive_policy_name: ceilometer-low-rate
disk.device.read.requests.rate:
disk.device.write.requests:
archive_policy_name: ceilometer-low-rate
disk.device.write.requests.rate:
disk.device.read.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.read.bytes.rate:
disk.device.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.write.bytes.rate:
disk.device.latency:
disk.device.read.latency:
disk.device.write.latency:
disk.device.iops:
disk.device.capacity:
disk.device.allocation:
disk.device.usage:
attributes:
name: resource_metadata.disk_name
instance_id: resource_metadata.instance_id
- resource_type: image
metrics:
- 'image.size'
- 'image.download'
- 'image.serve'
image.size:
image.download:
image.serve:
attributes:
name: resource_metadata.name
container_format: resource_metadata.container_format
@ -137,18 +173,18 @@ resources:
- resource_type: ipmi
metrics:
- 'hardware.ipmi.node.power'
- 'hardware.ipmi.node.temperature'
- 'hardware.ipmi.node.inlet_temperature'
- 'hardware.ipmi.node.outlet_temperature'
- 'hardware.ipmi.node.fan'
- 'hardware.ipmi.node.current'
- 'hardware.ipmi.node.voltage'
- 'hardware.ipmi.node.airflow'
- 'hardware.ipmi.node.cups'
- 'hardware.ipmi.node.cpu_util'
- 'hardware.ipmi.node.mem_util'
- 'hardware.ipmi.node.io_util'
hardware.ipmi.node.power:
hardware.ipmi.node.temperature:
hardware.ipmi.node.inlet_temperature:
hardware.ipmi.node.outlet_temperature:
hardware.ipmi.node.fan:
hardware.ipmi.node.current:
hardware.ipmi.node.voltage:
hardware.ipmi.node.airflow:
hardware.ipmi.node.cups:
hardware.ipmi.node.cpu_util:
hardware.ipmi.node.mem_util:
hardware.ipmi.node.io_util:
- resource_type: ipmi_sensor
metrics:
@ -161,38 +197,38 @@ resources:
- resource_type: network
metrics:
- 'bandwidth'
- 'ip.floating'
bandwidth:
ip.floating:
event_delete: floatingip.delete.end
event_attributes:
id: resource_id
- resource_type: stack
metrics:
- 'stack.create'
- 'stack.update'
- 'stack.delete'
- 'stack.resume'
- 'stack.suspend'
stack.create:
stack.update:
stack.delete:
stack.resume:
stack.suspend:
- resource_type: swift_account
metrics:
- 'storage.objects.incoming.bytes'
- 'storage.objects.outgoing.bytes'
- 'storage.api.request'
- 'storage.objects.size'
- 'storage.objects'
- 'storage.objects.containers'
- 'storage.containers.objects'
- 'storage.containers.objects.size'
storage.objects.incoming.bytes:
storage.objects.outgoing.bytes:
storage.api.request:
storage.objects.size:
storage.objects:
storage.objects.containers:
storage.containers.objects:
storage.containers.objects.size:
- resource_type: volume
metrics:
- 'volume'
- 'volume.size'
- 'snapshot.size'
- 'volume.snapshot.size'
- 'volume.backup.size'
volume:
volume.size:
snapshot.size:
volume.snapshot.size:
volume.backup.size:
attributes:
display_name: resource_metadata.(display_name|name)
volume_type: resource_metadata.volume_type
@ -204,81 +240,81 @@ resources:
- resource_type: volume_provider
metrics:
- 'volume.provider.capacity.total'
- 'volume.provider.capacity.free'
- 'volume.provider.capacity.allocated'
- 'volume.provider.capacity.provisioned'
- 'volume.provider.capacity.virtual_free'
volume.provider.capacity.total:
volume.provider.capacity.free:
volume.provider.capacity.allocated:
volume.provider.capacity.provisioned:
volume.provider.capacity.virtual_free:
- resource_type: volume_provider_pool
metrics:
- 'volume.provider.pool.capacity.total'
- 'volume.provider.pool.capacity.free'
- 'volume.provider.pool.capacity.allocated'
- 'volume.provider.pool.capacity.provisioned'
- 'volume.provider.pool.capacity.virtual_free'
volume.provider.pool.capacity.total:
volume.provider.pool.capacity.free:
volume.provider.pool.capacity.allocated:
volume.provider.pool.capacity.provisioned:
volume.provider.pool.capacity.virtual_free:
attributes:
provider: resource_metadata.provider
- resource_type: host
metrics:
- 'hardware.cpu.load.1min'
- 'hardware.cpu.load.5min'
- 'hardware.cpu.load.15min'
- 'hardware.cpu.util'
- 'hardware.memory.total'
- 'hardware.memory.used'
- 'hardware.memory.swap.total'
- 'hardware.memory.swap.avail'
- 'hardware.memory.buffer'
- 'hardware.memory.cached'
- 'hardware.network.ip.outgoing.datagrams'
- 'hardware.network.ip.incoming.datagrams'
- 'hardware.system_stats.cpu.idle'
- 'hardware.system_stats.io.outgoing.blocks'
- 'hardware.system_stats.io.incoming.blocks'
hardware.cpu.load.1min:
hardware.cpu.load.5min:
hardware.cpu.load.15min:
hardware.cpu.util:
hardware.memory.total:
hardware.memory.used:
hardware.memory.swap.total:
hardware.memory.swap.avail:
hardware.memory.buffer:
hardware.memory.cached:
hardware.network.ip.outgoing.datagrams:
hardware.network.ip.incoming.datagrams:
hardware.system_stats.cpu.idle:
hardware.system_stats.io.outgoing.blocks:
hardware.system_stats.io.incoming.blocks:
attributes:
host_name: resource_metadata.resource_url
- resource_type: host_disk
metrics:
- 'hardware.disk.size.total'
- 'hardware.disk.size.used'
- 'hardware.disk.read.bytes'
- 'hardware.disk.write.bytes'
- 'hardware.disk.read.requests'
- 'hardware.disk.write.requests'
hardware.disk.size.total:
hardware.disk.size.used:
hardware.disk.read.bytes:
hardware.disk.write.bytes:
hardware.disk.read.requests:
hardware.disk.write.requests:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.device
- resource_type: host_network_interface
metrics:
- 'hardware.network.incoming.bytes'
- 'hardware.network.outgoing.bytes'
- 'hardware.network.outgoing.errors'
hardware.network.incoming.bytes:
hardware.network.outgoing.bytes:
hardware.network.outgoing.errors:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.name
- resource_type: nova_compute
metrics:
- 'compute.node.cpu.frequency'
- 'compute.node.cpu.idle.percent'
- 'compute.node.cpu.idle.time'
- 'compute.node.cpu.iowait.percent'
- 'compute.node.cpu.iowait.time'
- 'compute.node.cpu.kernel.percent'
- 'compute.node.cpu.kernel.time'
- 'compute.node.cpu.percent'
- 'compute.node.cpu.user.percent'
- 'compute.node.cpu.user.time'
compute.node.cpu.frequency:
compute.node.cpu.idle.percent:
compute.node.cpu.idle.time:
compute.node.cpu.iowait.percent:
compute.node.cpu.iowait.time:
compute.node.cpu.kernel.percent:
compute.node.cpu.kernel.time:
compute.node.cpu.percent:
compute.node.cpu.user.percent:
compute.node.cpu.user.time:
attributes:
host_name: resource_metadata.host
- resource_type: manila_share
metrics:
- 'manila.share.size'
manila.share.size:
attributes:
name: resource_metadata.name
host: resource_metadata.host
@ -288,27 +324,27 @@ resources:
- resource_type: switch
metrics:
- 'switch'
- 'switch.ports'
switch:
switch.ports:
attributes:
controller: resource_metadata.controller
- resource_type: switch_port
metrics:
- 'switch.port'
- 'switch.port.uptime'
- 'switch.port.receive.packets'
- 'switch.port.transmit.packets'
- 'switch.port.receive.bytes'
- 'switch.port.transmit.bytes'
- 'switch.port.receive.drops'
- 'switch.port.transmit.drops'
- 'switch.port.receive.errors'
- 'switch.port.transmit.errors'
- 'switch.port.receive.frame_error'
- 'switch.port.receive.overrun_error'
- 'switch.port.receive.crc_error'
- 'switch.port.collision.count'
switch.port:
switch.port.uptime:
switch.port.receive.packets:
switch.port.transmit.packets:
switch.port.receive.bytes:
switch.port.transmit.bytes:
switch.port.receive.drops:
switch.port.transmit.drops:
switch.port.receive.errors:
switch.port.transmit.errors:
switch.port.receive.frame_error:
switch.port.receive.overrun_error:
switch.port.receive.crc_error:
switch.port.collision.count:
attributes:
switch: resource_metadata.switch
port_number_on_switch: resource_metadata.port_number_on_switch
@ -317,20 +353,20 @@ resources:
- resource_type: port
metrics:
- 'port'
- 'port.uptime'
- 'port.receive.packets'
- 'port.transmit.packets'
- 'port.receive.bytes'
- 'port.transmit.bytes'
- 'port.receive.drops'
- 'port.receive.errors'
port:
port.uptime:
port.receive.packets:
port.transmit.packets:
port.receive.bytes:
port.transmit.bytes:
port.receive.drops:
port.receive.errors:
attributes:
controller: resource_metadata.controller
- resource_type: switch_table
metrics:
- 'switch.table.active.entries'
switch.table.active.entries:
attributes:
controller: resource_metadata.controller
switch: resource_metadata.switch
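In the new format above, each metric name maps to an optional mapping that may carry its own archive_policy_name; entries left empty fall back to archive_policy_default. A minimal sketch of that resolution, assuming the file has been parsed with PyYAML (resolve_archive_policy is a hypothetical helper, not part of this commit):

import yaml

def resolve_archive_policy(data, metric_name):
    """Return the archive policy name used for metric_name."""
    # Fall back to the file-level default, as the publisher does.
    default = data.get("archive_policy_default", "low")
    for resource in data.get("resources", []):
        metrics = resource.get("metrics", {})
        if isinstance(metrics, list):
            # Old format: a plain list of names with no per-metric options.
            metrics = dict.fromkeys(metrics)
        if metric_name in metrics:
            extra = metrics[metric_name] or {}
            return extra.get("archive_policy_name") or default
    return default

with open("gnocchi_resources.yaml") as f:
    data = yaml.safe_load(f)

print(resolve_archive_policy(data, "cpu"))           # ceilometer-low-rate
print(resolve_archive_policy(data, "memory.usage"))  # ceilometer-low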

View File

@ -54,12 +54,12 @@ EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete")
class ResourcesDefinition(object):
MANDATORY_FIELDS = {'resource_type': six.string_types,
'metrics': list}
'metrics': (dict, list)}
MANDATORY_EVENT_FIELDS = {'id': six.string_types}
def __init__(self, definition_cfg, default_archive_policy, plugin_manager):
self._default_archive_policy = default_archive_policy
def __init__(self, definition_cfg, archive_policy_default,
archive_policy_override, plugin_manager):
self.cfg = definition_cfg
self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg)
@ -79,24 +79,44 @@ class ResourcesDefinition(object):
name, attr_cfg, plugin_manager)
self.metrics = {}
for t in self.cfg['metrics']:
archive_policy = self.cfg.get('archive_policy',
self._default_archive_policy)
if archive_policy is None:
self.metrics[t] = {}
else:
self.metrics[t] = dict(archive_policy_name=archive_policy)
# NOTE(sileht): Convert old list to new dict format
if isinstance(self.cfg['metrics'], list):
values = [None] * len(self.cfg['metrics'])
self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values))
for m, extra in self.cfg['metrics'].items():
if not extra:
extra = {}
if not extra.get("archive_policy_name"):
extra["archive_policy_name"] = archive_policy_default
if archive_policy_override:
extra["archive_policy_name"] = archive_policy_override
# NOTE(sileht): For backward compat, this is after the override to
# preserve the weird previous behavior. We don't really care as we
# deprecate it.
if 'archive_policy' in self.cfg:
LOG.warning("archive_policy '%s' for a resource-type (%s) is "
"deprecated, set it for each metric instead.",
self.cfg["archive_policy"],
self.cfg["resource_type"])
extra["archive_policy_name"] = self.cfg['archive_policy']
self.metrics[m] = extra
@staticmethod
def _check_required_and_types(expected, definition):
for field, field_type in expected.items():
for field, field_types in expected.items():
if field not in definition:
raise declarative.ResourceDefinitionException(
_("Required field %s not specified") % field, definition)
if not isinstance(definition[field], field_type):
if not isinstance(definition[field], field_types):
raise declarative.ResourceDefinitionException(
_("Required field %(field)s should be a %(type)s") %
{'field': field, 'type': field_type}, definition)
{'field': field, 'type': field_types}, definition)
@staticmethod
def _ensure_list(value):
@ -185,11 +205,13 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
resources_definition_file = options.get(
'resources_definition_file',
[conf.dispatcher_gnocchi.resources_definition_file])[-1]
archive_policy = options.get(
archive_policy_override = options.get(
'archive_policy',
[conf.dispatcher_gnocchi.archive_policy])[-1]
self.resources_definition = self._load_resources_definitions(
conf, archive_policy, resources_definition_file)
self.resources_definition, self.archive_policies_definition = (
self._load_definitions(conf, archive_policy_override,
resources_definition_file))
self.metric_map = dict((metric, rd) for rd in self.resources_definition
for metric in rd.metrics)
@ -224,25 +246,38 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
self._already_logged_event_types = set()
self._already_logged_metric_names = set()
self.ensures_archives_policies()
@staticmethod
def _load_resources_definitions(conf, archive_policy,
resources_definition_file):
def _load_definitions(conf, archive_policy_override,
resources_definition_file):
plugin_manager = extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin')
data = declarative.load_definitions(
conf, {}, resources_definition_file,
pkg_resources.resource_filename(__name__,
"data/gnocchi_resources.yaml"))
archive_policy_default = data.get("archive_policy_default", "low")
resource_defs = []
for resource in data.get('resources', []):
try:
resource_defs.append(ResourcesDefinition(
resource,
archive_policy, plugin_manager))
archive_policy_default,
archive_policy_override,
plugin_manager))
except Exception as exc:
LOG.error("Failed to load resource due to error %s" %
exc)
return resource_defs
return resource_defs, data.get("archive_policies", [])
def ensures_archives_policies(self):
for ap in self.archive_policies_definition:
try:
self._gnocchi.archive_policy.get(ap["name"])
except gnocchi_exc.ArchivePolicyNotFound:
self._gnocchi.archive_policy.create(ap)
@property
def gnocchi_project_id(self):
@ -323,8 +358,15 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
gnocchi_data[resource_id].setdefault(
"resource_extra", {}).update(rd.sample_attributes(sample))
measures.setdefault(resource_id, {}).setdefault(
metric_name, []).append({'timestamp': sample.timestamp,
'value': sample.volume})
metric_name,
{"measures": [],
"archive_policy_name":
rd.metrics[metric_name]["archive_policy_name"],
"unit": sample.unit}
)["measures"].append(
{'timestamp': sample.timestamp,
'value': sample.volume}
)
# TODO(gordc): unit should really be part of metric definition
gnocchi_data[resource_id]['resource']['metrics'][
metric_name]['unit'] = sample.unit
@ -395,7 +437,9 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
LOG.debug(
"%d measures posted against %d metrics through %d resources",
sum(len(m) for rid in measures for m in measures[rid].values()),
sum(len(m["measures"])
for rid in measures
for m in measures[rid].values()),
sum(len(m) for m in measures.values()), len(resource_infos))
def _create_resource(self, resource_type, resource):
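Note how the measures dict built by publish_samples changes shape: each metric entry now bundles its measures with the archive policy name and unit, so Gnocchi can create any missing metrics in the same batch call. An illustrative payload under the new shape (the resource id and values are hypothetical, modeled on the test fixtures below):

payload = {
    "9f9d01b9-4a58-4271-9e27-398b21ab20d1": {
        "disk.root.size": {
            "archive_policy_name": "ceilometer-low",
            "unit": "GB",
            "measures": [
                {"timestamp": "2012-05-08 20:23:48.028195", "value": 2},
            ],
        },
    },
}
# Sent via:
# client.metric.batch_resources_metrics_measures(payload, create_metrics=True)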

View File

@ -175,6 +175,9 @@ class PublisherTest(base.BaseTestCase):
self.useFixture(fixtures.MockPatch(
'ceilometer.keystone_client.get_client',
return_value=ks_client))
self.useFixture(fixtures.MockPatch(
'gnocchiclient.v1.client.Client',
return_value=mock.Mock()))
self.ks_client = ks_client
def test_config_load(self):
@ -198,7 +201,7 @@ class PublisherTest(base.BaseTestCase):
plugin_manager = extension.ExtensionManager(
namespace='ceilometer.event.trait.trait_plugin')
rd = gnocchi.ResourcesDefinition(
resource, "low", plugin_manager)
resource, "high", "low", plugin_manager)
operation = rd.event_match("image.delete")
self.assertEqual('delete', operation)
@ -245,12 +248,14 @@ class PublisherTest(base.BaseTestCase):
def _do_test_activity_filter(self, expected_measures, fake_batch):
url = netutils.urlsplit("gnocchi://")
d = gnocchi.GnocchiPublisher(self.conf.conf, url)
d._already_checked_archive_policies = True
d.publish_samples(self.samples)
self.assertEqual(1, len(fake_batch.mock_calls))
measures = fake_batch.mock_calls[0][1][0]
self.assertEqual(
expected_measures,
sum(len(m) for rid in measures for m in measures[rid].values()))
sum(len(m["measures"]) for rid in measures
for m in measures[rid].values()))
def test_activity_filter_match_project_id(self):
self.samples[0].project_id = (
@ -290,6 +295,7 @@ class PublisherTest(base.BaseTestCase):
)]
url = netutils.urlsplit("gnocchi://")
d = gnocchi.GnocchiPublisher(self.conf.conf, url)
d._already_checked_archive_policies = True
d.publish_samples(samples)
self.assertEqual(0, len(fake_batch.call_args[0][1]))
@ -328,10 +334,14 @@ class PublisherWorkflowTest(base.BaseTestCase,
'display_name': 'myinstance',
},
),
measures_attributes=[{
'timestamp': '2012-05-08 20:23:48.028195',
'value': 2
}],
metric_attributes={
"archive_policy_name": "ceilometer-low",
"unit": "GB",
"measures": [{
'timestamp': '2012-05-08 20:23:48.028195',
'value': 2
}]
},
postable_attributes={
'user_id': 'test_user',
'project_id': 'test_project',
@ -372,10 +382,14 @@ class PublisherWorkflowTest(base.BaseTestCase,
'useless': 'not_used',
},
),
measures_attributes=[{
'timestamp': '2012-05-08 20:23:48.028195',
'value': 2
}],
metric_attributes={
"archive_policy_name": "ceilometer-low",
"unit": "W",
"measures": [{
'timestamp': '2012-05-08 20:23:48.028195',
'value': 2
}]
},
postable_attributes={
'user_id': 'test_user',
'project_id': 'test_project',
@ -460,9 +474,11 @@ class PublisherWorkflowTest(base.BaseTestCase,
self.useFixture(utils_fixture.TimeFixture(now))
expected_calls = [
mock.call.resource.search('instance_disk', search_params),
mock.call.archive_policy.get("ceilometer-low"),
mock.call.archive_policy.get("ceilometer-low-rate"),
mock.call.resource.search('instance_network_interface',
search_params),
mock.call.resource.search('instance_disk', search_params),
mock.call.resource.update(
'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1',
{'ended_at': now.isoformat()}),
@ -489,15 +505,13 @@ class PublisherWorkflowTest(base.BaseTestCase,
IMAGE_DELETE_START,
VOLUME_DELETE_START,
FLOATINGIP_DELETE_END])
self.assertEqual(8, len(fakeclient.mock_calls))
self.assertEqual(10, len(fakeclient.mock_calls))
for call in expected_calls:
self.assertIn(call, fakeclient.mock_calls)
@mock.patch('ceilometer.publisher.gnocchi.LOG')
@mock.patch('gnocchiclient.v1.client.Client')
def test_workflow(self, fakeclient_cls, logger):
url = netutils.urlsplit("gnocchi://")
self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url)
fakeclient = fakeclient_cls.return_value
@ -506,8 +520,10 @@ class PublisherWorkflowTest(base.BaseTestCase,
gnocchi_id = uuid.uuid4()
expected_calls = [
mock.call.archive_policy.get("ceilometer-low"),
mock.call.archive_policy.get("ceilometer-low-rate"),
mock.call.metric.batch_resources_metrics_measures(
{resource_id: {metric_name: self.measures_attributes}},
{resource_id: {metric_name: self.metric_attributes}},
create_metrics=True)
]
expected_debug = [
@ -533,12 +549,16 @@ class PublisherWorkflowTest(base.BaseTestCase,
attributes['metrics'] = dict((metric_name, {})
for metric_name in self.metric_names)
for k, v in six.iteritems(attributes['metrics']):
if k in ["cpu", "disk.read.requests", "disk.write.requests",
"disk.read.bytes", "disk.write.bytes"]:
v["archive_policy_name"] = "ceilometer-low-rate"
else:
v["archive_policy_name"] = "ceilometer-low"
if k == 'disk.root.size':
v['unit'] = 'GB'
continue
if k == 'hardware.ipmi.node.power':
elif k == 'hardware.ipmi.node.power':
v['unit'] = 'W'
continue
expected_calls.append(mock.call.resource.create(
self.resource_type, attributes))
@ -554,7 +574,7 @@ class PublisherWorkflowTest(base.BaseTestCase,
if not self.create_resource_fail:
expected_calls.append(
mock.call.metric.batch_resources_metrics_measures(
{resource_id: {metric_name: self.measures_attributes}},
{resource_id: {metric_name: self.metric_attributes}},
create_metrics=True)
)
@ -570,7 +590,8 @@ class PublisherWorkflowTest(base.BaseTestCase,
batch_side_effect += [None]
expected_debug.append(
mock.call("%d measures posted against %d metrics through %d "
"resources", len(self.measures_attributes), 1, 1)
"resources", len(self.metric_attributes["measures"]),
1, 1)
)
if self.patchable_attributes:
@ -586,7 +607,9 @@ class PublisherWorkflowTest(base.BaseTestCase,
batch = fakeclient.metric.batch_resources_metrics_measures
batch.side_effect = batch_side_effect
self.publisher.publish_samples([self.sample])
url = netutils.urlsplit("gnocchi://")
publisher = gnocchi.GnocchiPublisher(self.conf.conf, url)
publisher.publish_samples([self.sample])
# Check that the last log message is the expected one
if (self.post_measure_fail
@ -599,4 +622,5 @@ class PublisherWorkflowTest(base.BaseTestCase,
self.assertEqual(expected_calls, fakeclient.mock_calls)
self.assertEqual(expected_debug, logger.debug.mock_calls)
PublisherWorkflowTest.generate_scenarios()
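The expected call count grows from 8 to 10 because the publisher now probes its two default policies at construction time. A standalone illustration of how such calls surface on a plain mock, independent of the fixtures above:

import mock  # the suite's py2-era import; unittest.mock works the same

fakeclient = mock.Mock()
fakeclient.archive_policy.get("ceilometer-low")
fakeclient.archive_policy.get("ceilometer-low-rate")

assert mock.call.archive_policy.get("ceilometer-low") in fakeclient.mock_calls
assert len(fakeclient.mock_calls) == 2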

View File

@ -393,10 +393,13 @@ above table is the following:
.. note::
If storing data in Gnocchi v4.1+, derived rate_of_change metrics can be
computed using Gnocchi rather than Ceilometer transformers. This will
minimize Ceilometer memory requirements and avoid missing data when
Ceilometer services restart.
If storing data in Gnocchi, derived rate_of_change metrics are also
computed using Gnocchi in addition to Ceilometer transformers. This avoids
missing data when Ceilometer services restart.
To minimize Ceilometer memory requirements, transformers can be disabled.
These ``rate_of_change`` meters are deprecated and will be removed from the
default Ceilometer configuration in a future release.
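With the ceilometer-low-rate policy defined earlier, the rate can be read straight from Gnocchi. A hedged sketch, assuming client is an already-authenticated gnocchiclient.v1.client.Client and instance_uuid is a placeholder:

def cpu_rate(client, instance_uuid):
    # "rate:mean" is one of ceilometer-low-rate's aggregation_methods;
    # granularity=300 matches its "5 minutes" definition.
    return client.metric.get_measures(
        "cpu", resource_id=instance_uuid,
        aggregation="rate:mean", granularity=300)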
OpenStack Compute is capable of collecting ``CPU`` related meters from
the compute host machines. In order to use that you need to set the

View File

@ -0,0 +1,18 @@
---
features:
- |
Archive policies can now be configured per metric in gnocchi_resources.yaml.
A default list of archive policies is now created by Ceilometer.
They are named "ceilometer-low-rate" for metrics that need rate computation
(CPU, disk, and network I/O) and "ceilometer-low" for the others.
upgrade:
- |
Ceilometer now creates its own archive policies in Gnocchi and uses them
when creating metrics in Gnocchi. Existing metrics keep their current
archive policies and will not be updated by ceilometer-upgrade; only newly
created metrics are affected. The archive policy can still be overridden
via the publisher URL (e.g. gnocchi://?archive_policy=high).
deprecations:
- |
cpu_util and \*.rate meters are deprecated and will be removed in a future
release in favor of the Gnocchi rate calculation equivalents.
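For reference, the override travels in the publisher URL's query string; a minimal sketch of the parsing the publisher relies on, mirroring the options.get('archive_policy', ...) lookup in the diff above:

from oslo_utils import netutils
from six.moves.urllib import parse as urlparse

url = netutils.urlsplit("gnocchi://?archive_policy=high")
options = urlparse.parse_qs(url.query)
archive_policy_override = options.get("archive_policy", [None])[-1]
print(archive_policy_override)  # high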