Remove code that is not related to events storage and API
Change-Id: I63128835613eb5959244c2fd34bc266ddcf4251c
parent 984042084f
commit ba4f572fd3
@@ -13,6 +13,3 @@ doc/build
doc/source/api/
etc/ceilometer/ceilometer.conf
subunit.log

# Files created by releasenotes build
releasenotes/build
@@ -1,9 +1,6 @@
ceilometer
==========

Release notes can be read online at:
http://docs.openstack.org/developer/ceilometer/releasenotes/index.html

Documentation for the project can be found at:
http://docs.openstack.org/developer/ceilometer/

@@ -1,46 +0,0 @@
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log

from ceilometer.agent import plugin_base as plugin
from ceilometer.i18n import _LW
from ceilometer import keystone_client

LOG = log.getLogger(__name__)

cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client')


class EndpointDiscovery(plugin.DiscoveryBase):
    """Discovery that supplies service endpoints.

    This discovery should be used when the relevant APIs are not well suited
    to dividing the pollster's work into smaller pieces than a whole service
    at once.
    """

    @staticmethod
    def discover(manager, param=None):
        endpoints = keystone_client.get_service_catalog(
            manager.keystone).get_urls(
                service_type=param,
                interface=cfg.CONF.service_credentials.interface,
                region_name=cfg.CONF.service_credentials.region_name)
        if not endpoints:
            LOG.warning(_LW('No endpoints found for service %s'),
                        "<all services>" if param is None else param)
            return []
        return endpoints
@@ -1,21 +0,0 @@
# Copyright 2015 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ceilometer.agent import plugin_base


class LocalNodeDiscovery(plugin_base.DiscoveryBase):
    def discover(self, manager, param=None):
        """Return local node as resource."""
        return ['local_host']
@@ -1,32 +0,0 @@
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from ceilometer.agent import plugin_base as plugin

cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client')


class TenantDiscovery(plugin.DiscoveryBase):
    """Discovery that supplies keystone tenants.

    This discovery should be used when the pollster's work can't be divided
    into smaller pieces than per-tenants. Example of this is the Swift
    pollster, which polls account details and does so per-project.
    """

    def discover(self, manager, param=None):
        tenants = manager.keystone.projects.list()
        return tenants or []
@@ -1,509 +0,0 @@
#
# Copyright 2013 Julien Danjou
# Copyright 2014 Red Hat, Inc
#
# Authors: Julien Danjou <julien@danjou.info>
#          Eoghan Glynn <eglynn@redhat.com>
#          Nejc Saje <nsaje@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import itertools
import random

from keystoneauth1 import exceptions as ka_exceptions
from keystoneclient import exceptions as ks_exceptions
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import fnmatch
from oslo_utils import timeutils
from six import moves
from six.moves.urllib import parse as urlparse
from stevedore import extension

from ceilometer.agent import plugin_base
from ceilometer import coordination
from ceilometer.i18n import _, _LE, _LI, _LW
from ceilometer import keystone_client
from ceilometer import messaging
from ceilometer import pipeline
from ceilometer.publisher import utils as publisher_utils
from ceilometer import service_base
from ceilometer import utils

LOG = log.getLogger(__name__)

OPTS = [
    cfg.BoolOpt('batch_polled_samples',
                default=True,
                help='To reduce polling agent load, samples are sent to the '
                     'notification agent in a batch. To gain higher '
                     'throughput at the cost of load set this to False.'),
    cfg.IntOpt('shuffle_time_before_polling_task',
               default=0,
               help='To reduce large requests at same time to Nova or other '
                    'components from different compute agents, shuffle '
                    'start time of polling task.'),
]

POLLING_OPTS = [
    cfg.StrOpt('partitioning_group_prefix',
               deprecated_group='central',
               help='Work-load partitioning group prefix. Use only if you '
                    'want to run multiple polling agents with different '
                    'config files. For each sub-group of the agent '
                    'pool with the same partitioning_group_prefix a disjoint '
                    'subset of pollsters should be loaded.'),
]

cfg.CONF.register_opts(OPTS)
cfg.CONF.register_opts(POLLING_OPTS, group='polling')
cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging',
                    group='publisher_notifier')
cfg.CONF.import_group('service_types', 'ceilometer.energy.kwapi')
cfg.CONF.import_group('service_types', 'ceilometer.image.glance')
cfg.CONF.import_group('service_types', 'ceilometer.neutron_client')
cfg.CONF.import_group('service_types', 'ceilometer.nova_client')
cfg.CONF.import_group('service_types', 'ceilometer.objectstore.rgw')
cfg.CONF.import_group('service_types', 'ceilometer.objectstore.swift')


class PollsterListForbidden(Exception):
    def __init__(self):
        msg = ('It is forbidden to use pollster-list option of polling agent '
               'in case of using coordination between multiple agents. Please '
               'use either multiple agents being coordinated or polling list '
               'option for one polling agent.')
        super(PollsterListForbidden, self).__init__(msg)


class EmptyPollstersList(Exception):
    def __init__(self):
        msg = ('No valid pollsters can be loaded with the startup parameters'
               ' polling-namespaces and pollster-list.')
        super(EmptyPollstersList, self).__init__(msg)


class Resources(object):
    def __init__(self, agent_manager):
        self.agent_manager = agent_manager
        self._resources = []
        self._discovery = []
        self.blacklist = []

    def setup(self, source):
        self._resources = source.resources
        self._discovery = source.discovery

    def get(self, discovery_cache=None):
        source_discovery = (self.agent_manager.discover(self._discovery,
                                                        discovery_cache)
                            if self._discovery else [])
        static_resources = []
        if self._resources:
            static_resources_group = self.agent_manager.construct_group_id(
                utils.hash_of_set(self._resources))
            p_coord = self.agent_manager.partition_coordinator
            static_resources = p_coord.extract_my_subset(
                static_resources_group, self._resources)
        return static_resources + source_discovery

    @staticmethod
    def key(source_name, pollster):
        return '%s-%s' % (source_name, pollster.name)


class PollingTask(object):
    """Polling task for polling samples and notifying.

    A polling task can be invoked periodically or only once.
    """

    def __init__(self, agent_manager):
        self.manager = agent_manager

        # elements of the Cartesian product of sources X pollsters
        # with a common interval
        self.pollster_matches = collections.defaultdict(set)

        # we relate the static resources and per-source discovery to
        # each combination of pollster and matching source
        resource_factory = lambda: Resources(agent_manager)
        self.resources = collections.defaultdict(resource_factory)

        self._batch = cfg.CONF.batch_polled_samples
        self._telemetry_secret = cfg.CONF.publisher.telemetry_secret

    def add(self, pollster, source):
        self.pollster_matches[source.name].add(pollster)
        key = Resources.key(source.name, pollster)
        self.resources[key].setup(source)

    def poll_and_notify(self):
        """Polling sample and notify."""
        cache = {}
        discovery_cache = {}
        poll_history = {}
        for source_name in self.pollster_matches:
            for pollster in self.pollster_matches[source_name]:
                key = Resources.key(source_name, pollster)
                candidate_res = list(
                    self.resources[key].get(discovery_cache))
                if not candidate_res and pollster.obj.default_discovery:
                    candidate_res = self.manager.discover(
                        [pollster.obj.default_discovery], discovery_cache)

                # Remove duplicated resources and black resources. Using
                # set() requires well defined __hash__ for each resource.
                # Since __eq__ is defined, 'not in' is safe here.
                polling_resources = []
                black_res = self.resources[key].blacklist
                history = poll_history.get(pollster.name, [])
                for x in candidate_res:
                    if x not in history:
                        history.append(x)
                        if x not in black_res:
                            polling_resources.append(x)
                poll_history[pollster.name] = history

                # If no resources, skip for this pollster
                if not polling_resources:
                    p_context = 'new ' if history else ''
                    LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s"
                                 "resources found this cycle"),
                             {'name': pollster.name, 'p_context': p_context})
                    continue

                LOG.info(_LI("Polling pollster %(poll)s in the context of "
                             "%(src)s"),
                         dict(poll=pollster.name, src=source_name))
                try:
                    polling_timestamp = timeutils.utcnow().isoformat()
                    samples = pollster.obj.get_samples(
                        manager=self.manager,
                        cache=cache,
                        resources=polling_resources
                    )
                    sample_batch = []

                    for sample in samples:
                        # Note(yuywz): Unify the timestamp of polled samples
                        sample.set_timestamp(polling_timestamp)
                        sample_dict = (
                            publisher_utils.meter_message_from_counter(
                                sample, self._telemetry_secret
                            ))
                        if self._batch:
                            sample_batch.append(sample_dict)
                        else:
                            self._send_notification([sample_dict])

                    if sample_batch:
                        self._send_notification(sample_batch)

                except plugin_base.PollsterPermanentError as err:
                    LOG.error(_(
                        'Prevent pollster %(name)s for '
                        'polling source %(source)s anymore!')
                        % ({'name': pollster.name, 'source': source_name}))
                    self.resources[key].blacklist.extend(err.fail_res_list)
                except Exception as err:
                    LOG.warning(_(
                        'Continue after error from %(name)s: %(error)s')
                        % ({'name': pollster.name, 'error': err}),
                        exc_info=True)

    def _send_notification(self, samples):
        self.manager.notifier.sample(
            {},
            'telemetry.polling',
            {'samples': samples}
        )


class AgentManager(service_base.PipelineBasedService):

    def __init__(self, namespaces=None, pollster_list=None):
        namespaces = namespaces or ['compute', 'central']
        pollster_list = pollster_list or []
        group_prefix = cfg.CONF.polling.partitioning_group_prefix

        # features of using coordination and pollster-list are exclusive, and
        # cannot be used at one moment to avoid both samples duplication and
        # samples being lost
        if pollster_list and cfg.CONF.coordination.backend_url:
            raise PollsterListForbidden()

        super(AgentManager, self).__init__()

        def _match(pollster):
            """Find out if pollster name matches to one of the list."""
            return any(fnmatch.fnmatch(pollster.name, pattern) for
                       pattern in pollster_list)

        if type(namespaces) is not list:
            namespaces = [namespaces]

        # we'll have default ['compute', 'central'] here if no namespaces will
        # be passed
        extensions = (self._extensions('poll', namespace).extensions
                      for namespace in namespaces)
        # get the extensions from pollster builder
        extensions_fb = (self._extensions_from_builder('poll', namespace)
                         for namespace in namespaces)
        if pollster_list:
            extensions = (moves.filter(_match, exts)
                          for exts in extensions)
            extensions_fb = (moves.filter(_match, exts)
                             for exts in extensions_fb)

        self.extensions = list(itertools.chain(*list(extensions))) + list(
            itertools.chain(*list(extensions_fb)))

        if self.extensions == []:
            raise EmptyPollstersList()

        self.discovery_manager = self._extensions('discover')
        self.partition_coordinator = coordination.PartitionCoordinator()

        # Compose coordination group prefix.
        # We'll use namespaces as the basement for this partitioning.
        namespace_prefix = '-'.join(sorted(namespaces))
        self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                             if group_prefix else namespace_prefix)

        self.notifier = oslo_messaging.Notifier(
            messaging.get_transport(),
            driver=cfg.CONF.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.polling")

        self._keystone = None
        self._keystone_last_exception = None

    @staticmethod
    def _get_ext_mgr(namespace):
        def _catch_extension_load_error(mgr, ep, exc):
            # Extension raising ExtensionLoadError can be ignored,
            # and ignore anything we can't import as a safety measure.
            if isinstance(exc, plugin_base.ExtensionLoadError):
                LOG.exception(_("Skip loading extension for %s") % ep.name)
                return
            if isinstance(exc, ImportError):
                LOG.error(_("Failed to import extension for %(name)s: "
                            "%(error)s"),
                          {'name': ep.name, 'error': exc})
                return
            raise exc

        return extension.ExtensionManager(
            namespace=namespace,
            invoke_on_load=True,
            on_load_failure_callback=_catch_extension_load_error,
        )

    def _extensions(self, category, agent_ns=None):
        namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns
                     else 'ceilometer.%s' % category)
        return self._get_ext_mgr(namespace)

    def _extensions_from_builder(self, category, agent_ns=None):
        ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns
              else 'ceilometer.builder.%s' % category)
        mgr = self._get_ext_mgr(ns)

        def _build(ext):
            return ext.plugin.get_pollsters_extensions()

        # NOTE: this seems a stevedore bug. if no extensions are found,
        # map will raise runtimeError which is not documented.
        if mgr.names():
            return list(itertools.chain(*mgr.map(_build)))
        else:
            return []

    def join_partitioning_groups(self):
        self.groups = set([self.construct_group_id(d.obj.group_id)
                           for d in self.discovery_manager])
        # let each set of statically-defined resources have its own group
        static_resource_groups = set([
            self.construct_group_id(utils.hash_of_set(p.resources))
            for p in self.polling_manager.sources
            if p.resources
        ])
        self.groups.update(static_resource_groups)
        for group in self.groups:
            self.partition_coordinator.join_group(group)

    def create_polling_task(self):
        """Create an initially empty polling task."""
        return PollingTask(self)

    def setup_polling_tasks(self):
        polling_tasks = {}
        for source in self.polling_manager.sources:
            polling_task = None
            for pollster in self.extensions:
                if source.support_meter(pollster.name):
                    polling_task = polling_tasks.get(source.get_interval())
                    if not polling_task:
                        polling_task = self.create_polling_task()
                        polling_tasks[source.get_interval()] = polling_task
                    polling_task.add(pollster, source)
        return polling_tasks

    def construct_group_id(self, discovery_group_id):
        return ('%s-%s' % (self.group_prefix,
                           discovery_group_id)
                if discovery_group_id else None)

    def configure_polling_tasks(self):
        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        # set shuffle time before polling task if necessary
        delay_polling_time = random.randint(
            0, cfg.CONF.shuffle_time_before_polling_task)

        pollster_timers = []
        data = self.setup_polling_tasks()
        for interval, polling_task in data.items():
            delay_time = (interval + delay_polling_time if delay_start
                          else delay_polling_time)
            pollster_timers.append(self.tg.add_timer(interval,
                                                     self.interval_task,
                                                     initial_delay=delay_time,
                                                     task=polling_task))
        self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                          self.partition_coordinator.heartbeat)

        return pollster_timers

    def start(self):
        super(AgentManager, self).start()
        self.polling_manager = pipeline.setup_polling()

        self.partition_coordinator.start()
        self.join_partitioning_groups()

        self.pollster_timers = self.configure_polling_tasks()

        self.init_pipeline_refresh()

    def stop(self):
        if self.started:
            self.partition_coordinator.stop()
        super(AgentManager, self).stop()

    def interval_task(self, task):
        # NOTE(sileht): remove the previous keystone client
        # and exception to get a new one in this polling cycle.
        self._keystone = None
        self._keystone_last_exception = None

        task.poll_and_notify()

    @property
    def keystone(self):
        # NOTE(sileht): we do lazy loading of the keystone client
        # for multiple reasons:
        # * don't use it if no plugin need it
        # * use only one client for all plugins per polling cycle
        if self._keystone is None and self._keystone_last_exception is None:
            try:
                self._keystone = keystone_client.get_client()
                self._keystone_last_exception = None
            except (ka_exceptions.ClientException,
                    ks_exceptions.ClientException) as e:
                self._keystone = None
                self._keystone_last_exception = e
        if self._keystone is not None:
            return self._keystone
        else:
            raise self._keystone_last_exception

    @staticmethod
    def _parse_discoverer(url):
        s = urlparse.urlparse(url)
        return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None)

    def _discoverer(self, name):
        for d in self.discovery_manager:
            if d.name == name:
                return d.obj
        return None

    def discover(self, discovery=None, discovery_cache=None):
        resources = []
        discovery = discovery or []
        for url in discovery:
            if discovery_cache is not None and url in discovery_cache:
                resources.extend(discovery_cache[url])
                continue
            name, param = self._parse_discoverer(url)
            discoverer = self._discoverer(name)
            if discoverer:
                try:
                    if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE:
                        service_type = getattr(
                            cfg.CONF.service_types,
                            discoverer.KEYSTONE_REQUIRED_FOR_SERVICE)
                        if not keystone_client.get_service_catalog(
                                self.keystone).get_endpoints(
                                    service_type=service_type):
                            LOG.warning(_LW(
                                'Skipping %(name)s, %(service_type)s service '
                                'is not registered in keystone'),
                                {'name': name, 'service_type': service_type})
                            continue

                    discovered = discoverer.discover(self, param)
                    partitioned = self.partition_coordinator.extract_my_subset(
                        self.construct_group_id(discoverer.group_id),
                        discovered)
                    resources.extend(partitioned)
                    if discovery_cache is not None:
                        discovery_cache[url] = partitioned
                except (ka_exceptions.ClientException,
                        ks_exceptions.ClientException) as e:
                    LOG.error(_LE('Skipping %(name)s, keystone issue: '
                                  '%(exc)s'), {'name': name, 'exc': e})
                except Exception as err:
                    LOG.exception(_('Unable to discover resources: %s') % err)
            else:
                LOG.warning(_('Unknown discovery extension: %s') % name)
        return resources

    def stop_pollsters(self):
        for x in self.pollster_timers:
            try:
                x.stop()
                self.tg.timer_done(x)
            except Exception:
                LOG.error(_('Error stopping pollster.'), exc_info=True)
        self.pollster_timers = []

    def reload_pipeline(self):
        if self.pipeline_validated:
            LOG.info(_LI("Reconfiguring polling tasks."))

            # stop existing pollsters and leave partitioning groups
            self.stop_pollsters()
            for group in self.groups:
                self.partition_coordinator.leave_group(group)

            # re-create partitioning groups according to pipeline
            # and configure polling tasks with latest pipeline conf
            self.join_partitioning_groups()
            self.pollster_timers = self.configure_polling_tasks()
@@ -1,270 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for plugins.
"""

import abc
import collections

from oslo_log import log
import oslo_messaging
import six
from stevedore import extension

from ceilometer.i18n import _LE
from ceilometer import messaging

LOG = log.getLogger(__name__)

ExchangeTopics = collections.namedtuple('ExchangeTopics',
                                        ['exchange', 'topics'])


class PluginBase(object):
    """Base class for all plugins."""


@six.add_metaclass(abc.ABCMeta)
class NotificationBase(PluginBase):
    """Base class for plugins that support the notification API."""
    def __init__(self, manager):
        super(NotificationBase, self).__init__()
        # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch
        # messages to an endpoint.
        if self.event_types:
            self.filter_rule = oslo_messaging.NotificationFilter(
                event_type='|'.join(self.event_types))
        self.manager = manager

    @staticmethod
    def get_notification_topics(conf):
        if 'notification_topics' in conf:
            return conf.notification_topics
        return conf.oslo_messaging_notifications.topics

    @abc.abstractproperty
    def event_types(self):
        """Return a sequence of strings.

        Strings are defining the event types to be given to this plugin.
        """

    @abc.abstractmethod
    def get_targets(self, conf):
        """Return a sequence of oslo.messaging.Target.

        Sequence is defining the exchange and topics to be connected for this
        plugin.
        :param conf: Configuration.
        """

    @abc.abstractmethod
    def process_notification(self, message):
        """Return a sequence of Counter instances for the given message.

        :param message: Message to process.
        """

    def info(self, notifications):
        """RPC endpoint for notification messages at info level

        When another service sends a notification over the message
        bus, this method receives it.

        :param notifications: list of notifications
        """
        self._process_notifications('info', notifications)

    def sample(self, notifications):
        """RPC endpoint for notification messages at sample level

        When another service sends a notification over the message
        bus at sample priority, this method receives it.

        :param notifications: list of notifications
        """
        self._process_notifications('sample', notifications)

    def _process_notifications(self, priority, notifications):
        for notification in notifications:
            try:
                notification = messaging.convert_to_old_notification_format(
                    priority, notification)
                self.to_samples_and_publish(notification)
            except Exception:
                LOG.error(_LE('Fail to process notification'), exc_info=True)

    def to_samples_and_publish(self, notification):
        """Return samples produced by *process_notification*.

        Samples produced for the given notification.
        :param context: Execution context from the service or RPC call
        :param notification: The notification to process.
        """
        with self.manager.publisher() as p:
            p(list(self.process_notification(notification)))


class NonMetricNotificationBase(object):
    """Use to mark non-measurement meters

    There are a number of historical non-measurement meters that should really
    be captured as events. This common base allows us to disable these invalid
    meters.
    """
    pass


class ExtensionLoadError(Exception):
    """Error of loading pollster plugin.

    PollsterBase provides a hook, setup_environment, called in pollster loading
    to setup required HW/SW dependency. Any exception from it would be
    propagated as ExtensionLoadError, then skip loading this pollster.
    """
    pass


class PollsterPermanentError(Exception):
    """Permanent error when polling.

    When unrecoverable error happened in polling, pollster can raise this
    exception with failed resource to prevent itself from polling any more.
    Resource is one of parameter resources from get_samples that cause polling
    error.
    """

    def __init__(self, resources):
        self.fail_res_list = resources


@six.add_metaclass(abc.ABCMeta)
class PollsterBase(PluginBase):
    """Base class for plugins that support the polling API."""

    def setup_environment(self):
        """Setup required environment for pollster.

        Each subclass could overwrite it for specific usage. Any exception
        raised in this function would prevent pollster being loaded.
        """
        pass

    def __init__(self):
        super(PollsterBase, self).__init__()
        try:
            self.setup_environment()
        except Exception as err:
            raise ExtensionLoadError(err)

    @abc.abstractproperty
    def default_discovery(self):
        """Default discovery to use for this pollster.

        There are three ways a pollster can get a list of resources to poll,
        listed here in ascending order of precedence:
        1. from the per-agent discovery,
        2. from the per-pollster discovery (defined here)
        3. from the per-pipeline configured discovery and/or per-pipeline
           configured static resources.

        If a pollster should only get resources from #1 or #3, this property
        should be set to None.
        """

    @abc.abstractmethod
    def get_samples(self, manager, cache, resources):
        """Return a sequence of Counter instances from polling the resources.

        :param manager: The service manager class invoking the plugin.
        :param cache: A dictionary to allow pollsters to pass data
                      between themselves when recomputing it would be
                      expensive (e.g., asking another service for a
                      list of objects).
        :param resources: A list of resources the pollster will get data
                          from. It's up to the specific pollster to decide
                          how to use it. It is usually supplied by a discovery,
                          see ``default_discovery`` for more information.

        """

    @classmethod
    def build_pollsters(cls):
        """Return a list of tuple (name, pollster).

        The name is the meter name which the pollster would return, the
        pollster is a pollster object instance. The pollster which implements
        this method should be registered in the namespace of
        ceilometer.builder.xxx instead of ceilometer.poll.xxx.
        """
        return []

    @classmethod
    def get_pollsters_extensions(cls):
        """Return a list of stevedore extensions.

        The returned stevedore extensions wrap the pollster object instances
        returned by build_pollsters.
        """
        extensions = []
        try:
            for name, pollster in cls.build_pollsters():
                ext = extension.Extension(name, None, cls, pollster)
                extensions.append(ext)
        except Exception as err:
            raise ExtensionLoadError(err)
        return extensions


@six.add_metaclass(abc.ABCMeta)
class DiscoveryBase(object):
    KEYSTONE_REQUIRED_FOR_SERVICE = None
    """Service type required in keystone catalog to works"""

    @abc.abstractmethod
    def discover(self, manager, param=None):
        """Discover resources to monitor.

        The most fine-grained discovery should be preferred, so the work is
        the most evenly distributed among multiple agents (if they exist).

        For example:
        if the pollster can separately poll individual resources, it should
        have its own discovery implementation to discover those resources. If
        it can only poll per-tenant, then the `TenantDiscovery` should be
        used. If even that is not possible, use `EndpointDiscovery` (see
        their respective docstrings).

        :param manager: The service manager class invoking the plugin.
        :param param: an optional parameter to guide the discovery
        """

    @property
    def group_id(self):
        """Return group id of this discovery.

        All running recoveries with the same group_id should return the same
        set of resources at a given point in time. By default, a discovery is
        put into a global group, meaning that all discoveries of its type
        running anywhere in the cloud, return the same set of resources.

        This property can be overridden to provide correct grouping of
        localized discoveries. For example, compute discovery is localized
        to a host, which is reflected in its group_id.

        A None value signifies that this discovery does not want to be part
        of workload partitioning at all.
        """
        return 'global'
@@ -36,6 +36,7 @@ OPTS = [
               ),
]


API_OPTS = [
    cfg.BoolOpt('pecan_debug',
                default=False,
@@ -45,6 +46,12 @@ API_OPTS = [
               default=100,
               help='Default maximum number of items returned by API request.'
               ),
    cfg.IntOpt('workers',
               default=1,
               min=1,
               deprecated_group='DEFAULT',
               deprecated_name='api_workers',
               help='Number of workers for api, default value is 1.'),
]

CONF.register_opts(OPTS)
@@ -55,7 +62,6 @@ def setup_app(pecan_config=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(),
                 hooks.NotifierHook(),
                 hooks.TranslationHook()]

    pecan_config = pecan_config or {
@@ -108,24 +108,6 @@ class Base(wtypes.DynamicBase):
                    getattr(self, k) != wsme.Unset)


class Link(Base):
    """A link representation."""

    href = wtypes.text
    "The url of a link"

    rel = wtypes.text
    "The name of a link"

    @classmethod
    def sample(cls):
        return cls(href=('http://localhost:8777/v2/meters/volume?'
                         'q.field=resource_id&'
                         'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
                   rel='volume'
                   )


class Query(Base):
    """Query filter."""

@@ -39,8 +39,6 @@ class Capabilities(base.Base):

    api = {wtypes.text: bool}
    "A flattened dictionary of API capabilities"
    storage = {wtypes.text: bool}
    "A flattened dictionary of storage capabilities"
    event_storage = {wtypes.text: bool}
    "A flattened dictionary of event storage capabilities"

@@ -48,30 +46,8 @@
    def sample(cls):
        return cls(
            api=_flatten_capabilities({
                'meters': {'query': {'simple': True,
                                     'metadata': True}},
                'resources': {'query': {'simple': True,
                                        'metadata': True}},
                'samples': {'query': {'simple': True,
                                      'metadata': True,
                                      'complex': True}},
                'statistics': {'groupby': True,
                               'query': {'simple': True,
                                         'metadata': True},
                               'aggregation': {'standard': True,
                                               'selectable': {
                                                   'max': True,
                                                   'min': True,
                                                   'sum': True,
                                                   'avg': True,
                                                   'count': True,
                                                   'stddev': True,
                                                   'cardinality': True,
                                                   'quartile': False}}},
                'events': {'query': {'simple': True}},
            }),
            storage=_flatten_capabilities(
                {'storage': {'production_ready': True}}),
            event_storage=_flatten_capabilities(
                {'storage': {'production_ready': True}}),
        )
@@ -88,13 +64,10 @@ class CapabilitiesController(rest.RestController):
        """
        # variation in API capabilities is effectively determined by
        # the lack of strict feature parity across storage drivers
        conn = pecan.request.storage_conn
        event_conn = pecan.request.event_storage_conn
        driver_capabilities = conn.get_capabilities().copy()
        driver_capabilities['events'] = event_conn.get_capabilities()['events']
        driver_perf = conn.get_storage_capabilities()
        driver_capabilities = {'events':
                               event_conn.get_capabilities()['events']}
        event_driver_perf = event_conn.get_storage_capabilities()
        return Capabilities(api=_flatten_capabilities(driver_capabilities),
                            storage=_flatten_capabilities(driver_perf),
                            event_storage=_flatten_capabilities(
                                event_driver_perf))
@@ -1,505 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import datetime

from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import timeutils
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer.publisher import utils as publisher_utils
from ceilometer import sample
from ceilometer import storage
from ceilometer.storage import base as storage_base
from ceilometer import utils

LOG = log.getLogger(__name__)


class OldSample(base.Base):
    """A single measurement for a given meter and resource.

    This class is deprecated in favor of Sample.
    """

    source = wtypes.text
    "The ID of the source that identifies where the sample comes from"

    counter_name = wsme.wsattr(wtypes.text, mandatory=True)
    "The name of the meter"
    # FIXME(dhellmann): Make this meter_name?

    counter_type = wsme.wsattr(wtypes.text, mandatory=True)
    "The type of the meter (see :ref:`measurements`)"
    # FIXME(dhellmann): Make this meter_type?

    counter_unit = wsme.wsattr(wtypes.text, mandatory=True)
    "The unit of measure for the value in counter_volume"
    # FIXME(dhellmann): Make this meter_unit?

    counter_volume = wsme.wsattr(float, mandatory=True)
    "The actual measured value"

    user_id = wtypes.text
    "The ID of the user who last triggered an update to the resource"

    project_id = wtypes.text
    "The ID of the project or tenant that owns the resource"

    resource_id = wsme.wsattr(wtypes.text, mandatory=True)
    "The ID of the :class:`Resource` for which the measurements are taken"

    timestamp = datetime.datetime
    "UTC date and time when the measurement was made"

    recorded_at = datetime.datetime
    "When the sample has been recorded."

    resource_metadata = {wtypes.text: wtypes.text}
    "Arbitrary metadata associated with the resource"

    message_id = wtypes.text
    "A unique identifier for the sample"

    def __init__(self, counter_volume=None, resource_metadata=None,
                 timestamp=None, **kwds):
        resource_metadata = resource_metadata or {}
        if counter_volume is not None:
            counter_volume = float(counter_volume)
        resource_metadata = v2_utils.flatten_metadata(resource_metadata)
        # this is to make it easier for clients to pass a timestamp in
        if timestamp and isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_isotime(timestamp)

        super(OldSample, self).__init__(counter_volume=counter_volume,
                                        resource_metadata=resource_metadata,
                                        timestamp=timestamp, **kwds)

        if self.resource_metadata in (wtypes.Unset, None):
            self.resource_metadata = {}

    @classmethod
    def sample(cls):
        return cls(source='openstack',
                   counter_name='instance',
                   counter_type='gauge',
                   counter_unit='instance',
                   counter_volume=1,
                   resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                   project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
                   user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                   recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
                   timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
                   resource_metadata={'name1': 'value1',
                                      'name2': 'value2'},
                   message_id='5460acce-4fd6-480d-ab18-9735ec7b1996',
                   )


class Statistics(base.Base):
    """Computed statistics for a query."""

    groupby = {wtypes.text: wtypes.text}
    "Dictionary of field names for group, if groupby statistics are requested"

    unit = wtypes.text
    "The unit type of the data set"

    min = float
    "The minimum volume seen in the data"

    max = float
    "The maximum volume seen in the data"

    avg = float
    "The average of all of the volume values seen in the data"

    sum = float
    "The total of all of the volume values seen in the data"

    count = int
    "The number of samples seen"

    aggregate = {wtypes.text: float}
    "The selectable aggregate value(s)"

    duration = float
    "The difference, in seconds, between the oldest and newest timestamp"

    duration_start = datetime.datetime
    "UTC date and time of the earliest timestamp, or the query start time"

    duration_end = datetime.datetime
    "UTC date and time of the oldest timestamp, or the query end time"

    period = int
    "The difference, in seconds, between the period start and end"

    period_start = datetime.datetime
    "UTC date and time of the period start"

    period_end = datetime.datetime
    "UTC date and time of the period end"

    def __init__(self, start_timestamp=None, end_timestamp=None, **kwds):
        super(Statistics, self).__init__(**kwds)
        self._update_duration(start_timestamp, end_timestamp)

    def _update_duration(self, start_timestamp, end_timestamp):
        # "Clamp" the timestamps we return to the original time
        # range, excluding the offset.
        if (start_timestamp and
                self.duration_start and
                self.duration_start < start_timestamp):
            self.duration_start = start_timestamp
            LOG.debug('clamping min timestamp to range')
        if (end_timestamp and
                self.duration_end and
                self.duration_end > end_timestamp):
            self.duration_end = end_timestamp
            LOG.debug('clamping max timestamp to range')

        # If we got valid timestamps back, compute a duration in seconds.
        #
        # If the min > max after clamping then we know the
        # timestamps on the samples fell outside of the time
        # range we care about for the query, so treat them as
        # "invalid."
        #
        # If the timestamps are invalid, return None as a
        # sentinel indicating that there is something "funny"
        # about the range.
        if (self.duration_start and
                self.duration_end and
                self.duration_start <= self.duration_end):
            self.duration = timeutils.delta_seconds(self.duration_start,
                                                    self.duration_end)
        else:
            self.duration_start = self.duration_end = self.duration = None

    @classmethod
    def sample(cls):
        return cls(unit='GiB',
                   min=1,
                   max=9,
                   avg=4.5,
                   sum=45,
                   count=10,
                   duration_start=datetime.datetime(2013, 1, 4, 16, 42),
                   duration_end=datetime.datetime(2013, 1, 4, 16, 47),
                   period=7200,
                   period_start=datetime.datetime(2013, 1, 4, 16, 00),
                   period_end=datetime.datetime(2013, 1, 4, 18, 00),
                   )


class Aggregate(base.Base):

    func = wsme.wsattr(wtypes.text, mandatory=True)
    "The aggregation function name"

    param = wsme.wsattr(wtypes.text, default=None)
    "The paramter to the aggregation function"

    def __init__(self, **kwargs):
        super(Aggregate, self).__init__(**kwargs)

    @staticmethod
    def validate(aggregate):
        valid_agg = (storage_base.Connection.CAPABILITIES.get('statistics', {})
                     .get('aggregation', {}).get('selectable', {}).keys())
        if aggregate.func not in valid_agg:
            msg = _('Invalid aggregation function: %s') % aggregate.func
            raise base.ClientSideError(msg)
        return aggregate

    @classmethod
    def sample(cls):
        return cls(func='cardinality',
                   param='resource_id')


def _validate_groupby_fields(groupby_fields):
    """Checks that the list of groupby fields from request is valid.

    If all fields are valid, returns fields with duplicates removed.
    """
    # NOTE(terriyu): Currently, metadata fields are supported in our
    # group by statistics implementation only for mongodb
    valid_fields = set(['user_id', 'resource_id', 'project_id', 'source',
                        'resource_metadata.instance_type'])

    invalid_fields = set(groupby_fields) - valid_fields
    if invalid_fields:
        raise wsme.exc.UnknownArgument(invalid_fields,
                                       "Invalid groupby fields")

    # Remove duplicate fields
    # NOTE(terriyu): This assumes that we don't care about the order of the
    # group by fields.
    return list(set(groupby_fields))


class MeterController(rest.RestController):
    """Manages operations on a single meter."""
    _custom_actions = {
        'statistics': ['GET'],
    }

    def __init__(self, meter_name):
        pecan.request.context['meter_name'] = meter_name
        self.meter_name = meter_name

    @wsme_pecan.wsexpose([OldSample], [base.Query], int)
    def get_all(self, q=None, limit=None):
        """Return samples for the meter.

        :param q: Filter rules for the data to be returned.
        :param limit: Maximum number of samples to return.
        """

        rbac.enforce('get_samples', pecan.request)

        q = q or []
        limit = v2_utils.enforce_limit(limit)
        kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self.meter_name
        f = storage.SampleFilter(**kwargs)
        return [OldSample.from_db_model(e)
                for e in pecan.request.storage_conn.get_samples(f, limit=limit)
                ]

    @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201)
    def post(self, direct='', samples=None):
        """Post a list of new Samples to Telemetry.

        :param direct: a flag indicates whether the samples will be posted
                       directly to storage or not.
        :param samples: a list of samples within the request body.
        """
        rbac.enforce('create_samples', pecan.request)

        direct = strutils.bool_from_string(direct)
        if not samples:
            msg = _('Samples should be included in request body')
            raise base.ClientSideError(msg)

        now = timeutils.utcnow()
        auth_project = rbac.get_limited_to_project(pecan.request.headers)
        def_source = pecan.request.cfg.sample_source
        def_project_id = pecan.request.headers.get('X-Project-Id')
        def_user_id = pecan.request.headers.get('X-User-Id')

        published_samples = []
        for s in samples:
            if self.meter_name != s.counter_name:
                raise wsme.exc.InvalidInput('counter_name', s.counter_name,
                                            'should be %s' % self.meter_name)

            if s.message_id:
                raise wsme.exc.InvalidInput('message_id', s.message_id,
                                            'The message_id must not be set')

            if s.counter_type not in sample.TYPES:
                raise wsme.exc.InvalidInput('counter_type', s.counter_type,
                                            'The counter type must be: ' +
                                            ', '.join(sample.TYPES))

            s.user_id = (s.user_id or def_user_id)
            s.project_id = (s.project_id or def_project_id)
            s.source = '%s:%s' % (s.project_id, (s.source or def_source))
            s.timestamp = (s.timestamp or now)

            if auth_project and auth_project != s.project_id:
                # non admin user trying to cross post to another project_id
                auth_msg = 'can not post samples to other projects'
                raise wsme.exc.InvalidInput('project_id', s.project_id,
                                            auth_msg)

            published_sample = sample.Sample(
                name=s.counter_name,
                type=s.counter_type,
                unit=s.counter_unit,
                volume=s.counter_volume,
                user_id=s.user_id,
                project_id=s.project_id,
                resource_id=s.resource_id,
                timestamp=s.timestamp.isoformat(),
                resource_metadata=utils.restore_nesting(s.resource_metadata,
                                                        separator='.'),
                source=s.source)
            s.message_id = published_sample.id

            sample_dict = publisher_utils.meter_message_from_counter(
                published_sample, cfg.CONF.publisher.telemetry_secret)
            if direct:
                ts = timeutils.parse_isotime(sample_dict['timestamp'])
                sample_dict['timestamp'] = timeutils.normalize_time(ts)
                pecan.request.storage_conn.record_metering_data(sample_dict)
            else:
                published_samples.append(sample_dict)
        if not direct:
            pecan.request.notifier.sample(
                {'user': def_user_id,
                 'tenant': def_project_id,
                 'is_admin': True},
                'telemetry.api',
                {'samples': published_samples})

        return samples

    @wsme_pecan.wsexpose([Statistics],
                         [base.Query], [six.text_type], int, [Aggregate])
    def statistics(self, q=None, groupby=None, period=None, aggregate=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param groupby: Fields for group by aggregation
        :param period: Returned result will be an array of statistics for a
                       period long of that number of seconds.
        :param aggregate: The selectable aggregation functions to be applied.
        """

        rbac.enforce('compute_statistics', pecan.request)

        q = q or []
        groupby = groupby or []
        aggregate = aggregate or []

        if period and period < 0:
            raise base.ClientSideError(_("Period must be positive."))

        kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self.meter_name
        f = storage.SampleFilter(**kwargs)
        g = _validate_groupby_fields(groupby)

        aggregate = utils.uniq(aggregate, ['func', 'param'])
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(
                    tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(
                    tzinfo=None)

        try:
            computed = pecan.request.storage_conn.get_meter_statistics(
                f, period, g, aggregate)
            return [Statistics(start_timestamp=start,
                               end_timestamp=end,
                               **c.as_dict())
                    for c in computed]
        except OverflowError as e:
            params = dict(period=period, err=e)
            raise base.ClientSideError(
                _("Invalid period %(period)s: %(err)s") % params)


class Meter(base.Base):
    """One category of measurements."""

    name = wtypes.text
    "The unique name for the meter"

    type = wtypes.Enum(str, *sample.TYPES)
    "The meter type (see :ref:`measurements`)"

    unit = wtypes.text
    "The unit of measure"

    resource_id = wtypes.text
    "The ID of the :class:`Resource` for which the measurements are taken"

    project_id = wtypes.text
    "The ID of the project or tenant that owns the resource"

    user_id = wtypes.text
    "The ID of the user who last triggered an update to the resource"

    source = wtypes.text
    "The ID of the source that identifies where the meter comes from"

    meter_id = wtypes.text
    "The unique identifier for the meter"

    def __init__(self, **kwargs):
        meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name'])
        # meter_id is of type Unicode but base64.encodestring() only accepts
        # strings. See bug #1333177
        meter_id = base64.b64encode(meter_id.encode('utf-8'))
        kwargs['meter_id'] = meter_id
        super(Meter, self).__init__(**kwargs)

    @classmethod
    def sample(cls):
        return cls(name='instance',
                   type='gauge',
                   unit='instance',
                   resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                   project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
                   user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                   source='openstack',
                   )


class MetersController(rest.RestController):
    """Works on meters."""

    @pecan.expose()
    def _lookup(self, meter_name, *remainder):
        return MeterController(meter_name), remainder

    @wsme_pecan.wsexpose([Meter], [base.Query], int, str)
    def get_all(self, q=None, limit=None, unique=''):
        """Return all known meters, based on the data recorded so far.

        :param q: Filter rules for the meters to be returned.
        :param unique: flag to indicate unique meters to be returned.
        """

        rbac.enforce('get_meters', pecan.request)

        q = q or []

        # Timestamp field is not supported for Meter queries
        limit = v2_utils.enforce_limit(limit)
        kwargs = v2_utils.query_to_kwargs(
            q, pecan.request.storage_conn.get_meters,
            ['limit'], allow_timestamps=False)
        return [Meter.from_db_model(m)
                for m in pecan.request.storage_conn.get_meters(
                    limit=limit, unique=strutils.bool_from_string(unique),
                    **kwargs)]
@ -1,359 +0,0 @@
|
|||
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import jsonschema
from oslo_log import log
from oslo_utils import timeutils
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import samples
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer import utils

LOG = log.getLogger(__name__)


class ComplexQuery(base.Base):
    """Holds a sample query encoded in json."""

    filter = wtypes.text
    "The filter expression encoded in json."

    orderby = wtypes.text
    "List of single-element dicts for specifying the ordering of the results."

    limit = int
    "The maximum number of results to be returned."

    @classmethod
    def sample(cls):
        return cls(filter='{"and": [{"and": [{"=": ' +
                          '{"counter_name": "cpu_util"}}, ' +
                          '{">": {"counter_volume": 0.23}}, ' +
                          '{"<": {"counter_volume": 0.26}}]}, ' +
                          '{"or": [{"and": [{">": ' +
                          '{"timestamp": "2013-12-01T18:00:00"}}, ' +
                          '{"<": ' +
                          '{"timestamp": "2013-12-01T18:15:00"}}]}, ' +
                          '{"and": [{">": ' +
                          '{"timestamp": "2013-12-01T18:30:00"}}, ' +
                          '{"<": ' +
                          '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}',
                   orderby='[{"counter_volume": "ASC"}, ' +
                           '{"timestamp": "DESC"}]',
                   limit=42
                   )


def _list_to_regexp(items, regexp_prefix=""):
    regexp = ["^%s$" % item for item in items]
    regexp = regexp_prefix + "|".join(regexp)
    return regexp
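A quick sketch of what _list_to_regexp produces, using the operator lists defined just below: each item is anchored, and the alternatives are joined into one case-insensitive whole-string pattern.

    import re

    # _list_to_regexp(["and", "or"], "(?i)") == "(?i)^and$|^or$"
    assert re.match("(?i)^and$|^or$", "AND")
    assert not re.match("(?i)^and$|^or$", "android")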


class ValidatedComplexQuery(object):
    complex_operators = ["and", "or"]
    order_directions = ["asc", "desc"]
    simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"]
    regexp_prefix = "(?i)"

    complex_ops = _list_to_regexp(complex_operators, regexp_prefix)
    simple_ops = _list_to_regexp(simple_ops, regexp_prefix)
    order_directions = _list_to_regexp(order_directions, regexp_prefix)

    timestamp_fields = ["timestamp", "state_timestamp"]

    def __init__(self, query, db_model, additional_name_mapping=None,
                 metadata_allowed=False):
        additional_name_mapping = additional_name_mapping or {}
        self.name_mapping = {"user": "user_id",
                             "project": "project_id"}
        self.name_mapping.update(additional_name_mapping)
        valid_keys = db_model.get_field_names()
        valid_keys = list(valid_keys) + list(self.name_mapping.keys())
        valid_fields = _list_to_regexp(valid_keys)

        if metadata_allowed:
            valid_filter_fields = valid_fields + "|^metadata\.[\S]+$"
        else:
            valid_filter_fields = valid_fields

        schema_value = {
            "oneOf": [{"type": "string"},
                      {"type": "number"},
                      {"type": "boolean"}],
            "minProperties": 1,
            "maxProperties": 1}

        schema_value_in = {
            "type": "array",
            "items": {"oneOf": [{"type": "string"},
                                {"type": "number"}]},
            "minItems": 1}

        schema_field = {
            "type": "object",
            "patternProperties": {valid_filter_fields: schema_value},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        schema_field_in = {
            "type": "object",
            "patternProperties": {valid_filter_fields: schema_value_in},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        schema_leaf_in = {
            "type": "object",
            "patternProperties": {"(?i)^in$": schema_field_in},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        schema_leaf_simple_ops = {
            "type": "object",
            "patternProperties": {self.simple_ops: schema_field},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        schema_and_or_array = {
            "type": "array",
            "items": {"$ref": "#"},
            "minItems": 2}

        schema_and_or = {
            "type": "object",
            "patternProperties": {self.complex_ops: schema_and_or_array},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        schema_not = {
            "type": "object",
            "patternProperties": {"(?i)^not$": {"$ref": "#"}},
            "additionalProperties": False,
            "minProperties": 1,
            "maxProperties": 1}

        self.schema = {
            "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"},
                      {"$ref": "#/definitions/leaf_in"},
                      {"$ref": "#/definitions/and_or"},
                      {"$ref": "#/definitions/not"}],
            "minProperties": 1,
            "maxProperties": 1,
            "definitions": {"leaf_simple_ops": schema_leaf_simple_ops,
                            "leaf_in": schema_leaf_in,
                            "and_or": schema_and_or,
                            "not": schema_not}}

        self.orderby_schema = {
            "type": "array",
            "items": {
                "type": "object",
                "patternProperties":
                    {valid_fields:
                        {"type": "string",
                         "pattern": self.order_directions}},
                "additionalProperties": False,
                "minProperties": 1,
                "maxProperties": 1}}

        self.original_query = query

    def validate(self, visibility_field):
        """Validates the query content and does the necessary conversions."""
        if self.original_query.filter is wtypes.Unset:
            self.filter_expr = None
        else:
            try:
                self.filter_expr = json.loads(self.original_query.filter)
                self._validate_filter(self.filter_expr)
            except (ValueError, jsonschema.exceptions.ValidationError) as e:
                raise base.ClientSideError(
                    _("Filter expression not valid: %s") % e)
            self._replace_isotime_with_datetime(self.filter_expr)
            self._convert_operator_to_lower_case(self.filter_expr)
            self._normalize_field_names_for_db_model(self.filter_expr)

        self._force_visibility(visibility_field)

        if self.original_query.orderby is wtypes.Unset:
            self.orderby = None
        else:
            try:
                self.orderby = json.loads(self.original_query.orderby)
                self._validate_orderby(self.orderby)
            except (ValueError, jsonschema.exceptions.ValidationError) as e:
                raise base.ClientSideError(
                    _("Order-by expression not valid: %s") % e)
            self._convert_orderby_to_lower_case(self.orderby)
            self._normalize_field_names_in_orderby(self.orderby)

        self.limit = (None if self.original_query.limit is wtypes.Unset
                      else self.original_query.limit)

        self.limit = v2_utils.enforce_limit(self.limit)

    @staticmethod
    def _convert_orderby_to_lower_case(orderby):
        for orderby_field in orderby:
            utils.lowercase_values(orderby_field)

    def _normalize_field_names_in_orderby(self, orderby):
        for orderby_field in orderby:
            self._replace_field_names(orderby_field)

    def _traverse_postorder(self, tree, visitor):
        op = list(tree.keys())[0]
        if op.lower() in self.complex_operators:
            for i, operand in enumerate(tree[op]):
                self._traverse_postorder(operand, visitor)
        if op.lower() == "not":
            self._traverse_postorder(tree[op], visitor)

        visitor(tree)

    def _check_cross_project_references(self, own_project_id,
                                        visibility_field):
        """Do not allow queries that reference a project other than
        own_project_id.
        """
        def check_project_id(subfilter):
            op, value = list(subfilter.items())[0]
            if (op.lower() not in self.complex_operators
                    and list(value.keys())[0] == visibility_field
                    and value[visibility_field] != own_project_id):
                raise base.ProjectNotAuthorized(value[visibility_field])

        self._traverse_postorder(self.filter_expr, check_project_id)

    def _force_visibility(self, visibility_field):
        """Force visibility field.

        If the tenant is not an admin, insert an extra
        "and <visibility_field>=<tenant's project_id>" clause into the query.
        """
        authorized_project = rbac.get_limited_to_project(pecan.request.headers)
        is_admin = authorized_project is None
        if not is_admin:
            self._restrict_to_project(authorized_project, visibility_field)
            self._check_cross_project_references(authorized_project,
                                                 visibility_field)

    def _restrict_to_project(self, project_id, visibility_field):
        restriction = {"=": {visibility_field: project_id}}
        if self.filter_expr is None:
            self.filter_expr = restriction
        else:
            self.filter_expr = {"and": [restriction, self.filter_expr]}

    def _replace_isotime_with_datetime(self, filter_expr):
        def replace_isotime(subfilter):
            op, value = list(subfilter.items())[0]
            if op.lower() not in self.complex_operators:
                field = list(value.keys())[0]
                if field in self.timestamp_fields:
                    date_time = self._convert_to_datetime(subfilter[op][field])
                    subfilter[op][field] = date_time

        self._traverse_postorder(filter_expr, replace_isotime)

    def _normalize_field_names_for_db_model(self, filter_expr):
        def _normalize_field_names(subfilter):
            op, value = list(subfilter.items())[0]
            if op.lower() not in self.complex_operators:
                self._replace_field_names(value)
        self._traverse_postorder(filter_expr,
                                 _normalize_field_names)

    def _replace_field_names(self, subfilter):
        field, value = list(subfilter.items())[0]
        if field in self.name_mapping:
            del subfilter[field]
            subfilter[self.name_mapping[field]] = value
        if field.startswith("metadata."):
            del subfilter[field]
            subfilter["resource_" + field] = value

    def _convert_operator_to_lower_case(self, filter_expr):
        self._traverse_postorder(filter_expr, utils.lowercase_keys)

    @staticmethod
    def _convert_to_datetime(isotime):
        try:
            date_time = timeutils.parse_isotime(isotime)
            date_time = date_time.replace(tzinfo=None)
            return date_time
        except ValueError:
            LOG.exception(_("String %s is not a valid isotime") % isotime)
            msg = _('Failed to parse the timestamp value %s') % isotime
            raise base.ClientSideError(msg)

    def _validate_filter(self, filter_expr):
        jsonschema.validate(filter_expr, self.schema)

    def _validate_orderby(self, orderby_expr):
        jsonschema.validate(orderby_expr, self.orderby_schema)


class QuerySamplesController(rest.RestController):
    """Provides complex query possibilities for samples."""

    @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery)
    def post(self, body):
        """Define query for retrieving Sample data.

        :param body: Query rules for the samples to be returned.
        """

        rbac.enforce('query_sample', pecan.request)

        sample_name_mapping = {"resource": "resource_id",
                               "meter": "counter_name",
                               "type": "counter_type",
                               "unit": "counter_unit",
                               "volume": "counter_volume"}

        query = ValidatedComplexQuery(body,
                                      storage.models.Sample,
                                      sample_name_mapping,
                                      metadata_allowed=True)
        query.validate(visibility_field="project_id")
        conn = pecan.request.storage_conn
        return [samples.Sample.from_db_model(s)
                for s in conn.query_samples(query.filter_expr,
                                            query.orderby,
                                            query.limit)]


class QueryController(rest.RestController):

    samples = QuerySamplesController()
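The generated filter schema is easiest to understand from a cut-down example; this is an illustrative stand-in (not the schema built above) showing how a single "=" leaf validates with jsonschema:

    import json

    import jsonschema

    # One "=" leaf over a fixed field list; the real schema adds "in",
    # "and"/"or" and "not" via "$ref" recursion on top of this shape.
    schema = {
        "type": "object",
        "patternProperties": {
            "(?i)^=$": {
                "type": "object",
                "patternProperties": {
                    "^project_id$|^counter_name$": {
                        "oneOf": [{"type": "string"}, {"type": "number"}]}},
                "additionalProperties": False,
                "minProperties": 1,
                "maxProperties": 1}},
        "additionalProperties": False,
        "minProperties": 1,
        "maxProperties": 1}

    jsonschema.validate(json.loads('{"=": {"counter_name": "cpu_util"}}'),
                        schema)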

@@ -1,157 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import urllib

import pecan
from pecan import rest
import six
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils
from ceilometer.api import rbac
from ceilometer.i18n import _


class Resource(base.Base):
    """An externally defined object for which samples have been received."""

    resource_id = wtypes.text
    "The unique identifier for the resource"

    project_id = wtypes.text
    "The ID of the owning project or tenant"

    user_id = wtypes.text
    "The ID of the user who created the resource or updated it last"

    first_sample_timestamp = datetime.datetime
    "UTC date & time not later than the first sample known for this resource"

    last_sample_timestamp = datetime.datetime
    "UTC date & time not earlier than the last sample known for this resource"

    metadata = {wtypes.text: wtypes.text}
    "Arbitrary metadata associated with the resource"

    links = [base.Link]
    "A list containing a self link and associated meter links"

    source = wtypes.text
    "The source where the resource comes from"

    def __init__(self, metadata=None, **kwds):
        metadata = metadata or {}
        metadata = utils.flatten_metadata(metadata)
        super(Resource, self).__init__(metadata=metadata, **kwds)

    @classmethod
    def sample(cls):
        return cls(
            resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
            project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
            user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
            timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
            source="openstack",
            metadata={'name1': 'value1',
                      'name2': 'value2'},
            links=[
                base.Link(href=('http://localhost:8777/v2/resources/'
                                'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
                          rel='self'),
                base.Link(href=('http://localhost:8777/v2/meters/volume?'
                                'q.field=resource_id&q.value='
                                'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
                          rel='volume')
            ],
        )


class ResourcesController(rest.RestController):
    """Works on resources."""

    @staticmethod
    def _make_link(rel_name, url, type, type_arg, query=None):
        query_str = ''
        if query:
            query_str = '?q.field=%s&q.value=%s' % (query['field'],
                                                    query['value'])
        return base.Link(href='%s/v2/%s/%s%s' % (url, type,
                                                 type_arg, query_str),
                         rel=rel_name)

    def _resource_links(self, resource_id, meter_links=1):
        links = [self._make_link('self', pecan.request.application_url,
                                 'resources', resource_id)]
        if meter_links:
            for meter in pecan.request.storage_conn.get_meters(
                    resource=resource_id):
                query = {'field': 'resource_id', 'value': resource_id}
                links.append(self._make_link(meter.name,
                                             pecan.request.application_url,
                                             'meters', meter.name,
                                             query=query))
        return links

    @wsme_pecan.wsexpose(Resource, six.text_type)
    def get_one(self, resource_id):
        """Retrieve details about one resource.

        :param resource_id: The UUID of the resource.
        """

        rbac.enforce('get_resource', pecan.request)
        # In case there is a special character in the resource id, for
        # example, swift can generate samples with a resource id like
        # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance
        resource_id = urllib.unquote(resource_id)

        authorized_project = rbac.get_limited_to_project(pecan.request.headers)
        resources = list(pecan.request.storage_conn.get_resources(
            resource=resource_id, project=authorized_project))
        if not resources:
            raise base.EntityNotFound(_('Resource'), resource_id)
        return Resource.from_db_and_links(resources[0],
                                          self._resource_links(resource_id))

    @wsme_pecan.wsexpose([Resource], [base.Query], int, int)
    def get_all(self, q=None, limit=None, meter_links=1):
        """Retrieve definitions of all of the resources.

        :param q: Filter rules for the resources to be returned.
        :param limit: Maximum number of resources to be returned.
        :param meter_links: option to include related meter links.
        """

        rbac.enforce('get_resources', pecan.request)

        q = q or []
        limit = utils.enforce_limit(limit)
        kwargs = utils.query_to_kwargs(
            q, pecan.request.storage_conn.get_resources, ['limit'])
        resources = [
            Resource.from_db_and_links(r,
                                       self._resource_links(r.resource_id,
                                                            meter_links))
            for r in pecan.request.storage_conn.get_resources(limit=limit,
                                                              **kwargs)]
        return resources
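The link layout built by _make_link can be sketched standalone; this variant returns a plain dict instead of base.Link, renames the `type` argument to avoid shadowing the builtin, and the host is hypothetical:

    def make_link(rel_name, url, type_, type_arg, query=None):
        # Mirrors _make_link above: optional q.field/q.value pair appended
        # to a /v2/<collection>/<id> href.
        query_str = ''
        if query:
            query_str = '?q.field=%s&q.value=%s' % (query['field'],
                                                    query['value'])
        return {'rel': rel_name,
                'href': '%s/v2/%s/%s%s' % (url, type_, type_arg, query_str)}

    link = make_link('cpu_util', 'http://localhost:8777', 'meters', 'cpu_util',
                     query={'field': 'resource_id',
                            'value': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'})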

@@ -18,82 +18,8 @@
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import exceptions
from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
import pecan

from ceilometer.api.controllers.v2 import capabilities
from ceilometer.api.controllers.v2 import events
from ceilometer.api.controllers.v2 import meters
from ceilometer.api.controllers.v2 import query
from ceilometer.api.controllers.v2 import resources
from ceilometer.api.controllers.v2 import samples
from ceilometer.i18n import _, _LW
from ceilometer import keystone_client


API_OPTS = [
    cfg.BoolOpt('gnocchi_is_enabled',
                default=None,
                help=('Set True to disable resource/meter/sample URLs. '
                      'Default autodetection by querying keystone.')),
    cfg.BoolOpt('aodh_is_enabled',
                default=None,
                help=('Set True to redirect alarms URLs to aodh. '
                      'Default autodetection by querying keystone.')),
    cfg.StrOpt('aodh_url',
               default=None,
               help=('The endpoint of Aodh to redirect alarms URLs '
                     'to Aodh API. Default autodetection by querying '
                     'keystone.')),
]

cfg.CONF.register_opts(API_OPTS, group='api')
cfg.CONF.import_opt('meter_dispatchers', 'ceilometer.dispatcher')

LOG = log.getLogger(__name__)


def gnocchi_abort():
    pecan.abort(410, ("This telemetry installation is configured to use "
                      "Gnocchi. Please use the Gnocchi API available on "
                      "the metric endpoint to retrieve data."))


def aodh_abort():
    pecan.abort(410, _("alarms URLs are unavailable when Aodh is "
                       "disabled or unavailable."))


def aodh_redirect(url):
    # NOTE(sileht): we use 307 and not 301 or 302 to allow
    # clients to redirect POST/PUT/DELETE/...
    # FIXME(sileht): it would be better to use 308, but webob
    # doesn't handle it :(
    # https://github.com/Pylons/webob/pull/207
    pecan.redirect(location=url + pecan.request.path_qs,
                   code=307)


class QueryController(object):
    def __init__(self, gnocchi_is_enabled=False, aodh_url=None):
        self.gnocchi_is_enabled = gnocchi_is_enabled
        self.aodh_url = aodh_url

    @pecan.expose()
    def _lookup(self, kind, *remainder):
        if kind == 'alarms' and self.aodh_url:
            aodh_redirect(self.aodh_url)
        elif kind == 'alarms':
            aodh_abort()
        elif kind == 'samples' and self.gnocchi_is_enabled:
            gnocchi_abort()
        elif kind == 'samples':
            return query.QuerySamplesController(), remainder
        else:
            pecan.abort(404)


class V2Controller(object):

@@ -102,94 +28,3 @@ class V2Controller(object):
    event_types = events.EventTypesController()
    events = events.EventsController()
    capabilities = capabilities.CapabilitiesController()

    def __init__(self):
        self._gnocchi_is_enabled = None
        self._aodh_is_enabled = None
        self._aodh_url = None

    @property
    def gnocchi_is_enabled(self):
        if self._gnocchi_is_enabled is None:
            if cfg.CONF.api.gnocchi_is_enabled is not None:
                self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled

            elif ("gnocchi" not in cfg.CONF.meter_dispatchers
                  or "database" in cfg.CONF.meter_dispatchers):
                self._gnocchi_is_enabled = False
            else:
                try:
                    catalog = keystone_client.get_service_catalog(
                        keystone_client.get_client())
                    catalog.url_for(service_type='metric')
                except exceptions.EndpointNotFound:
                    self._gnocchi_is_enabled = False
                except exceptions.ClientException:
                    LOG.warning(_LW("Can't connect to keystone, assuming "
                                    "gnocchi is disabled; will retry later"))
                else:
                    self._gnocchi_is_enabled = True
                    LOG.warning(_LW("ceilometer-api started with gnocchi "
                                    "enabled. The resources/meters/samples "
                                    "URLs are disabled."))
        return self._gnocchi_is_enabled

    @property
    def aodh_url(self):
        if self._aodh_url is None:
            if cfg.CONF.api.aodh_is_enabled is False:
                self._aodh_url = ""
            elif cfg.CONF.api.aodh_url is not None:
                self._aodh_url = self._normalize_aodh_url(
                    cfg.CONF.api.aodh_url)
            else:
                try:
                    catalog = keystone_client.get_service_catalog(
                        keystone_client.get_client())
                    self._aodh_url = self._normalize_aodh_url(
                        catalog.url_for(service_type='alarming'))
                except exceptions.EndpointNotFound:
                    self._aodh_url = ""
                except exceptions.ClientException:
                    LOG.warning(_LW("Can't connect to keystone, assuming "
                                    "aodh is disabled; will retry later."))
                else:
                    LOG.warning(_LW("ceilometer-api started with aodh "
                                    "enabled. Alarms URLs will be redirected "
                                    "to the aodh endpoint."))
        return self._aodh_url

    @pecan.expose()
    def _lookup(self, kind, *remainder):
        if (kind in ['meters', 'resources', 'samples']
                and self.gnocchi_is_enabled):
            if kind == 'meters' and pecan.request.method == 'POST':
                direct = pecan.request.params.get('direct', '')
                if strutils.bool_from_string(direct):
                    pecan.abort(400, _('direct option cannot be true when '
                                       'Gnocchi is enabled.'))
                return meters.MetersController(), remainder
            gnocchi_abort()
        elif kind == 'meters':
            return meters.MetersController(), remainder
        elif kind == 'resources':
            return resources.ResourcesController(), remainder
        elif kind == 'samples':
            return samples.SamplesController(), remainder
        elif kind == 'query':
            return QueryController(
                gnocchi_is_enabled=self.gnocchi_is_enabled,
                aodh_url=self.aodh_url,
            ), remainder
        elif kind == 'alarms' and (not self.aodh_url):
            aodh_abort()
        elif kind == 'alarms' and self.aodh_url:
            aodh_redirect(self.aodh_url)
        else:
            pecan.abort(404)

    @staticmethod
    def _normalize_aodh_url(url):
        if url.endswith("/"):
            return url[:-1]
        return url
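How an alarms request gets rewritten for Aodh, as a minimal sketch (the endpoint URL is hypothetical; the real redirect is issued with HTTP 307 so the method and body survive the hop):

    def normalize(url):
        # Same trailing-slash handling as _normalize_aodh_url above.
        return url[:-1] if url.endswith('/') else url

    aodh_url = normalize('http://aodh.example.org:8042/')
    path_qs = '/v2/alarms?q.field=state&q.value=alarm'
    redirect_to = aodh_url + path_qs  # returned to the client with a 307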

@@ -1,145 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import uuid

import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer import sample
from ceilometer import storage


class Sample(base.Base):
    """One measurement."""

    id = wtypes.text
    "The unique identifier for the sample."

    meter = wtypes.text
    "The meter name this sample is for."

    type = wtypes.Enum(str, *sample.TYPES)
    "The meter type (see :ref:`meter_types`)"

    unit = wtypes.text
    "The unit of measure."

    volume = float
    "The metered value."

    user_id = wtypes.text
    "The user this sample was taken for."

    project_id = wtypes.text
    "The project this sample was taken for."

    resource_id = wtypes.text
    "The :class:`Resource` this sample was taken for."

    source = wtypes.text
    "The source that identifies where the sample comes from."

    timestamp = datetime.datetime
    "When the sample has been generated."

    recorded_at = datetime.datetime
    "When the sample has been recorded."

    metadata = {wtypes.text: wtypes.text}
    "Arbitrary metadata associated with the sample."

    @classmethod
    def from_db_model(cls, m):
        return cls(id=m.message_id,
                   meter=m.counter_name,
                   type=m.counter_type,
                   unit=m.counter_unit,
                   volume=m.counter_volume,
                   user_id=m.user_id,
                   project_id=m.project_id,
                   resource_id=m.resource_id,
                   source=m.source,
                   timestamp=m.timestamp,
                   recorded_at=m.recorded_at,
                   metadata=utils.flatten_metadata(m.resource_metadata))

    @classmethod
    def sample(cls):
        return cls(id=str(uuid.uuid1()),
                   meter='instance',
                   type='gauge',
                   unit='instance',
                   volume=1,
                   resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                   project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
                   user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                   timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
                   recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
                   source='openstack',
                   metadata={'name1': 'value1',
                             'name2': 'value2'},
                   )


class SamplesController(rest.RestController):
    """Controller managing the samples."""

    @wsme_pecan.wsexpose([Sample], [base.Query], int)
    def get_all(self, q=None, limit=None):
        """Return all known samples, based on the data recorded so far.

        :param q: Filter rules for the samples to be returned.
        :param limit: Maximum number of samples to be returned.
        """

        rbac.enforce('get_samples', pecan.request)

        q = q or []

        limit = utils.enforce_limit(limit)
        kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        f = storage.SampleFilter(**kwargs)
        return map(Sample.from_db_model,
                   pecan.request.storage_conn.get_samples(f, limit=limit))

    @wsme_pecan.wsexpose(Sample, wtypes.text)
    def get_one(self, sample_id):
        """Return a sample.

        :param sample_id: the id of the sample.
        """

        rbac.enforce('get_sample', pecan.request)

        f = storage.SampleFilter(message_id=sample_id)

        samples = list(pecan.request.storage_conn.get_samples(f))
        if len(samples) < 1:
            raise base.EntityNotFound(_('Sample'), sample_id)

        return Sample.from_db_model(samples[0])
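A minimal sketch of the same lookup run outside the API, assuming a configured metering backend; the message id is a placeholder:

    from oslo_config import cfg

    from ceilometer import storage

    conn = storage.get_connection_from_config(cfg.CONF, 'metering')
    f = storage.SampleFilter(message_id='<message-id>')  # placeholder id
    matches = list(conn.get_samples(f))
    first = matches[0] if matches else None  # the API raises EntityNotFound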

@@ -18,22 +18,15 @@
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime
import functools
import inspect

from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import pecan
import six
import wsme

from ceilometer.api.controllers.v2 import base
from ceilometer.api import rbac
from ceilometer.i18n import _, _LI
from ceilometer import utils

LOG = log.getLogger(__name__)
cfg.CONF.import_opt('default_api_return_limit', 'ceilometer.api.app',
@@ -61,265 +54,6 @@ def get_auth_project(on_behalf_of=None):
    return auth_project


def sanitize_query(query, db_func, on_behalf_of=None):
    """Check the query.

    See if:
    1) the request is coming from an admin - then allow full visibility
    2) non-admin - make sure that the query includes the requester's project.
    """
    q = copy.copy(query)

    auth_project = get_auth_project(on_behalf_of)
    if auth_project:
        _verify_query_segregation(q, auth_project)

        proj_q = [i for i in q if i.field == 'project_id']
        valid_keys = inspect.getargspec(db_func)[0]
        if not proj_q and 'on_behalf_of' not in valid_keys:
            # The user is restricted, but they didn't specify a project
            # so add it for them.
            q.append(base.Query(field='project_id',
                                op='eq',
                                value=auth_project))
    return q


def _verify_query_segregation(query, auth_project=None):
    """Ensure non-admin queries are not constrained to another project."""
    auth_project = (auth_project or
                    rbac.get_limited_to_project(pecan.request.headers))

    if not auth_project:
        return

    for q in query:
        if q.field in ('project', 'project_id') and auth_project != q.value:
            raise base.ProjectNotAuthorized(q.value)


def validate_query(query, db_func, internal_keys=None,
                   allow_timestamps=True):
    """Validates the syntax of the query and verifies the query.

    Verification checks whether the query request is authorized for the
    included project.
    :param query: Query expression that should be validated
    :param db_func: the function on the storage level, whose arguments
        will form the valid_keys list, which defines the valid fields for a
        query expression
    :param internal_keys: internally used field names, that should not be
        used for querying
    :param allow_timestamps: defines whether the timestamp-based constraint is
        applicable for this query or not

    :raises InvalidInput: if an operator is not supported for a given field
    :raises InvalidInput: if timestamp constraints are allowed, but
        search_offset was included without a timestamp constraint
    :raises UnknownArgument: if a field name is not a timestamp field, nor
        in the list of valid keys
    """

    internal_keys = internal_keys or []
    _verify_query_segregation(query)

    valid_keys = inspect.getargspec(db_func)[0]

    internal_timestamp_keys = ['end_timestamp', 'start_timestamp',
                               'end_timestamp_op', 'start_timestamp_op']
    if 'start_timestamp' in valid_keys:
        internal_keys += internal_timestamp_keys
        valid_keys += ['timestamp', 'search_offset']
    internal_keys.append('self')
    internal_keys.append('metaquery')
    valid_keys = set(valid_keys) - set(internal_keys)
    translation = {'user_id': 'user',
                   'project_id': 'project',
                   'resource_id': 'resource'}

    has_timestamp_query = _validate_timestamp_fields(query,
                                                     'timestamp',
                                                     ('lt', 'le', 'gt', 'ge'),
                                                     allow_timestamps)
    has_search_offset_query = _validate_timestamp_fields(query,
                                                         'search_offset',
                                                         'eq',
                                                         allow_timestamps)

    if has_search_offset_query and not has_timestamp_query:
        raise wsme.exc.InvalidInput('field', 'search_offset',
                                    "search_offset cannot be used without " +
                                    "timestamp")

    def _is_field_metadata(field):
        return (field.startswith('metadata.') or
                field.startswith('resource_metadata.'))

    for i in query:
        if i.field not in ('timestamp', 'search_offset'):
            key = translation.get(i.field, i.field)
            operator = i.op
            if key in valid_keys or _is_field_metadata(i.field):
                if operator == 'eq':
                    if key == 'enabled':
                        i._get_value_as_type('boolean')
                    elif _is_field_metadata(key):
                        i._get_value_as_type()
                else:
                    raise wsme.exc.InvalidInput('op', i.op,
                                                'unimplemented operator for '
                                                '%s' % i.field)
            else:
                msg = ("unrecognized field in query: %s, "
                       "valid keys: %s") % (query, sorted(valid_keys))
                raise wsme.exc.UnknownArgument(key, msg)


def _validate_timestamp_fields(query, field_name, operator_list,
                               allow_timestamps):
    """Validates the timestamp related constraints in a query if there are any.

    :param query: query expression that may contain the timestamp fields
    :param field_name: timestamp name, which should be checked (timestamp,
        search_offset)
    :param operator_list: list of operators that are supported for that
        timestamp, which was specified in the parameter field_name
    :param allow_timestamps: defines whether the timestamp-based constraint is
        applicable to this query or not

    :returns: True, if there was a timestamp constraint, containing
        a timestamp field named as defined in field_name, in the query and it
        was allowed and syntactically correct.
    :returns: False, if there wasn't a timestamp constraint, containing a
        timestamp field named as defined in field_name, in the query

    :raises InvalidInput: if an operator is unsupported for a given timestamp
        field
    :raises UnknownArgument: if the timestamp constraint is not allowed in
        the query
    """

    for item in query:
        if item.field == field_name:
            # If *timestamp* or *search_offset* field was specified in the
            # query, but timestamp is not supported on that resource, on
            # which the query was invoked, then raise an exception.
            if not allow_timestamps:
                raise wsme.exc.UnknownArgument(field_name,
                                               "not valid for " +
                                               "this resource")
            if item.op not in operator_list:
                raise wsme.exc.InvalidInput('op', item.op,
                                            'unimplemented operator for %s' %
                                            item.field)
            return True
    return False


def query_to_kwargs(query, db_func, internal_keys=None,
                    allow_timestamps=True):
    validate_query(query, db_func, internal_keys=internal_keys,
                   allow_timestamps=allow_timestamps)
    query = sanitize_query(query, db_func)
    translation = {'user_id': 'user',
                   'project_id': 'project',
                   'resource_id': 'resource'}
    stamp = {}
    metaquery = {}
    kwargs = {}
    for i in query:
        if i.field == 'timestamp':
            if i.op in ('lt', 'le'):
                stamp['end_timestamp'] = i.value
                stamp['end_timestamp_op'] = i.op
            elif i.op in ('gt', 'ge'):
                stamp['start_timestamp'] = i.value
                stamp['start_timestamp_op'] = i.op
        else:
            if i.op == 'eq':
                if i.field == 'search_offset':
                    stamp['search_offset'] = i.value
                elif i.field == 'enabled':
                    kwargs[i.field] = i._get_value_as_type('boolean')
                elif i.field.startswith('metadata.'):
                    metaquery[i.field] = i._get_value_as_type()
                elif i.field.startswith('resource_metadata.'):
                    metaquery[i.field[9:]] = i._get_value_as_type()
                else:
                    key = translation.get(i.field, i.field)
                    kwargs[key] = i.value

    if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]:
        kwargs['metaquery'] = metaquery
    if stamp:
        kwargs.update(_get_query_timestamps(stamp))
    return kwargs
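A distilled, self-contained sketch of the timestamp branch above ('gt'/'ge' populate the start_* keys, 'lt'/'le' the end_* keys; the values are made up):

    stamp = {}
    for op, value in [('ge', '2015-01-01T00:00:00'),
                      ('lt', '2015-01-02T00:00:00')]:
        if op in ('lt', 'le'):
            stamp['end_timestamp'], stamp['end_timestamp_op'] = value, op
        elif op in ('gt', 'ge'):
            stamp['start_timestamp'], stamp['start_timestamp_op'] = value, op
    # stamp then feeds _get_query_timestamps(), which parses the ISO strings.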


def _get_query_timestamps(args=None):
    """Return any optional timestamp information in the request.

    Determine the desired range, if any, from the GET arguments. Set
    up the query range using the specified offset.

    [query_start ... start_timestamp ... end_timestamp ... query_end]

    Returns a dictionary containing:

    start_timestamp: First timestamp to use for query
    start_timestamp_op: First timestamp operator to use for query
    end_timestamp: Final timestamp to use for query
    end_timestamp_op: Final timestamp operator to use for query
    """

    if args is None:
        return {}
    search_offset = int(args.get('search_offset', 0))

    def _parse_timestamp(timestamp):
        if not timestamp:
            return None
        try:
            iso_timestamp = timeutils.parse_isotime(timestamp)
            iso_timestamp = iso_timestamp.replace(tzinfo=None)
        except ValueError:
            raise wsme.exc.InvalidInput('timestamp', timestamp,
                                        'invalid timestamp format')
        return iso_timestamp

    start_timestamp = _parse_timestamp(args.get('start_timestamp'))
    end_timestamp = _parse_timestamp(args.get('end_timestamp'))
    start_timestamp = start_timestamp - datetime.timedelta(
        minutes=search_offset) if start_timestamp else None
    end_timestamp = end_timestamp + datetime.timedelta(
        minutes=search_offset) if end_timestamp else None
    return {'start_timestamp': start_timestamp,
            'end_timestamp': end_timestamp,
            'start_timestamp_op': args.get('start_timestamp_op'),
            'end_timestamp_op': args.get('end_timestamp_op')}
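The search_offset handling widens the requested window on both ends; a small sketch with a 10-minute offset:

    import datetime

    offset = datetime.timedelta(minutes=10)
    start = datetime.datetime(2015, 1, 1, 12, 0) - offset
    end = datetime.datetime(2015, 1, 1, 13, 0) + offset
    assert start.minute == 50 and end.minute == 10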


def flatten_metadata(metadata):
    """Return flattened resource metadata.

    Metadata is returned with flattened nested structures (except nested sets)
    and with all values converted to unicode strings.
    """
    if metadata:
        # After changing recursive_keypairs' output we need to keep
        # the flattened output unchanged.
        # Example: recursive_keypairs({'a': {'b': {'c': 'd'}}}, '.')
        # output before: a.b:c=d
        # output now: a.b.c=d
        # So to keep the first variant just replace all dots except the first
        return dict((k.replace('.', ':').replace(':', '.', 1),
                     six.text_type(v))
                    for k, v in utils.recursive_keypairs(metadata,
                                                         separator='.')
                    if type(v) is not set)
    return {}
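The key rewriting above in isolation: every separator beyond the first level is turned back into ':' so pre-existing consumers of the flattened form keep working.

    key = 'a.b.c'  # what recursive_keypairs(..., separator='.') now emits
    flat = key.replace('.', ':').replace(':', '.', 1)
    assert flat == 'a.b:c'  # the historical output format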


# TODO(fabiog): this decorator should disappear and have a more unified
# way of controlling access and scope. Before messing with this, though
# I feel this file should be re-factored in smaller chunks one for each
@@ -14,20 +14,11 @@
# under the License.

from oslo_config import cfg
from oslo_log import log
import oslo_messaging

from pecan import hooks

from ceilometer.i18n import _LE
from ceilometer import messaging
from ceilometer import storage

LOG = log.getLogger(__name__)

cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging',
                    group='publisher_notifier')


class ConfigHook(hooks.PecanHook):
    """Attach the configuration object to the request.

@@ -43,45 +34,12 @@ class DBHook(hooks.PecanHook):

class DBHook(hooks.PecanHook):

    def __init__(self):
        self.storage_connection = DBHook.get_connection('metering')
        self.event_storage_connection = DBHook.get_connection('event')

        if (not self.storage_connection
                and not self.event_storage_connection):
            raise Exception("Api failed to start. Failed to connect to "
                            "databases, purpose: %s" %
                            ', '.join(['metering', 'event']))
        self.event_storage_connection = storage.get_connection_from_config(
            cfg.CONF)

    def before(self, state):
        state.request.storage_conn = self.storage_connection
        state.request.event_storage_conn = self.event_storage_connection

    @staticmethod
    def get_connection(purpose):
        try:
            return storage.get_connection_from_config(cfg.CONF, purpose)
        except Exception as err:
            params = {"purpose": purpose, "err": err}
            LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
                              "retry later: %(err)s") % params)


class NotifierHook(hooks.PecanHook):
    """Create and attach a notifier to the request.

    Usually, samples will be pushed to the notification bus by the notifier
    when they are posted via the /v2/meters/ API.
    """

    def __init__(self):
        transport = messaging.get_transport()
        self.notifier = oslo_messaging.Notifier(
            transport, driver=cfg.CONF.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.api")

    def before(self, state):
        state.request.notifier = self.notifier


class TranslationHook(hooks.PecanHook):
@@ -1,29 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_service import service as os_service

from ceilometer import notification
from ceilometer import service

CONF = cfg.CONF


def main():
    service.prepare_service()
    os_service.launch(CONF, notification.NotificationService(),
                      workers=CONF.notification.workers).wait()

@@ -1,29 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_service import service as os_service

from ceilometer import collector
from ceilometer import service

CONF = cfg.CONF


def main():
    service.prepare_service()
    os_service.launch(CONF, collector.CollectorService(),
                      workers=CONF.collector.workers).wait()

@@ -1,84 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_service import service as os_service

from ceilometer.agent import manager
from ceilometer.i18n import _LW
from ceilometer import service

LOG = log.getLogger(__name__)

CONF = cfg.CONF


class MultiChoicesOpt(cfg.Opt):
    def __init__(self, name, choices=None, **kwargs):
        super(MultiChoicesOpt, self).__init__(
            name, type=DeduplicatedCfgList(choices), **kwargs)
        self.choices = choices

    def _get_argparse_kwargs(self, group, **kwargs):
        """Extends the base argparse keyword dict for multi-choice options."""
        kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group)
        kwargs['nargs'] = '+'
        choices = kwargs.get('choices', self.choices)
        if choices:
            kwargs['choices'] = choices
        return kwargs


class DeduplicatedCfgList(cfg.types.List):
    def __init__(self, choices=None, **kwargs):
        super(DeduplicatedCfgList, self).__init__(**kwargs)
        self.choices = choices or []

    def __call__(self, *args, **kwargs):
        result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs)
        result_set = set(result)
        if len(result) != len(result_set):
            LOG.warning(_LW("Duplicated values: %s found in CLI options, "
                            "auto de-duplicated"), result)
            result = list(result_set)
        if self.choices and not (result_set <= set(self.choices)):
            raise Exception('Valid values are %s, but found %s'
                            % (self.choices, result))
        return result
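The de-duplication semantics in a nutshell (note that going through set() does not preserve the original order):

    values = ['compute', 'central', 'compute']
    unique = list(set(values))  # duplicates collapse, order is not preserved
    assert sorted(unique) == ['central', 'compute']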


CLI_OPTS = [
    MultiChoicesOpt('polling-namespaces',
                    default=['compute', 'central'],
                    choices=['compute', 'central', 'ipmi'],
                    dest='polling_namespaces',
                    help='Polling namespace(s) to be used while '
                         'resource polling'),
    MultiChoicesOpt('pollster-list',
                    default=[],
                    dest='pollster_list',
                    help='List of pollsters (or wildcard templates) to be '
                         'used while polling'),
]

CONF.register_cli_opts(CLI_OPTS)


def main():
    service.prepare_service()
    os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces,
                                                 CONF.pollster_list)).wait()

@@ -1,93 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright 2012-2014 Julien Danjou
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Command line tool for creating a meter for Ceilometer.
"""
import logging
import sys

from oslo_config import cfg
from oslo_utils import timeutils
from stevedore import extension

from ceilometer import pipeline
from ceilometer import sample
from ceilometer import service


def send_sample():
    cfg.CONF.register_cli_opts([
        cfg.StrOpt('sample-name',
                   short='n',
                   help='Meter name.',
                   required=True),
        cfg.StrOpt('sample-type',
                   short='y',
                   help='Meter type (gauge, delta, cumulative).',
                   default='gauge',
                   required=True),
        cfg.StrOpt('sample-unit',
                   short='U',
                   help='Meter unit.'),
        cfg.IntOpt('sample-volume',
                   short='l',
                   help='Meter volume value.',
                   default=1),
        cfg.StrOpt('sample-resource',
                   short='r',
                   help='Meter resource id.',
                   required=True),
        cfg.StrOpt('sample-user',
                   short='u',
                   help='Meter user id.'),
        cfg.StrOpt('sample-project',
                   short='p',
                   help='Meter project id.'),
        cfg.StrOpt('sample-timestamp',
                   short='i',
                   help='Meter timestamp.',
                   default=timeutils.utcnow().isoformat()),
        cfg.StrOpt('sample-metadata',
                   short='m',
                   help='Meter metadata.'),
    ])

    service.prepare_service()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    pipeline_manager = pipeline.setup_pipeline(
        extension.ExtensionManager('ceilometer.transformer'))

    with pipeline_manager.publisher() as p:
        p([sample.Sample(
            name=cfg.CONF.sample_name,
            type=cfg.CONF.sample_type,
            unit=cfg.CONF.sample_unit,
            volume=cfg.CONF.sample_volume,
            user_id=cfg.CONF.sample_user,
            project_id=cfg.CONF.sample_project,
            resource_id=cfg.CONF.sample_resource,
            timestamp=cfg.CONF.sample_timestamp,
            resource_metadata=cfg.CONF.sample_metadata and eval(
                cfg.CONF.sample_metadata))])
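With the options registered above, a typical invocation would look like "ceilometer-send-sample -n test.meter -y gauge -l 42 -r bd9431c1-8d69-4ad3-803a-8d4a6b89fd36" (the console-script name is an assumption here; check setup.cfg for the exact entry point). Note that the metadata string goes through eval(), so it must only ever be given trusted input.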

@@ -27,25 +27,15 @@ LOG = log.getLogger(__name__)

def dbsync():
    service.prepare_service()
    storage.get_connection_from_config(cfg.CONF, 'metering').upgrade()
    storage.get_connection_from_config(cfg.CONF, 'event').upgrade()
    storage.get_connection_from_config(cfg.CONF).upgrade()


def expirer():
    service.prepare_service()

    if cfg.CONF.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.metering_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database metering time to live "
                     "is disabled"))

    if cfg.CONF.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
        event_conn = storage.get_connection_from_config(cfg.CONF)
        event_conn.clear_expired_event_data(
            cfg.CONF.database.event_time_to_live)
    else:
@@ -1,184 +0,0 @@
#
# Copyright 2012-2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from itertools import chain
import socket

import msgpack
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import netutils
from oslo_utils import units

from ceilometer import dispatcher
from ceilometer.i18n import _, _LE, _LW
from ceilometer import messaging
from ceilometer import service_base
from ceilometer import utils

OPTS = [
    cfg.StrOpt('udp_address',
               default='0.0.0.0',
               help='Address to which the UDP socket is bound. Set to '
                    'an empty string to disable.'),
    cfg.PortOpt('udp_port',
                default=4952,
                help='Port to which the UDP socket is bound.'),
    cfg.IntOpt('batch_size',
               default=1,
               help='Number of notification messages to wait before '
                    'dispatching them'),
    cfg.IntOpt('batch_timeout',
               default=None,
               help='Number of seconds to wait before dispatching samples '
                    'when batch_size is not reached (None means '
                    'indefinitely)'),
]

cfg.CONF.register_opts(OPTS, group="collector")
cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging',
                    group='publisher_notifier')
cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging',
                    group='publisher_notifier')
cfg.CONF.import_opt('store_events', 'ceilometer.notification',
                    group='notification')


LOG = log.getLogger(__name__)


class CollectorService(service_base.ServiceBase):
    """Listener for the collector service."""
    def start(self):
        """Bind the UDP socket and handle incoming data."""
        # ensure dispatcher is configured before starting other services
        dispatcher_managers = dispatcher.load_dispatcher_manager()
        (self.meter_manager, self.event_manager) = dispatcher_managers
        self.sample_listener = None
        self.event_listener = None
        self.udp_thread = None
        super(CollectorService, self).start()

        if cfg.CONF.collector.udp_address:
            self.udp_thread = utils.spawn_thread(self.start_udp)

        transport = messaging.get_transport(optional=True)
        if transport:
            if list(self.meter_manager):
                sample_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.metering_topic)
                self.sample_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [sample_target],
                        [SampleEndpoint(self.meter_manager)],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.sample_listener.start()

            if cfg.CONF.notification.store_events and list(self.event_manager):
                event_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.event_topic)
                self.event_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [event_target],
                        [EventEndpoint(
                            EventDispatcherVerificator(self.event_manager))],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.event_listener.start()

    def start_udp(self):
        address_family = socket.AF_INET
        if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address):
            address_family = socket.AF_INET6
        udp = socket.socket(address_family, socket.SOCK_DGRAM)
        udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        udp.bind((cfg.CONF.collector.udp_address,
                  cfg.CONF.collector.udp_port))

        self.udp_run = True
        while self.udp_run:
            # NOTE(jd) Arbitrary limit of 64K because that ought to be
            # enough for anybody.
            data, source = udp.recvfrom(64 * units.Ki)
            try:
                sample = msgpack.loads(data, encoding='utf-8')
            except Exception:
                LOG.warning(_("UDP: Cannot decode data sent by %s"), source)
            else:
                try:
                    LOG.debug("UDP: Storing %s", sample)
                    self.meter_manager.map_method(
                        'verify_and_record_metering_data', sample)
                except Exception:
                    LOG.exception(_("UDP: Unable to store meter"))

    def stop(self):
        if self.sample_listener:
            utils.kill_listeners([self.sample_listener])
        if self.event_listener:
            utils.kill_listeners([self.event_listener])
        if self.udp_thread:
            self.udp_run = False
            self.udp_thread.join()
        super(CollectorService, self).stop()


class CollectorEndpoint(object):
    def __init__(self, dispatcher_manager):
        self.dispatcher_manager = dispatcher_manager

    def sample(self, messages):
        """RPC endpoint for notification messages.

        When another service sends a notification over the message
        bus, this method receives it.
        """
        samples = list(chain.from_iterable(m["payload"] for m in messages))
        try:
            self.dispatcher_manager.map_method(self.method, samples)
        except Exception:
            LOG.exception(_LE("Dispatcher failed to handle the %s, "
                              "requeue it."), self.ep_type)
            return oslo_messaging.NotificationResult.REQUEUE


class SampleEndpoint(CollectorEndpoint):
    method = 'verify_and_record_metering_data'
    ep_type = 'sample'


class EventDispatcherVerificator(object):
    def __init__(self, dispatcher):
        self.dispatcher = dispatcher

    def verify_and_record_events(self, events):
        """Verify event signatures and record the events."""
        goods = []
        for event in events:
            if utils.verify_signature(
                    event, cfg.CONF.publisher.telemetry_secret):
                goods.append(event)
            else:
                LOG.warning(_LW(
                    'event signature invalid, discarding event: %s'), event)
        return self.dispatcher.record_events(goods)


class EventEndpoint(CollectorEndpoint):
    method = 'verify_and_record_events'
    ep_type = 'event'
|
|
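The UDP path above simply unpacks whatever msgpack mapping arrives on the socket and hands it to the meter dispatchers, so any sender that speaks msgpack can feed it. Below is a minimal sender sketch, assuming the msgpack library is installed and the collector is listening on the default address and port; the sample fields are illustrative assumptions, not a schema the collector enforces here.

import socket

import msgpack

# Hypothetical sample payload; the field names are assumptions for the
# example only.
sample = {
    'counter_name': 'cpu',
    'counter_type': 'cumulative',
    'counter_unit': 'ns',
    'counter_volume': 12345,
    'resource_id': 'instance-0001',
}

udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# 4952 is the default collector.udp_port registered in OPTS above.
udp.sendto(msgpack.dumps(sample), ('127.0.0.1', 4952))
udp.close()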
@@ -1,87 +0,0 @@
#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_utils import timeutils

from ceilometer.agent import plugin_base
from ceilometer import nova_client

OPTS = [
    cfg.BoolOpt('workload_partitioning',
                default=False,
                help='Enable work-load partitioning, allowing multiple '
                     'compute agents to be run simultaneously.'),
    cfg.IntOpt('resource_update_interval',
               default=0,
               min=0,
               help="New instances will be discovered periodically based"
                    " on this option (in seconds). By default, "
                    "the agent discovers instances according to the "
                    "pipeline polling interval. If this option is greater "
                    "than 0, the instance list to poll will be updated "
                    "based on this option's interval. Measurements "
                    "relating to the instances will match the intervals "
                    "defined in the pipeline.")
]
cfg.CONF.register_opts(OPTS, group='compute')


class InstanceDiscovery(plugin_base.DiscoveryBase):
    def __init__(self):
        super(InstanceDiscovery, self).__init__()
        self.nova_cli = nova_client.Client()
        self.last_run = None
        self.instances = {}
        self.expiration_time = cfg.CONF.compute.resource_update_interval

    def discover(self, manager, param=None):
        """Discover resources to monitor."""
        secs_from_last_update = 0
        if self.last_run:
            secs_from_last_update = timeutils.delta_seconds(
                self.last_run, timeutils.utcnow(True))

        instances = []
        # NOTE(ityaptin) We make a nova request only if this is the first
        # discovery or the cached resource list has expired.
        if not self.last_run or secs_from_last_update >= self.expiration_time:
            try:
                utc_now = timeutils.utcnow(True)
                since = self.last_run.isoformat() if self.last_run else None
                instances = self.nova_cli.instance_get_all_by_host(
                    cfg.CONF.host, since)
                self.last_run = utc_now
            except Exception:
                # NOTE(zqfan): instance_get_all_by_host is wrapped and will
                # log the exception when there is any error. There is no
                # need to raise it again and print it one more time.
                return []

        for instance in instances:
            if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted',
                                                                  'error']:
                self.instances.pop(instance.id, None)
            else:
                self.instances[instance.id] = instance

        return self.instances.values()

    @property
    def group_id(self):
        if cfg.CONF.compute.workload_partitioning:
            return cfg.CONF.host
        else:
            return None
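As a standalone illustration of the cache update rule in discover() above -- instances whose vm_state is deleted or error are evicted, everything else is upserted -- here is the same logic run on plain dictionaries; the instance records are invented for the example.

# Dependency-free sketch of InstanceDiscovery's cache update rule.
cache = {'a': {'vm_state': 'active'}}
polled = [
    {'id': 'a', 'vm_state': 'deleted'},  # evicted from the cache
    {'id': 'b', 'vm_state': 'active'},   # upserted into the cache
]

for inst in polled:
    if inst['vm_state'] in ['deleted', 'error']:
        cache.pop(inst['id'], None)
    else:
        cache[inst['id']] = inst

assert list(cache) == ['b']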
@@ -1,41 +0,0 @@
#
# Copyright 2013 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
import oslo_messaging

from ceilometer.agent import plugin_base


OPTS = [
    cfg.StrOpt('nova_control_exchange',
               default='nova',
               help="Exchange name for Nova notifications."),
]


cfg.CONF.register_opts(OPTS)


class ComputeNotificationBase(plugin_base.NotificationBase):
    def get_targets(self, conf):
        """Return a sequence of oslo_messaging.Target.

        This sequence defines the exchange and topics to be connected to
        by this plugin.
        """
        return [oslo_messaging.Target(topic=topic,
                                      exchange=conf.nova_control_exchange)
                for topic in self.get_notification_topics(conf)]
@@ -1,89 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing compute sample messages from notification events.
"""

import abc

import six

from ceilometer.agent import plugin_base
from ceilometer.compute import notifications
from ceilometer.compute import util
from ceilometer import sample


@six.add_metaclass(abc.ABCMeta)
class UserMetadataAwareInstanceNotificationBase(
        notifications.ComputeNotificationBase):
    """Consumes notifications containing instance user metadata."""

    def process_notification(self, message):
        instance_properties = self.get_instance_properties(message)
        if isinstance(instance_properties.get('metadata'), dict):
            src_metadata = instance_properties['metadata']
            del instance_properties['metadata']
            util.add_reserved_user_metadata(src_metadata, instance_properties)
        return self.get_sample(message)

    def get_instance_properties(self, message):
        """Retrieve instance properties from notification payload."""
        return message['payload']

    @abc.abstractmethod
    def get_sample(self, message):
        """Derive sample from notification payload."""


class InstanceScheduled(UserMetadataAwareInstanceNotificationBase,
                        plugin_base.NonMetricNotificationBase):
    event_types = ['scheduler.run_instance.scheduled']

    def get_instance_properties(self, message):
        """Retrieve instance properties from notification payload."""
        return message['payload']['request_spec']['instance_properties']

    def get_sample(self, message):
        yield sample.Sample.from_notification(
            name='instance.scheduled',
            type=sample.TYPE_DELTA,
            volume=1,
            unit='instance',
            user_id=None,
            project_id=message['payload']['request_spec']
            ['instance_properties']['project_id'],
            resource_id=message['payload']['instance_id'],
            message=message)


class ComputeInstanceNotificationBase(
        UserMetadataAwareInstanceNotificationBase):
    """Convert compute.instance.* notifications into Samples."""
    event_types = ['compute.instance.*']


class Instance(ComputeInstanceNotificationBase,
               plugin_base.NonMetricNotificationBase):
    def get_sample(self, message):
        yield sample.Sample.from_notification(
            name='instance',
            type=sample.TYPE_GAUGE,
            unit='instance',
            volume=1,
            user_id=message['payload']['user_id'],
            project_id=message['payload']['tenant_id'],
            resource_id=message['payload']['instance_id'],
            message=message)
@@ -1,77 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_utils import timeutils
import six

from ceilometer.agent import plugin_base
from ceilometer.compute.virt import inspector as virt_inspector


@six.add_metaclass(abc.ABCMeta)
class BaseComputePollster(plugin_base.PollsterBase):

    def setup_environment(self):
        super(BaseComputePollster, self).setup_environment()
        # propagate exception from check_sanity
        self.inspector.check_sanity()

    @property
    def inspector(self):
        try:
            inspector = self._inspector
        except AttributeError:
            inspector = virt_inspector.get_hypervisor_inspector()
            BaseComputePollster._inspector = inspector
        return inspector

    @property
    def default_discovery(self):
        return 'local_instances'

    @staticmethod
    def _populate_cache_create(_i_cache, _instance, _inspector,
                               _DiskData, _inspector_attr, _stats_attr):
        """Populate and return the per-instance cache entry."""
        if _instance.id not in _i_cache:
            _data = 0
            _per_device_data = {}
            disk_rates = getattr(_inspector, _inspector_attr)(_instance)
            for disk, stats in disk_rates:
                _data += getattr(stats, _stats_attr)
                _per_device_data[disk.device] = (
                    getattr(stats, _stats_attr))
            _per_disk_data = {
                _stats_attr: _per_device_data
            }
            _i_cache[_instance.id] = _DiskData(
                _data,
                _per_disk_data
            )
        return _i_cache[_instance.id]

    def _record_poll_time(self):
        """Record the current time as the poll time.

        :return: time in seconds since the last poll time was recorded
        """
        current_time = timeutils.utcnow()
        duration = None
        if hasattr(self, '_last_poll_time'):
            duration = timeutils.delta_seconds(self._last_poll_time,
                                               current_time)
        self._last_poll_time = current_time
        return duration
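_record_poll_time() returns None on the first call and the elapsed seconds on later calls, which is what lets the rate pollsters below size their inspection window. A standalone sketch of the same contract, using time.monotonic() instead of timeutils to stay dependency-free:

import time


class PollTimer(object):
    """Mirrors the contract of BaseComputePollster._record_poll_time()."""

    def record_poll_time(self):
        now = time.monotonic()
        duration = None  # first poll: no previous timestamp yet
        if hasattr(self, '_last_poll_time'):
            duration = now - self._last_poll_time
        self._last_poll_time = now
        return duration


timer = PollTimer()
assert timer.record_poll_time() is None  # first call
time.sleep(0.1)
assert timer.record_poll_time() > 0      # seconds elapsed since last call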
@@ -1,93 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
from ceilometer import sample

LOG = log.getLogger(__name__)


class CPUPollster(pollsters.BaseComputePollster):

    def get_samples(self, manager, cache, resources):
        for instance in resources:
            LOG.debug('checking instance %s', instance.id)
            try:
                cpu_info = self.inspector.inspect_cpus(instance)
                LOG.debug("CPUTIME USAGE: %(instance)s %(time)d",
                          {'instance': instance,
                           'time': cpu_info.time})
                cpu_num = {'cpu_number': cpu_info.number}
                yield util.make_sample_from_instance(
                    instance,
                    name='cpu',
                    type=sample.TYPE_CUMULATIVE,
                    unit='ns',
                    volume=cpu_info.time,
                    additional_metadata=cpu_num,
                )
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('Obtaining CPU time is not implemented for %s',
                          self.inspector.__class__.__name__)
            except Exception as err:
                LOG.exception(_('could not get CPU time for %(id)s: %(e)s'),
                              {'id': instance.id, 'e': err})


class CPUUtilPollster(pollsters.BaseComputePollster):

    def get_samples(self, manager, cache, resources):
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            LOG.debug('Checking CPU util for instance %s', instance.id)
            try:
                cpu_info = self.inspector.inspect_cpu_util(
                    instance, self._inspection_duration)
                LOG.debug("CPU UTIL: %(instance)s %(util)d",
                          {'instance': instance,
                           'util': cpu_info.util})
                yield util.make_sample_from_instance(
                    instance,
                    name='cpu_util',
                    type=sample.TYPE_GAUGE,
                    unit='%',
                    volume=cpu_info.util,
                )
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('Obtaining CPU Util is not implemented for %s',
                          self.inspector.__class__.__name__)
            except Exception as err:
                LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'),
                              {'id': instance.id, 'e': err})
@@ -1,694 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections

from oslo_log import log
import six

import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
from ceilometer import sample

LOG = log.getLogger(__name__)


DiskIOData = collections.namedtuple(
    'DiskIOData',
    'r_bytes r_requests w_bytes w_requests per_disk_requests',
)

DiskRateData = collections.namedtuple('DiskRateData',
                                      ['read_bytes_rate',
                                       'read_requests_rate',
                                       'write_bytes_rate',
                                       'write_requests_rate',
                                       'per_disk_rate'])

DiskLatencyData = collections.namedtuple('DiskLatencyData',
                                         ['disk_latency',
                                          'per_disk_latency'])

DiskIOPSData = collections.namedtuple('DiskIOPSData',
                                      ['iops_count',
                                       'per_disk_iops'])

DiskInfoData = collections.namedtuple('DiskInfoData',
                                      ['capacity',
                                       'allocation',
                                       'physical',
                                       'per_disk_info'])


@six.add_metaclass(abc.ABCMeta)
class _Base(pollsters.BaseComputePollster):

    DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:",
                                     "%s %s:",
                                     "read-requests=%d",
                                     "read-bytes=%d",
                                     "write-requests=%d",
                                     "write-bytes=%d",
                                     "errors=%d",
                                     ])

    CACHE_KEY_DISK = 'diskio'

    def _populate_cache(self, inspector, cache, instance):
        i_cache = cache.setdefault(self.CACHE_KEY_DISK, {})
        if instance.id not in i_cache:
            r_bytes = 0
            r_requests = 0
            w_bytes = 0
            w_requests = 0
            per_device_read_bytes = {}
            per_device_read_requests = {}
            per_device_write_bytes = {}
            per_device_write_requests = {}
            for disk, info in inspector.inspect_disks(instance):
                LOG.debug(self.DISKIO_USAGE_MESSAGE,
                          instance, disk.device, info.read_requests,
                          info.read_bytes, info.write_requests,
                          info.write_bytes, info.errors)
                r_bytes += info.read_bytes
                r_requests += info.read_requests
                w_bytes += info.write_bytes
                w_requests += info.write_requests
                # per disk data
                per_device_read_bytes[disk.device] = info.read_bytes
                per_device_read_requests[disk.device] = info.read_requests
                per_device_write_bytes[disk.device] = info.write_bytes
                per_device_write_requests[disk.device] = info.write_requests
            per_device_requests = {
                'read_bytes': per_device_read_bytes,
                'read_requests': per_device_read_requests,
                'write_bytes': per_device_write_bytes,
                'write_requests': per_device_write_requests,
            }
            i_cache[instance.id] = DiskIOData(
                r_bytes=r_bytes,
                r_requests=r_requests,
                w_bytes=w_bytes,
                w_requests=w_requests,
                per_disk_requests=per_device_requests,
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, c_data):
        """Return one or more Sample."""

    @staticmethod
    def _get_sample_read_and_write(instance, _name, _unit, c_data,
                                   _volume, _metadata):
        """Read/write pollster: return a single Sample."""
        return [util.make_sample_from_instance(
            instance,
            name=_name,
            type=sample.TYPE_CUMULATIVE,
            unit=_unit,
            volume=getattr(c_data, _volume),
            additional_metadata={
                'device': c_data.per_disk_requests[_metadata].keys()},
        )]

    @staticmethod
    def _get_samples_per_device(c_data, _attr, instance, _name, _unit):
        """Return one or more Samples for meter 'disk.device.*'."""
        samples = []
        for disk, value in six.iteritems(c_data.per_disk_requests[_attr]):
            samples.append(util.make_sample_from_instance(
                instance,
                name=_name,
                type=sample.TYPE_CUMULATIVE,
                unit=_unit,
                volume=value,
                resource_id="%s-%s" % (instance.id, disk),
                additional_metadata={'disk_name': disk},
            ))
        return samples

    def get_samples(self, manager, cache, resources):
        for instance in resources:
            instance_name = util.instance_name(instance)
            try:
                c_data = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for s in self._get_samples(instance, c_data):
                    yield s
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})


class ReadRequestsPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_sample_read_and_write(
            instance, 'disk.read.requests', 'request', c_data,
            'r_requests', 'read_requests')


class PerDeviceReadRequestsPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_samples_per_device(
            c_data, 'read_requests', instance,
            'disk.device.read.requests', 'request')


class ReadBytesPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_sample_read_and_write(
            instance, 'disk.read.bytes', 'B', c_data,
            'r_bytes', 'read_bytes')


class PerDeviceReadBytesPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_samples_per_device(
            c_data, 'read_bytes', instance,
            'disk.device.read.bytes', 'B')


class WriteRequestsPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_sample_read_and_write(
            instance, 'disk.write.requests', 'request',
            c_data, 'w_requests', 'write_requests')


class PerDeviceWriteRequestsPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_samples_per_device(
            c_data, 'write_requests', instance,
            'disk.device.write.requests', 'request')


class WriteBytesPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_sample_read_and_write(
            instance, 'disk.write.bytes', 'B',
            c_data, 'w_bytes', 'write_bytes')


class PerDeviceWriteBytesPollster(_Base):

    def _get_samples(self, instance, c_data):
        return self._get_samples_per_device(
            c_data, 'write_bytes', instance,
            'disk.device.write.bytes', 'B')


@six.add_metaclass(abc.ABCMeta)
class _DiskRatesPollsterBase(pollsters.BaseComputePollster):

    CACHE_KEY_DISK_RATE = 'diskio-rate'

    def _populate_cache(self, inspector, cache, instance):
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {})
        if instance.id not in i_cache:
            r_bytes_rate = 0
            r_requests_rate = 0
            w_bytes_rate = 0
            w_requests_rate = 0
            per_disk_r_bytes_rate = {}
            per_disk_r_requests_rate = {}
            per_disk_w_bytes_rate = {}
            per_disk_w_requests_rate = {}
            disk_rates = inspector.inspect_disk_rates(
                instance, self._inspection_duration)
            for disk, info in disk_rates:
                r_bytes_rate += info.read_bytes_rate
                r_requests_rate += info.read_requests_rate
                w_bytes_rate += info.write_bytes_rate
                w_requests_rate += info.write_requests_rate

                per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate
                per_disk_r_requests_rate[disk.device] = info.read_requests_rate
                per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate
                per_disk_w_requests_rate[disk.device] = (
                    info.write_requests_rate)
            per_disk_rate = {
                'read_bytes_rate': per_disk_r_bytes_rate,
                'read_requests_rate': per_disk_r_requests_rate,
                'write_bytes_rate': per_disk_w_bytes_rate,
                'write_requests_rate': per_disk_w_requests_rate,
            }
            i_cache[instance.id] = DiskRateData(
                r_bytes_rate,
                r_requests_rate,
                w_bytes_rate,
                w_requests_rate,
                per_disk_rate
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            try:
                disk_rates_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_rate in self._get_samples(instance, disk_rates_info):
                    yield disk_rate
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})

    def _get_samples_per_device(self, disk_rates_info, _attr, instance,
                                _name, _unit):
        """Return one or more Samples for meter 'disk.device.*'."""
        samples = []
        for disk, value in six.iteritems(disk_rates_info.per_disk_rate[
                _attr]):
            samples.append(util.make_sample_from_instance(
                instance,
                name=_name,
                type=sample.TYPE_GAUGE,
                unit=_unit,
                volume=value,
                resource_id="%s-%s" % (instance.id, disk),
                additional_metadata={'disk_name': disk},
            ))
        return samples

    def _get_sample_read_and_write(self, instance, _name, _unit, _element,
                                   _attr1, _attr2):
        """Read/write pollster: return a single Sample."""
        return [util.make_sample_from_instance(
            instance,
            name=_name,
            type=sample.TYPE_GAUGE,
            unit=_unit,
            volume=getattr(_element, _attr1),
            additional_metadata={
                'device': getattr(_element, _attr2)[_attr1].keys()},
        )]


class ReadBytesRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_sample_read_and_write(
            instance, 'disk.read.bytes.rate', 'B/s', disk_rates_info,
            'read_bytes_rate', 'per_disk_rate')


class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_samples_per_device(
            disk_rates_info, 'read_bytes_rate', instance,
            'disk.device.read.bytes.rate', 'B/s')


class ReadRequestsRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_sample_read_and_write(
            instance, 'disk.read.requests.rate', 'requests/s',
            disk_rates_info, 'read_requests_rate', 'per_disk_rate')


class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_samples_per_device(
            disk_rates_info, 'read_requests_rate', instance,
            'disk.device.read.requests.rate', 'requests/s')


class WriteBytesRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_sample_read_and_write(
            instance, 'disk.write.bytes.rate', 'B/s', disk_rates_info,
            'write_bytes_rate', 'per_disk_rate')


class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_samples_per_device(
            disk_rates_info, 'write_bytes_rate', instance,
            'disk.device.write.bytes.rate', 'B/s')


class WriteRequestsRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_sample_read_and_write(
            instance, 'disk.write.requests.rate', 'requests/s',
            disk_rates_info, 'write_requests_rate', 'per_disk_rate')


class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase):

    def _get_samples(self, instance, disk_rates_info):
        return self._get_samples_per_device(
            disk_rates_info, 'write_requests_rate', instance,
            'disk.device.write.requests.rate', 'requests/s')


@six.add_metaclass(abc.ABCMeta)
class _DiskLatencyPollsterBase(pollsters.BaseComputePollster):

    CACHE_KEY_DISK_LATENCY = 'disk-latency'

    def _populate_cache(self, inspector, cache, instance):
        return self._populate_cache_create(
            cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {}),
            instance, inspector, DiskLatencyData,
            'inspect_disk_latency', 'disk_latency')

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        for instance in resources:
            try:
                disk_latency_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_latency in self._get_samples(instance,
                                                      disk_latency_info):
                    yield disk_latency
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})


class DiskLatencyPollster(_DiskLatencyPollsterBase):

    def _get_samples(self, instance, disk_latency_info):
        return [util.make_sample_from_instance(
            instance,
            name='disk.latency',
            type=sample.TYPE_GAUGE,
            unit='ms',
            volume=disk_latency_info.disk_latency / 1000
        )]


class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase):

    def _get_samples(self, instance, disk_latency_info):
        samples = []
        for disk, value in six.iteritems(disk_latency_info.per_disk_latency[
                'disk_latency']):
            samples.append(util.make_sample_from_instance(
                instance,
                name='disk.device.latency',
                type=sample.TYPE_GAUGE,
                unit='ms',
                volume=value / 1000,
                resource_id="%s-%s" % (instance.id, disk),
                additional_metadata={'disk_name': disk},
            ))
        return samples


@six.add_metaclass(abc.ABCMeta)
class _DiskIOPSPollsterBase(pollsters.BaseComputePollster):

    CACHE_KEY_DISK_IOPS = 'disk-iops'

    def _populate_cache(self, inspector, cache, instance):
        return self._populate_cache_create(
            cache.setdefault(self.CACHE_KEY_DISK_IOPS, {}),
            instance, inspector, DiskIOPSData,
            'inspect_disk_iops', 'iops_count')

    @abc.abstractmethod
    def _get_samples(self, instance, disk_rates_info):
        """Return one or more Sample."""

    def get_samples(self, manager, cache, resources):
        for instance in resources:
            try:
                disk_iops_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_iops in self._get_samples(instance,
                                                   disk_iops_info):
                    yield disk_iops
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})


class DiskIOPSPollster(_DiskIOPSPollsterBase):

    def _get_samples(self, instance, disk_iops_info):
        return [util.make_sample_from_instance(
            instance,
            name='disk.iops',
            type=sample.TYPE_GAUGE,
            unit='count/s',
            volume=disk_iops_info.iops_count
        )]


class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase):

    def _get_samples(self, instance, disk_iops_info):
        samples = []
        for disk, value in six.iteritems(disk_iops_info.per_disk_iops[
                'iops_count']):
            samples.append(util.make_sample_from_instance(
                instance,
                name='disk.device.iops',
                type=sample.TYPE_GAUGE,
                unit='count/s',
                volume=value,
                resource_id="%s-%s" % (instance.id, disk),
                additional_metadata={'disk_name': disk},
            ))
        return samples


@six.add_metaclass(abc.ABCMeta)
class _DiskInfoPollsterBase(pollsters.BaseComputePollster):

    CACHE_KEY_DISK_INFO = 'diskinfo'

    def _populate_cache(self, inspector, cache, instance):
        i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {})
        if instance.id not in i_cache:
            all_capacity = 0
            all_allocation = 0
            all_physical = 0
            per_disk_capacity = {}
            per_disk_allocation = {}
            per_disk_physical = {}
            disk_info = inspector.inspect_disk_info(
                instance)
            for disk, info in disk_info:
                all_capacity += info.capacity
                all_allocation += info.allocation
                all_physical += info.physical

                per_disk_capacity[disk.device] = info.capacity
                per_disk_allocation[disk.device] = info.allocation
                per_disk_physical[disk.device] = info.physical
            per_disk_info = {
                'capacity': per_disk_capacity,
                'allocation': per_disk_allocation,
                'physical': per_disk_physical,
            }
            i_cache[instance.id] = DiskInfoData(
                all_capacity,
                all_allocation,
                all_physical,
                per_disk_info
            )
        return i_cache[instance.id]

    @abc.abstractmethod
    def _get_samples(self, instance, disk_info):
        """Return one or more Sample."""

    def _get_samples_per_device(self, disk_info, _attr, instance, _name):
        """Return one or more Samples for meter 'disk.device.*'."""
        samples = []
        for disk, value in six.iteritems(disk_info.per_disk_info[_attr]):
            samples.append(util.make_sample_from_instance(
                instance,
                name=_name,
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=value,
                resource_id="%s-%s" % (instance.id, disk),
                additional_metadata={'disk_name': disk},
            ))
        return samples

    def _get_samples_task(self, instance, _name, disk_info, _attr1, _attr2):
        """Return one or more Samples for meter 'disk.task.*'."""
        return [util.make_sample_from_instance(
            instance,
            name=_name,
            type=sample.TYPE_GAUGE,
            unit='B',
            volume=getattr(disk_info, _attr1),
            additional_metadata={
                'device': disk_info.per_disk_info[_attr2].keys()},
        )]

    def get_samples(self, manager, cache, resources):
        for instance in resources:
            try:
                disk_size_info = self._populate_cache(
                    self.inspector,
                    cache,
                    instance,
                )
                for disk_info in self._get_samples(instance, disk_size_info):
                    yield disk_info
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s '
                                '(%(instance_id)s): %(error)s'),
                              {'name': instance_name,
                               'instance_id': instance.id,
                               'error': err})


class CapacityPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_task(
            instance, 'disk.capacity', disk_info,
            'capacity', 'capacity')


class PerDeviceCapacityPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_per_device(
            disk_info, 'capacity', instance, 'disk.device.capacity')


class AllocationPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_task(
            instance, 'disk.allocation', disk_info,
            'allocation', 'allocation')


class PerDeviceAllocationPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_per_device(
            disk_info, 'allocation', instance, 'disk.device.allocation')


class PhysicalPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_task(
            instance, 'disk.usage', disk_info,
            'physical', 'physical')


class PerDevicePhysicalPollster(_DiskInfoPollsterBase):

    def _get_samples(self, instance, disk_info):
        return self._get_samples_per_device(
            disk_info, 'physical', instance, 'disk.device.usage')
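The _populate_cache helpers above all follow the same shape: one pass over the inspector's (disk, stats) pairs, producing instance-wide totals plus a per-device breakdown, memoized per instance for the polling cycle. A dependency-free sketch of that aggregation; the disk names and numbers are invented:

import collections

DiskStats = collections.namedtuple('DiskStats', ['device', 'read_bytes'])

# Hypothetical inspector output for one instance.
stats = [DiskStats('vda', 4096), DiskStats('vdb', 1024)]

total = 0
per_device = {}
for s in stats:
    total += s.read_bytes           # instance-wide cumulative total
    per_device[s.device] = s.read_bytes  # per-device breakdown

assert total == 5120
assert per_device == {'vda': 4096, 'vdb': 1024}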
@@ -1,33 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer import sample


class InstancePollster(pollsters.BaseComputePollster):

    @staticmethod
    def get_samples(manager, cache, resources):
        for instance in resources:
            yield util.make_sample_from_instance(
                instance,
                name='instance',
                type=sample.TYPE_GAUGE,
                unit='instance',
                volume=1,
            )
@@ -1,110 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LE, _LW
from ceilometer import sample

LOG = log.getLogger(__name__)


class MemoryUsagePollster(pollsters.BaseComputePollster):

    def get_samples(self, manager, cache, resources):
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            LOG.debug('Checking memory usage for instance %s', instance.id)
            try:
                memory_info = self.inspector.inspect_memory_usage(
                    instance, self._inspection_duration)
                LOG.debug("MEMORY USAGE: %(instance)s %(usage)f",
                          {'instance': instance,
                           'usage': memory_info.usage})
                yield util.make_sample_from_instance(
                    instance,
                    name='memory.usage',
                    type=sample.TYPE_GAUGE,
                    unit='MB',
                    volume=memory_info.usage,
                )
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except virt_inspector.NoDataException as e:
                LOG.warning(_LW('Cannot inspect data of %(pollster)s for '
                                '%(instance_id)s, non-fatal reason: %(exc)s'),
                            {'pollster': self.__class__.__name__,
                             'instance_id': instance.id, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('Obtaining Memory Usage is not implemented for %s',
                          self.inspector.__class__.__name__)
            except Exception as err:
                LOG.exception(_('Could not get Memory Usage for '
                                '%(id)s: %(e)s'), {'id': instance.id,
                                                   'e': err})


class MemoryResidentPollster(pollsters.BaseComputePollster):

    def get_samples(self, manager, cache, resources):
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            LOG.debug('Checking resident memory for instance %s',
                      instance.id)
            try:
                memory_info = self.inspector.inspect_memory_resident(
                    instance, self._inspection_duration)
                LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f",
                          {'instance': instance,
                           'resident': memory_info.resident})
                yield util.make_sample_from_instance(
                    instance,
                    name='memory.resident',
                    type=sample.TYPE_GAUGE,
                    unit='MB',
                    volume=memory_info.resident,
                )
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except virt_inspector.NoDataException as e:
                LOG.warning(_LW('Cannot inspect data of %(pollster)s for '
                                '%(instance_id)s, non-fatal reason: %(exc)s'),
                            {'pollster': self.__class__.__name__,
                             'instance_id': instance.id, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('Obtaining Resident Memory is not implemented'
                          ' for %s', self.inspector.__class__.__name__)
            except Exception as err:
                LOG.exception(_LE('Could not get Resident Memory Usage for '
                                  '%(id)s: %(e)s'), {'id': instance.id,
                                                     'e': err})
@@ -1,210 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_log import log

import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
from ceilometer import sample

LOG = log.getLogger(__name__)


class _Base(pollsters.BaseComputePollster):

    NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d",
                                  "write-bytes=%d"])

    @staticmethod
    def make_vnic_sample(instance, name, type, unit, volume, vnic_data):
        metadata = copy.copy(vnic_data)
        additional_metadata = dict(zip(metadata._fields, metadata))
        if vnic_data.fref is not None:
            rid = vnic_data.fref
            additional_metadata['vnic_name'] = vnic_data.fref
        else:
            instance_name = util.instance_name(instance)
            rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name)
            additional_metadata['vnic_name'] = vnic_data.name

        return util.make_sample_from_instance(
            instance=instance,
            name=name,
            type=type,
            unit=unit,
            volume=volume,
            resource_id=rid,
            additional_metadata=additional_metadata
        )

    CACHE_KEY_VNIC = 'vnics'

    def _get_vnic_info(self, inspector, instance):
        return inspector.inspect_vnics(instance)

    @staticmethod
    def _get_rx_info(info):
        return info.rx_bytes

    @staticmethod
    def _get_tx_info(info):
        return info.tx_bytes

    def _get_vnics_for_instance(self, cache, inspector, instance):
        i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {})
        if instance.id not in i_cache:
            i_cache[instance.id] = list(
                self._get_vnic_info(inspector, instance)
            )
        return i_cache[instance.id]

    def get_samples(self, manager, cache, resources):
        self._inspection_duration = self._record_poll_time()
        for instance in resources:
            instance_name = util.instance_name(instance)
            LOG.debug('checking net info for instance %s', instance.id)
            try:
                vnics = self._get_vnics_for_instance(
                    cache,
                    self.inspector,
                    instance,
                )
                for vnic, info in vnics:
                    LOG.debug(self.NET_USAGE_MESSAGE, instance_name,
                              vnic.name, self._get_rx_info(info),
                              self._get_tx_info(info))
                    yield self._get_sample(instance, vnic, info)
            except virt_inspector.InstanceNotFoundException as err:
                # Instance was deleted while getting samples. Ignore it.
                LOG.debug('Exception while getting samples %s', err)
            except virt_inspector.InstanceShutOffException as e:
                LOG.debug('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s',
                          {'instance_id': instance.id,
                           'pollster': self.__class__.__name__, 'exc': e})
            except ceilometer.NotImplementedError:
                # Selected inspector does not implement this pollster.
                LOG.debug('%(inspector)s does not provide data for '
                          '%(pollster)s',
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                              {'name': instance_name, 'error': err})


class _RateBase(_Base):

    NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:",
                                  "read-bytes-rate=%d",
                                  "write-bytes-rate=%d"])

    CACHE_KEY_VNIC = 'vnic-rates'

    def _get_vnic_info(self, inspector, instance):
        return inspector.inspect_vnic_rates(instance,
                                            self._inspection_duration)

    @staticmethod
    def _get_rx_info(info):
        return info.rx_bytes_rate

    @staticmethod
    def _get_tx_info(info):
        return info.tx_bytes_rate


class IncomingBytesPollster(_Base):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.incoming.bytes',
            type=sample.TYPE_CUMULATIVE,
            unit='B',
            volume=info.rx_bytes,
            vnic_data=vnic,
        )


class IncomingPacketsPollster(_Base):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.incoming.packets',
            type=sample.TYPE_CUMULATIVE,
            unit='packet',
            volume=info.rx_packets,
            vnic_data=vnic,
        )


class OutgoingBytesPollster(_Base):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.outgoing.bytes',
            type=sample.TYPE_CUMULATIVE,
            unit='B',
            volume=info.tx_bytes,
            vnic_data=vnic,
        )


class OutgoingPacketsPollster(_Base):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.outgoing.packets',
            type=sample.TYPE_CUMULATIVE,
            unit='packet',
            volume=info.tx_packets,
            vnic_data=vnic,
        )


class IncomingBytesRatePollster(_RateBase):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.incoming.bytes.rate',
            type=sample.TYPE_GAUGE,
            unit='B/s',
            volume=info.rx_bytes_rate,
            vnic_data=vnic,
        )


class OutgoingBytesRatePollster(_RateBase):

    def _get_sample(self, instance, vnic, info):
        return self.make_vnic_sample(
            instance,
            name='network.outgoing.bytes.rate',
            type=sample.TYPE_GAUGE,
            unit='B/s',
            volume=info.tx_bytes_rate,
            vnic_data=vnic,
        )
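make_vnic_sample() above keys the sample on the interface reference (fref) when the inspector reports one, and otherwise synthesizes an id of the form <instance_name>-<instance_id>-<vnic_name>. A standalone sketch of that branch; all identifiers are invented:

import collections

Vnic = collections.namedtuple('Vnic', ['name', 'fref'])


def vnic_resource_id(instance_name, instance_id, vnic):
    # Same rule as make_vnic_sample() above.
    if vnic.fref is not None:
        return vnic.fref
    return "%s-%s-%s" % (instance_name, instance_id, vnic.name)


assert vnic_resource_id('vm1', 'uuid-1',
                        Vnic('tap0', 'port-uuid')) == 'port-uuid'
assert vnic_resource_id('vm1', 'uuid-1',
                        Vnic('tap0', None)) == 'vm1-uuid-1-tap0'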
@ -1,96 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ceilometer.compute import util as compute_util
from ceilometer import sample


INSTANCE_PROPERTIES = [
    # Identity properties
    'reservation_id',
    # Type properties
    'architecture',
    'OS-EXT-AZ:availability_zone',
    'kernel_id',
    'os_type',
    'ramdisk_id',
]


def _get_metadata_from_object(instance):
    """Return a metadata dictionary for the instance."""
    instance_type = instance.flavor['name'] if instance.flavor else None
    metadata = {
        'display_name': instance.name,
        'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''),
        'instance_id': instance.id,
        'instance_type': instance_type,
        'host': instance.hostId,
        'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''),
        'flavor': instance.flavor,
        'status': instance.status.lower(),
        'state': getattr(instance, 'OS-EXT-STS:vm_state', u''),
    }

    # Image properties
    if instance.image:
        metadata['image'] = instance.image
        metadata['image_ref'] = instance.image['id']
        # Images that come through the conductor API in the nova notifier
        # plugin will not have links.
        if instance.image.get('links'):
            metadata['image_ref_url'] = instance.image['links'][0]['href']
        else:
            metadata['image_ref_url'] = None
    else:
        metadata['image'] = None
        metadata['image_ref'] = None
        metadata['image_ref_url'] = None

    for name in INSTANCE_PROPERTIES:
        if hasattr(instance, name):
            metadata[name] = getattr(instance, name)

    metadata['vcpus'] = instance.flavor['vcpus']
    metadata['memory_mb'] = instance.flavor['ram']
    metadata['disk_gb'] = instance.flavor['disk']
    metadata['ephemeral_gb'] = instance.flavor['ephemeral']
    metadata['root_gb'] = (int(metadata['disk_gb']) -
                           int(metadata['ephemeral_gb']))

    return compute_util.add_reserved_user_metadata(instance.metadata, metadata)


def make_sample_from_instance(instance, name, type, unit, volume,
                              resource_id=None, additional_metadata=None):
    additional_metadata = additional_metadata or {}
    resource_metadata = _get_metadata_from_object(instance)
    resource_metadata.update(additional_metadata)
    return sample.Sample(
        name=name,
        type=type,
        unit=unit,
        volume=volume,
        user_id=instance.user_id,
        project_id=instance.tenant_id,
        resource_id=resource_id or instance.id,
        resource_metadata=resource_metadata,
    )


def instance_name(instance):
    """Shortcut to get instance name."""
    return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)
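

# A minimal usage sketch, assuming hypothetical arguments: `server` is a
# novaclient-style Server object (as consumed by the helpers above) and
# `cpu_time` is a cumulative CPU time value obtained from a virt inspector.
# Resource metadata (flavor, image, state, ...) is filled in automatically
# by _get_metadata_from_object().
def _example_cpu_sample(server, cpu_time):
    return make_sample_from_instance(
        server,
        name='cpu',
        type=sample.TYPE_CUMULATIVE,
        unit='ns',
        volume=cpu_time,
    )
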
@@ -1,66 +0,0 @@
#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
import six


# The config options below collect metadata that users define in nova (or
# elsewhere) and store it in the Sample for later use according to the
# deployment's requirements, e.g. as OpenTSDB tags for metrics.
OPTS = [
    cfg.ListOpt('reserved_metadata_namespace',
                default=['metering.'],
                help='List of metadata prefixes reserved for metering use.'),
    cfg.IntOpt('reserved_metadata_length',
               default=256,
               help='Limit on length of reserved metadata values.'),
    cfg.ListOpt('reserved_metadata_keys',
                default=[],
                help='List of metadata keys reserved for metering use. These '
                     'keys are in addition to the ones included in the '
                     'namespace.'),
]

cfg.CONF.register_opts(OPTS)


def add_reserved_user_metadata(src_metadata, dest_metadata):
    limit = cfg.CONF.reserved_metadata_length
    user_metadata = {}
    for prefix in cfg.CONF.reserved_metadata_namespace:
        md = dict(
            (k[len(prefix):].replace('.', '_'),
             v[:limit] if isinstance(v, six.string_types) else v)
            for k, v in src_metadata.items()
            if (k.startswith(prefix) and
                k[len(prefix):].replace('.', '_') not in dest_metadata)
        )
        user_metadata.update(md)

    for metadata_key in cfg.CONF.reserved_metadata_keys:
        md = dict(
            (k.replace('.', '_'),
             v[:limit] if isinstance(v, six.string_types) else v)
            for k, v in src_metadata.items()
            if (k == metadata_key and
                k.replace('.', '_') not in dest_metadata)
        )
        user_metadata.update(md)

    if user_metadata:
        dest_metadata['user_metadata'] = user_metadata

    return dest_metadata
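

# A minimal worked sketch, assuming the default reserved_metadata_namespace
# of ['metering.'] and hypothetical nova metadata: the reserved prefix is
# stripped, dots become underscores, and non-reserved keys are ignored.
def _example_reserved_metadata():
    src = {'metering.stack': 'prod', 'owner': 'ops'}
    meta = add_reserved_user_metadata(src, {'display_name': 'vm-1'})
    assert meta['user_metadata'] == {'stack': 'prod'}
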
@@ -1,159 +0,0 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for Hyper-V."""

import collections
import functools
import sys

from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_utils import units
import six

from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector


def convert_exceptions(function, exception_map):
    expected_exceptions = tuple(exception_map.keys())

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except expected_exceptions as ex:
            # The exception might be a subclass of an expected exception.
            for expected in expected_exceptions:
                if isinstance(ex, expected):
                    raised_exception = exception_map[expected]
                    break

            exc_info = sys.exc_info()
            # NOTE(claudiub): Python 3 raises the exception object given as
            # the second argument in six.reraise.
            # The original message will be maintained by passing the original
            # exception.
            exc = raised_exception(six.text_type(exc_info[1]))
            six.reraise(raised_exception, exc, exc_info[2])
    return wrapper


def decorate_all_methods(decorator, *args, **kwargs):
    def decorate(cls):
        for attr in cls.__dict__:
            class_member = getattr(cls, attr)
            if callable(class_member):
                setattr(cls, attr, decorator(class_member, *args, **kwargs))
        return cls

    return decorate


exception_conversion_map = collections.OrderedDict([
    # NOTE(claudiub): order should be from the most specialized exception type
    # to the most generic exception type.
    # (expected_exception, converted_exception)
    (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException),
    (os_win_exc.OSWinException, virt_inspector.InspectorException),
])

# NOTE(claudiub): the purpose of the decorator below is to prevent any
# os_win exceptions (subclasses of OSWinException) from leaking outside of
# the HyperVInspector.


@decorate_all_methods(convert_exceptions, exception_conversion_map)
class HyperVInspector(virt_inspector.Inspector):

    def __init__(self):
        super(HyperVInspector, self).__init__()
        self._utils = utilsfactory.get_metricsutils()
        self._host_max_cpu_clock = self._compute_host_max_cpu_clock()

    def _compute_host_max_cpu_clock(self):
        hostutils = utilsfactory.get_hostutils()
        # The host's number of CPUs and CPU clock speed will not change.
        cpu_info = hostutils.get_cpus_info()
        host_cpu_count = len(cpu_info)
        host_cpu_clock = cpu_info[0]['MaxClockSpeed']

        return float(host_cpu_clock * host_cpu_count)

    def inspect_cpus(self, instance):
        instance_name = util.instance_name(instance)
        (cpu_clock_used,
         cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name)

        cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock

        # Nanoseconds
        cpu_time = (int(uptime * cpu_percent_used) * units.k)

        return virt_inspector.CPUStats(number=cpu_count, time=cpu_time)

    def inspect_memory_usage(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        usage = self._utils.get_memory_metrics(instance_name)
        return virt_inspector.MemoryUsageStats(usage=usage)

    def inspect_vnics(self, instance):
        instance_name = util.instance_name(instance)
        for vnic_metrics in self._utils.get_vnic_metrics(instance_name):
            interface = virt_inspector.Interface(
                name=vnic_metrics["element_name"],
                mac=vnic_metrics["address"],
                fref=None,
                parameters=None)

            stats = virt_inspector.InterfaceStats(
                rx_bytes=vnic_metrics['rx_mb'] * units.Mi,
                rx_packets=0,
                tx_bytes=vnic_metrics['tx_mb'] * units.Mi,
                tx_packets=0)

            yield (interface, stats)

    def inspect_disks(self, instance):
        instance_name = util.instance_name(instance)
        for disk_metrics in self._utils.get_disk_metrics(instance_name):
            disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
            stats = virt_inspector.DiskStats(
                read_requests=0,
                # Return bytes
                read_bytes=disk_metrics['read_mb'] * units.Mi,
                write_requests=0,
                write_bytes=disk_metrics['write_mb'] * units.Mi,
                errors=0)

            yield (disk, stats)

    def inspect_disk_latency(self, instance):
        instance_name = util.instance_name(instance)
        for disk_metrics in self._utils.get_disk_latency_metrics(
                instance_name):
            disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
            stats = virt_inspector.DiskLatencyStats(
                disk_latency=disk_metrics['disk_latency'])

            yield (disk, stats)

    def inspect_disk_iops(self, instance):
        instance_name = util.instance_name(instance)
        for disk_metrics in self._utils.get_disk_iops_count(instance_name):
            disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
            stats = virt_inspector.DiskIOPSStats(
                iops_count=disk_metrics['iops_count'])

            yield (disk, stats)
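

# Behavior sketch for the decoration applied above (hypothetical call): an
# os_win_exc.NotFound raised inside any HyperVInspector method reaches the
# caller as virt_inspector.InstanceNotFoundException, and any other
# os_win_exc.OSWinException as virt_inspector.InspectorException, so no
# os_win exception types leak out of this module.
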
@@ -1,315 +0,0 @@
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hypervisors."""

import collections

from oslo_config import cfg
from oslo_log import log
from stevedore import driver

import ceilometer
from ceilometer.i18n import _


OPTS = [
    cfg.StrOpt('hypervisor_inspector',
               default='libvirt',
               help='Inspector to use for inspecting the hypervisor layer. '
                    'Known inspectors are libvirt, hyperv, vmware, xenapi '
                    'and powervm.'),
]

cfg.CONF.register_opts(OPTS)


LOG = log.getLogger(__name__)

# Named tuple representing instances.
#
# name: the name of the instance
# UUID: the UUID associated with the instance
#
Instance = collections.namedtuple('Instance', ['name', 'UUID'])


# Named tuple representing CPU statistics.
#
# number: number of CPUs
# time: cumulative CPU time
#
CPUStats = collections.namedtuple('CPUStats', ['number', 'time'])

# Named tuple representing CPU Utilization statistics.
#
# util: CPU utilization in percentage
#
CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util'])

# Named tuple representing Memory usage statistics.
#
# usage: Amount of memory used
#
MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage'])


# Named tuple representing Resident Memory usage statistics.
#
# resident: Amount of resident memory
#
MemoryResidentStats = collections.namedtuple('MemoryResidentStats',
                                             ['resident'])


# Named tuple representing vNICs.
#
# name: the name of the vNIC
# mac: the MAC address
# fref: the filter ref
# parameters: miscellaneous parameters
#
Interface = collections.namedtuple('Interface', ['name', 'mac',
                                                 'fref', 'parameters'])


# Named tuple representing vNIC statistics.
#
# rx_bytes: number of received bytes
# rx_packets: number of received packets
# tx_bytes: number of transmitted bytes
# tx_packets: number of transmitted packets
#
InterfaceStats = collections.namedtuple('InterfaceStats',
                                        ['rx_bytes', 'rx_packets',
                                         'tx_bytes', 'tx_packets'])


# Named tuple representing vNIC rate statistics.
#
# rx_bytes_rate: rate of received bytes
# tx_bytes_rate: rate of transmitted bytes
#
InterfaceRateStats = collections.namedtuple('InterfaceRateStats',
                                            ['rx_bytes_rate',
                                             'tx_bytes_rate'])


# Named tuple representing disks.
#
# device: the device name for the disk
#
Disk = collections.namedtuple('Disk', ['device'])


# Named tuple representing disk statistics.
#
# read_bytes: number of bytes read
# read_requests: number of read operations
# write_bytes: number of bytes written
# write_requests: number of write operations
# errors: number of errors
#
DiskStats = collections.namedtuple('DiskStats',
                                   ['read_bytes', 'read_requests',
                                    'write_bytes', 'write_requests',
                                    'errors'])

# Named tuple representing disk rate statistics.
#
# read_bytes_rate: number of bytes read per second
# read_requests_rate: number of read operations per second
# write_bytes_rate: number of bytes written per second
# write_requests_rate: number of write operations per second
#
DiskRateStats = collections.namedtuple('DiskRateStats',
                                       ['read_bytes_rate',
                                        'read_requests_rate',
                                        'write_bytes_rate',
                                        'write_requests_rate'])

# Named tuple representing disk latency statistics.
#
# disk_latency: average disk latency
#
DiskLatencyStats = collections.namedtuple('DiskLatencyStats',
                                          ['disk_latency'])

# Named tuple representing disk iops statistics.
#
# iops_count: number of iops per second
#
DiskIOPSStats = collections.namedtuple('DiskIOPSStats',
                                       ['iops_count'])


# Named tuple representing disk Information.
#
# capacity: capacity of the disk
# allocation: allocation of the disk
# physical: usage of the disk
#
DiskInfo = collections.namedtuple('DiskInfo',
                                  ['capacity',
                                   'allocation',
                                   'physical'])


# Exception types
#
class InspectorException(Exception):
    def __init__(self, message=None):
        super(InspectorException, self).__init__(message)


class InstanceNotFoundException(InspectorException):
    pass


class InstanceShutOffException(InspectorException):
    pass


class NoDataException(InspectorException):
    pass


class NoSanityException(InspectorException):
    pass


# Main virt inspector abstraction layering over the hypervisor API.
#
class Inspector(object):

    def check_sanity(self):
        """Check the sanity of the hypervisor inspector.

        Each subclass may override this to raise an exception when it
        detects a mis-configured inspector.
        """
        pass

    def inspect_cpus(self, instance):
        """Inspect the CPU statistics for an instance.

        :param instance: the target instance
        :return: the number of CPUs and cumulative CPU time
        """
        raise ceilometer.NotImplementedError

    def inspect_cpu_util(self, instance, duration=None):
        """Inspect the CPU Utilization (%) for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
               inspected
        :return: the percentage of CPU utilization
        """
        raise ceilometer.NotImplementedError

    def inspect_vnics(self, instance):
        """Inspect the vNIC statistics for an instance.

        :param instance: the target instance
        :return: for each vNIC, the number of bytes & packets
                 received and transmitted
        """
        raise ceilometer.NotImplementedError

    def inspect_vnic_rates(self, instance, duration=None):
        """Inspect the vNIC rate statistics for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
               inspected
        :return: for each vNIC, the rate of bytes & packets
                 received and transmitted
        """
        raise ceilometer.NotImplementedError

    def inspect_disks(self, instance):
        """Inspect the disk statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the number of bytes & operations
                 read and written, and the error count
        """
        raise ceilometer.NotImplementedError

    def inspect_memory_usage(self, instance, duration=None):
        """Inspect the memory usage statistics for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
               inspected
        :return: the amount of memory used
        """
        raise ceilometer.NotImplementedError

    def inspect_memory_resident(self, instance, duration=None):
        """Inspect the resident memory statistics for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
               inspected
        :return: the amount of resident memory
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_rates(self, instance, duration=None):
        """Inspect the disk statistics as rates for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
               inspected
        :return: for each disk, the number of bytes & operations
                 read and written per second, with the error count
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_latency(self, instance):
        """Inspect the disk latency statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the average disk latency
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_iops(self, instance):
        """Inspect the disk IOPS statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the number of iops per second
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_info(self, instance):
        """Inspect the disk information for an instance.

        :param instance: the target instance
        :return: for each disk, its capacity, allocation and usage
        """
        raise ceilometer.NotImplementedError


def get_hypervisor_inspector():
    try:
        namespace = 'ceilometer.compute.virt'
        mgr = driver.DriverManager(namespace,
                                   cfg.CONF.hypervisor_inspector,
                                   invoke_on_load=True)
        return mgr.driver
    except ImportError as e:
        LOG.error(_("Unable to load the hypervisor inspector: %s") % e)
        return Inspector()
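

# A minimal usage sketch, assuming the `hypervisor_inspector` option is set
# and `server` is a novaclient-style object as used by the pollsters:
#
#     inspector = get_hypervisor_inspector()
#     stats = inspector.inspect_cpus(server)  # CPUStats(number=..., time=...)
#
# Metrics a given inspector does not support raise
# ceilometer.NotImplementedError, as in the base class above.
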
@@ -1,228 +0,0 @@
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for libvirt."""

from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six

from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _

libvirt = None

LOG = logging.getLogger(__name__)

OPTS = [
    cfg.StrOpt('libvirt_type',
               default='kvm',
               choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'],
               help='Libvirt domain type.'),
    cfg.StrOpt('libvirt_uri',
               default='',
               help='Override the default libvirt URI '
                    '(which is dependent on libvirt_type).'),
]

CONF = cfg.CONF
CONF.register_opts(OPTS)


def retry_on_disconnect(function):
    def decorator(self, *args, **kwargs):
        try:
            return function(self, *args, **kwargs)
        except ImportError:
            # NOTE(sileht): in case libvirt failed to be imported
            raise
        except libvirt.libvirtError as e:
            if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR) and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug('Connection to libvirt broken')
                self.connection = None
                return function(self, *args, **kwargs)
            else:
                raise
    return decorator
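

# Behavior sketch for retry_on_disconnect above: when the wrapped call fails
# with VIR_ERR_SYSTEM_ERROR or VIR_ERR_INTERNAL_ERROR originating from the
# remote/RPC layer, self.connection is dropped and the call is retried once
# against a freshly opened read-only connection; any other libvirtError
# propagates unchanged.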


class LibvirtInspector(virt_inspector.Inspector):

    per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///')

    def __init__(self):
        self.uri = self._get_uri()
        self.connection = None

    def _get_uri(self):
        return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type,
                                                          'qemu:///system')

    def _get_connection(self):
        if not self.connection:
            global libvirt
            if libvirt is None:
                libvirt = __import__('libvirt')
            LOG.debug('Connecting to libvirt: %s', self.uri)
            self.connection = libvirt.openReadOnly(self.uri)

        return self.connection

    def check_sanity(self):
        if not self._get_connection():
            raise virt_inspector.NoSanityException()

    @retry_on_disconnect
    def _lookup_by_uuid(self, instance):
        instance_name = util.instance_name(instance)
        try:
            return self._get_connection().lookupByUUIDString(instance.id)
        except Exception as ex:
            if not libvirt or not isinstance(ex, libvirt.libvirtError):
                raise virt_inspector.InspectorException(six.text_type(ex))
            error_code = ex.get_error_code()
            if (error_code in (libvirt.VIR_ERR_SYSTEM_ERROR,
                               libvirt.VIR_ERR_INTERNAL_ERROR) and
                ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                          libvirt.VIR_FROM_RPC)):
                raise
            msg = _("Error from libvirt while looking up instance "
                    "<name=%(name)s, id=%(id)s>: "
                    "[Error Code %(error_code)s] "
                    "%(ex)s") % {'name': instance_name,
                                 'id': instance.id,
                                 'error_code': error_code,
                                 'ex': ex}
            raise virt_inspector.InstanceNotFoundException(msg)

    def inspect_cpus(self, instance):
        domain = self._get_domain_not_shut_off_or_raise(instance)
        dom_info = domain.info()
        return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4])

    def _get_domain_not_shut_off_or_raise(self, instance):
        instance_name = util.instance_name(instance)
        domain = self._lookup_by_uuid(instance)

        state = domain.info()[0]
        if state == libvirt.VIR_DOMAIN_SHUTOFF:
            msg = _('Failed to inspect data of instance '
                    '<name=%(name)s, id=%(id)s>, '
                    'domain state is SHUTOFF.') % {
                'name': instance_name, 'id': instance.id}
            raise virt_inspector.InstanceShutOffException(msg)

        return domain

    def inspect_vnics(self, instance):
        domain = self._get_domain_not_shut_off_or_raise(instance)

        tree = etree.fromstring(domain.XMLDesc(0))
        for iface in tree.findall('devices/interface'):
            target = iface.find('target')
            if target is not None:
                name = target.get('dev')
            else:
                continue
            mac = iface.find('mac')
            if mac is not None:
                mac_address = mac.get('address')
            else:
                continue
            fref = iface.find('filterref')
            if fref is not None:
                fref = fref.get('filter')

            params = dict((p.get('name').lower(), p.get('value'))
                          for p in iface.findall('filterref/parameter'))
            interface = virt_inspector.Interface(name=name, mac=mac_address,
                                                 fref=fref, parameters=params)
            dom_stats = domain.interfaceStats(name)
            stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0],
                                                  rx_packets=dom_stats[1],
                                                  tx_bytes=dom_stats[4],
                                                  tx_packets=dom_stats[5])
            yield (interface, stats)

    def inspect_disks(self, instance):
        domain = self._get_domain_not_shut_off_or_raise(instance)

        tree = etree.fromstring(domain.XMLDesc(0))
        for device in filter(
                bool,
                [target.get("dev")
                 for target in tree.findall('devices/disk/target')]):
            disk = virt_inspector.Disk(device=device)
            block_stats = domain.blockStats(device)
            stats = virt_inspector.DiskStats(read_requests=block_stats[0],
                                             read_bytes=block_stats[1],
                                             write_requests=block_stats[2],
                                             write_bytes=block_stats[3],
                                             errors=block_stats[4])
            yield (disk, stats)

    def inspect_memory_usage(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        domain = self._get_domain_not_shut_off_or_raise(instance)

        try:
            memory_stats = domain.memoryStats()
            if (memory_stats and
                    memory_stats.get('available') and
                    memory_stats.get('unused')):
                memory_used = (memory_stats.get('available') -
                               memory_stats.get('unused'))
                # Stat provided from libvirt is in KB, converting it to MB.
                memory_used = memory_used / units.Ki
                return virt_inspector.MemoryUsageStats(usage=memory_used)
            else:
                msg = _('Failed to inspect memory usage of instance '
                        '<name=%(name)s, id=%(id)s>, '
                        'can not get info from libvirt.') % {
                    'name': instance_name, 'id': instance.id}
                raise virt_inspector.NoDataException(msg)
        # memoryStats might raise an exception if the method is not supported
        # by the underlying hypervisor being used by libvirt.
        except libvirt.libvirtError as e:
            msg = _('Failed to inspect memory usage of %(instance_uuid)s, '
                    'can not get info from libvirt: %(error)s') % {
                'instance_uuid': instance.id, 'error': e}
            raise virt_inspector.NoDataException(msg)

    def inspect_disk_info(self, instance):
        domain = self._get_domain_not_shut_off_or_raise(instance)

        tree = etree.fromstring(domain.XMLDesc(0))
        for device in filter(
                bool,
                [target.get("dev")
                 for target in tree.findall('devices/disk/target')]):
            disk = virt_inspector.Disk(device=device)
            block_info = domain.blockInfo(device)
            info = virt_inspector.DiskInfo(capacity=block_info[0],
                                           allocation=block_info[1],
                                           physical=block_info[2])

            yield (disk, info)

    def inspect_memory_resident(self, instance, duration=None):
        domain = self._get_domain_not_shut_off_or_raise(instance)
        memory = domain.memoryStats()['rss'] / units.Ki
        return virt_inspector.MemoryResidentStats(resident=memory)
@@ -1,199 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of Inspector abstraction for VMware vSphere"""

from oslo_config import cfg
from oslo_utils import units
from oslo_vmware import api
import six

from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.vmware import vsphere_operations
from ceilometer.i18n import _


opt_group = cfg.OptGroup(name='vmware',
                         title='Options for VMware')

OPTS = [
    cfg.StrOpt('host_ip',
               default='',
               help='IP address of the VMware vSphere host.'),
    cfg.PortOpt('host_port',
                default=443,
                help='Port of the VMware vSphere host.'),
    cfg.StrOpt('host_username',
               default='',
               help='Username of VMware vSphere.'),
    cfg.StrOpt('host_password',
               default='',
               help='Password of VMware vSphere.',
               secret=True),
    cfg.StrOpt('ca_file',
               help='CA bundle file to use in verifying the vCenter server '
                    'certificate.'),
    cfg.BoolOpt('insecure',
                default=False,
                help='If true, the vCenter server certificate is not '
                     'verified. If false, then the default CA truststore is '
                     'used for verification. This option is ignored if '
                     '"ca_file" is set.'),
    cfg.IntOpt('api_retry_count',
               default=10,
               help='Number of times a VMware vSphere API may be retried.'),
    cfg.FloatOpt('task_poll_interval',
                 default=0.5,
                 help='Sleep time in seconds for polling an ongoing async '
                      'task.'),
    cfg.StrOpt('wsdl_location',
               help='Optional vim service WSDL location, '
                    'e.g. http://<server>/vimService.wsdl. '
                    'Optional override to the default location, for '
                    'bug workarounds.'),
]

cfg.CONF.register_group(opt_group)
cfg.CONF.register_opts(OPTS, group=opt_group)

VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average'
VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average'
VC_NETWORK_RX_COUNTER = 'net:received:average'
VC_NETWORK_TX_COUNTER = 'net:transmitted:average'
VC_DISK_READ_RATE_CNTR = "disk:read:average"
VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average"
VC_DISK_WRITE_RATE_CNTR = "disk:write:average"
VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average"


def get_api_session():
    api_session = api.VMwareAPISession(
        cfg.CONF.vmware.host_ip,
        cfg.CONF.vmware.host_username,
        cfg.CONF.vmware.host_password,
        cfg.CONF.vmware.api_retry_count,
        cfg.CONF.vmware.task_poll_interval,
        wsdl_loc=cfg.CONF.vmware.wsdl_location,
        port=cfg.CONF.vmware.host_port,
        cacert=cfg.CONF.vmware.ca_file,
        insecure=cfg.CONF.vmware.insecure)
    return api_session


class VsphereInspector(virt_inspector.Inspector):

    def __init__(self):
        super(VsphereInspector, self).__init__()
        self._ops = vsphere_operations.VsphereOperations(
            get_api_session(), 1000)

    def inspect_cpu_util(self, instance, duration=None):
        vm_moid = self._ops.get_vm_moid(instance.id)
        if vm_moid is None:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in VMware vSphere') % instance.id)
        cpu_util_counter_id = self._ops.get_perf_counter_id(
            VC_AVERAGE_CPU_CONSUMED_CNTR)
        cpu_util = self._ops.query_vm_aggregate_stats(
            vm_moid, cpu_util_counter_id, duration)

        # For this counter vSphere returns values scaled up by 100, since
        # the corresponding API can't return decimals, only longs. E.g. if
        # the utilization is 12.34%, the value returned is 1234. Hence,
        # divide by 100.
        cpu_util = cpu_util / 100
        return virt_inspector.CPUUtilStats(util=cpu_util)

    def inspect_vnic_rates(self, instance, duration=None):
        vm_moid = self._ops.get_vm_moid(instance.id)
        if not vm_moid:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in VMware vSphere') % instance.id)

        vnic_stats = {}
        vnic_ids = set()

        for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER):
            net_counter_id = self._ops.get_perf_counter_id(net_counter)
            vnic_id_to_stats_map = self._ops.query_vm_device_stats(
                vm_moid, net_counter_id, duration)
            vnic_stats[net_counter] = vnic_id_to_stats_map
            vnic_ids.update(six.iterkeys(vnic_id_to_stats_map))

        # Stats provided from vSphere are in KB/s, converting them to B/s.
        for vnic_id in vnic_ids:
            rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER]
                             .get(vnic_id, 0) * units.Ki)
            tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER]
                             .get(vnic_id, 0) * units.Ki)

            stats = virt_inspector.InterfaceRateStats(rx_bytes_rate,
                                                      tx_bytes_rate)
            interface = virt_inspector.Interface(
                name=vnic_id,
                mac=None,
                fref=None,
                parameters=None)
            yield (interface, stats)

    def inspect_memory_usage(self, instance, duration=None):
        vm_moid = self._ops.get_vm_moid(instance.id)
        if vm_moid is None:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in VMware vSphere') % instance.id)
        mem_counter_id = self._ops.get_perf_counter_id(
            VC_AVERAGE_MEMORY_CONSUMED_CNTR)
        memory = self._ops.query_vm_aggregate_stats(
            vm_moid, mem_counter_id, duration)
        # Stat provided from vSphere is in KB, converting it to MB.
        memory = memory / units.Ki
        return virt_inspector.MemoryUsageStats(usage=memory)

    def inspect_disk_rates(self, instance, duration=None):
        vm_moid = self._ops.get_vm_moid(instance.id)
        if not vm_moid:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in VMware vSphere') % instance.id)

        disk_stats = {}
        disk_ids = set()
        disk_counters = [
            VC_DISK_READ_RATE_CNTR,
            VC_DISK_READ_REQUESTS_RATE_CNTR,
            VC_DISK_WRITE_RATE_CNTR,
            VC_DISK_WRITE_REQUESTS_RATE_CNTR
        ]

        for disk_counter in disk_counters:
            disk_counter_id = self._ops.get_perf_counter_id(disk_counter)
            disk_id_to_stat_map = self._ops.query_vm_device_stats(
                vm_moid, disk_counter_id, duration)
            disk_stats[disk_counter] = disk_id_to_stat_map
            disk_ids.update(six.iterkeys(disk_id_to_stat_map))

        for disk_id in disk_ids:

            def stat_val(counter_name):
                return disk_stats[counter_name].get(disk_id, 0)

            disk = virt_inspector.Disk(device=disk_id)
            # Stats provided from vSphere are in KB/s, converting them
            # to B/s.
            disk_rate_info = virt_inspector.DiskRateStats(
                read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki,
                read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR),
                write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki,
                write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR)
            )
            yield (disk, disk_rate_info)
@@ -1,230 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_vmware import vim_util


PERF_MANAGER_TYPE = "PerformanceManager"
PERF_COUNTER_PROPERTY = "perfCounter"
VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value'

# ESXi Servers sample performance data every 20 seconds. 20-second interval
# data is called instance data or real-time data. To retrieve instance data,
# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId"
# property. In that case the "QueryPerf" method operates as a raw data feed
# that bypasses the vCenter database and instead retrieves performance data
# from an ESXi host.
# The following value is the time interval for real-time performance stats,
# in seconds, and it is not configurable.
VC_REAL_TIME_SAMPLING_INTERVAL = 20
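
# Worked example of the arithmetic in _query_vm_perf_stats() below: a
# duration of 60 seconds requests 60 / VC_REAL_TIME_SAMPLING_INTERVAL = 3
# real-time samples, whose values are then averaged; durations shorter than
# the 20-second interval (or None) fall back to a single sample.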


class VsphereOperations(object):
    """Class to invoke vSphere API calls.

    vSphere API calls are required by various pollsters that collect data
    from the VMware infrastructure.
    """
    def __init__(self, api_session, max_objects):
        self._api_session = api_session
        self._max_objects = max_objects
        # Mapping between "VM's Nova instance Id" -> "VM's MOID"
        # If a VM is deployed by Nova, its name is the instance ID,
        # so this map essentially has VM names as keys.
        self._vm_moid_lookup_map = {}

        # Mapping from full name -> ID, for VC Performance counters
        self._perf_counter_id_lookup_map = None

    def _init_vm_moid_lookup_map(self):
        session = self._api_session
        result = session.invoke_api(vim_util, "get_objects", session.vim,
                                    "VirtualMachine", self._max_objects,
                                    [VM_INSTANCE_ID_PROPERTY],
                                    False)
        while result:
            for vm_object in result.objects:
                vm_moid = vm_object.obj.value
                # propSet will be set only if the server provides value
                if hasattr(vm_object, 'propSet') and vm_object.propSet:
                    vm_instance_id = vm_object.propSet[0].val
                    if vm_instance_id:
                        self._vm_moid_lookup_map[vm_instance_id] = vm_moid

            result = session.invoke_api(vim_util, "continue_retrieval",
                                        session.vim, result)

    def get_vm_moid(self, vm_instance_id):
        """Method returns VC MOID of the VM by its Nova instance ID."""
        if vm_instance_id not in self._vm_moid_lookup_map:
            self._init_vm_moid_lookup_map()

        return self._vm_moid_lookup_map.get(vm_instance_id, None)

    def _init_perf_counter_id_lookup_map(self):

        # Query details of all the performance counters from VC
        session = self._api_session
        client_factory = session.vim.client.factory
        perf_manager = session.vim.service_content.perfManager

        prop_spec = vim_util.build_property_spec(
            client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY])

        obj_spec = vim_util.build_object_spec(
            client_factory, perf_manager, None)

        filter_spec = vim_util.build_property_filter_spec(
            client_factory, [prop_spec], [obj_spec])

        options = client_factory.create('ns0:RetrieveOptions')
        options.maxObjects = 1

        prop_collector = session.vim.service_content.propertyCollector
        result = session.invoke_api(session.vim, "RetrievePropertiesEx",
                                    prop_collector, specSet=[filter_spec],
                                    options=options)

        perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo

        # Extract the counter Id for each counter and populate the map
        self._perf_counter_id_lookup_map = {}
        for perf_counter_info in perf_counter_infos:

            counter_group = perf_counter_info.groupInfo.key
            counter_name = perf_counter_info.nameInfo.key
            counter_rollup_type = perf_counter_info.rollupType
            counter_id = perf_counter_info.key

            counter_full_name = (counter_group + ":" + counter_name + ":" +
                                 counter_rollup_type)
            self._perf_counter_id_lookup_map[counter_full_name] = counter_id

    def get_perf_counter_id(self, counter_full_name):
        """Method returns the ID of a VC performance counter by its full name.

        A VC performance counter is uniquely identified by the
        tuple {'Group Name', 'Counter Name', 'Rollup Type'}.
        Its counter ID (which changes from one VC to another) is required to
        query performance stats from that VC.
        This method returns the ID for a counter, assuming
        'CounterFullName' => 'Group Name:CounterName:RollupType'.
        """
        if not self._perf_counter_id_lookup_map:
            self._init_perf_counter_id_lookup_map()
        return self._perf_counter_id_lookup_map[counter_full_name]
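
    # Usage sketch (counter IDs themselves are vCenter-specific, hence
    # hypothetical here):
    #
    #     counter_id = ops.get_perf_counter_id('cpu:usage:average')
    #     value = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60)
    #
    # where `ops` is a VsphereOperations instance and `vm_moid` comes from
    # get_vm_moid().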

    # TODO(akhils@vmware.com) Move this method to common library
    # when it gets checked-in
    def query_vm_property(self, vm_moid, property_name):
        """Method returns the value of specified property for a VM.

        :param vm_moid: moid of the VM whose property is to be queried
        :param property_name: path of the property
        """
        vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine")
        session = self._api_session
        return session.invoke_api(vim_util, "get_object_property",
                                  session.vim, vm_mobj, property_name)

    def query_vm_aggregate_stats(self, vm_moid, counter_id, duration):
        """Method queries the aggregated real-time stat value for a VM.

        This method should be used for aggregate counters.

        :param vm_moid: moid of the VM
        :param counter_id: id of the perf counter in VC
        :param duration: in seconds from current time,
            over which the stat value was applicable
        :return: the aggregated stats value for the counter
        """
        # For aggregate counters, device_name should be ""
        stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration)

        # Performance manager provides the aggregated stats value
        # with device name -> None
        return stats.get(None, 0)

    def query_vm_device_stats(self, vm_moid, counter_id, duration):
        """Method queries the real-time stat values for a VM, for all devices.

        This method should be used for device (non-aggregate) counters.

        :param vm_moid: moid of the VM
        :param counter_id: id of the perf counter in VC
        :param duration: in seconds from current time,
            over which the stat value was applicable
        :return: a map containing the stat values keyed by the device ID/name
        """
        # For device counters, device_name should be "*" to get stat values
        # for all devices.
        stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration)

        # For some device counters, in addition to the per-device value
        # the Performance manager also returns the aggregated value.
        # Just to be consistent, delete the aggregated value if present.
        stats.pop(None, None)
        return stats

    def _query_vm_perf_stats(self, vm_moid, counter_id, device_name,
                             duration):
        """Method queries the real-time stat values for a VM.

        :param vm_moid: moid of the VM for which stats are needed
        :param counter_id: id of the perf counter in VC
        :param device_name: name of the device for which stats are to be
            queried. For aggregate counters pass empty string ("").
            For device counters pass "*", if stats are required over all
            devices.
        :param duration: in seconds from current time,
            over which the stat value was applicable
        :return: a map containing the stat values keyed by the device ID/name
        """

        session = self._api_session
        client_factory = session.vim.client.factory

        # Construct the QuerySpec
        metric_id = client_factory.create('ns0:PerfMetricId')
        metric_id.counterId = counter_id
        metric_id.instance = device_name

        query_spec = client_factory.create('ns0:PerfQuerySpec')
        query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine")
        query_spec.metricId = [metric_id]
        query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL
        # We query all samples which are applicable over the specified
        # duration.
        samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL)
                       if duration and
                       duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1)
        query_spec.maxSample = samples_cnt

        perf_manager = session.vim.service_content.perfManager
        perf_stats = session.invoke_api(session.vim, 'QueryPerf',
                                        perf_manager,
                                        querySpec=[query_spec])

        stat_values = {}
        if perf_stats:
            entity_metric = perf_stats[0]
            sample_infos = entity_metric.sampleInfo

            if len(sample_infos) > 0:
                for metric_series in entity_metric.value:
                    # Take the average of all samples to improve the accuracy
                    # of the stat value
                    stat_value = float(sum(metric_series.value)) / samples_cnt
                    device_id = metric_series.id.instance
                    stat_values[device_id] = stat_value

        return stat_values
@@ -1,192 +0,0 @@
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for XenAPI."""

from oslo_config import cfg
from oslo_utils import units
import six.moves.urllib.parse as urlparse
try:
    import XenAPI as api
except ImportError:
    api = None

from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _

opt_group = cfg.OptGroup(name='xenapi',
                         title='Options for XenAPI')

OPTS = [
    cfg.StrOpt('connection_url',
               help='URL for connection to XenServer/Xen Cloud Platform.'),
    cfg.StrOpt('connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud '
                    'Platform.'),
    cfg.StrOpt('connection_password',
               help='Password for connection to XenServer/Xen Cloud '
                    'Platform.',
               secret=True),
]

CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opts(OPTS, group=opt_group)


class XenapiException(virt_inspector.InspectorException):
    pass


def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'."""
    temp_url = urlparse.urlparse(url)
    # The connection URL is served by XAPI and doesn't support having a
    # path after the port, and the username/password will be passed
    # separately. So a URL like "http://abc:abc@abc:433/abc" should not
    # appear for the XAPI case.
    temp_netloc = temp_url.netloc.replace(temp_url.hostname, '%s' % host_addr)
    replaced = temp_url._replace(netloc=temp_netloc)
    return urlparse.urlunparse(replaced)
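

# Worked example for swap_xapi_host() above:
#
#     swap_xapi_host('http://10.0.0.1:443', '10.0.0.2')
#     # -> 'http://10.0.0.2:443'
#
# Only the host part of the netloc changes, which is how the HOST_IS_SLAVE
# redirection to the pool master in get_api_session() below works.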


def get_api_session():
    if not api:
        raise ImportError(_('XenAPI not installed'))

    url = CONF.xenapi.connection_url
    username = CONF.xenapi.connection_username
    password = CONF.xenapi.connection_password
    if not url or password is None:
        raise XenapiException(_('Must specify connection_url and '
                                'connection_password to use'))

    try:
        session = (api.xapi_local() if url == 'unix://local'
                   else api.Session(url))
        session.login_with_password(username, password)
    except api.Failure as e:
        if e.details[0] == 'HOST_IS_SLAVE':
            master = e.details[1]
            url = swap_xapi_host(url, master)
            try:
                session = api.Session(url)
                session.login_with_password(username, password)
            except api.Failure as es:
                raise XenapiException(_('Could not connect to slave host: '
                                        '%s') % es.details[0])
        else:
            msg = _("Could not connect to XenAPI: %s") % e.details[0]
            raise XenapiException(msg)
    return session


class XenapiInspector(virt_inspector.Inspector):

    def __init__(self):
        super(XenapiInspector, self).__init__()
        self.session = get_api_session()

    def _get_host_ref(self):
        """Return the xenapi host on which nova-compute runs."""
        return self.session.xenapi.session.get_this_host(self.session.handle)

    def _call_xenapi(self, method, *args):
        return self.session.xenapi_request(method, args)

    def _lookup_by_name(self, instance_name):
        vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name)
        n = len(vm_refs)
        if n == 0:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in XenServer') % instance_name)
        elif n > 1:
            raise XenapiException(
                _('Multiple VMs named %s found in XenServer') % instance_name)
        else:
            return vm_refs[0]

    def inspect_cpu_util(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        vm_ref = self._lookup_by_name(instance_name)
        metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref)
        metrics_rec = self._call_xenapi("VM_metrics.get_record",
                                        metrics_ref)
        vcpus_number = metrics_rec['VCPUs_number']
        vcpus_utils = metrics_rec['VCPUs_utilisation']
        if len(vcpus_utils) == 0:
            msg = _("Could not get VM %s CPU Utilization") % instance_name
            raise XenapiException(msg)

        utils = 0.0
        for num in range(int(vcpus_number)):
            utils += vcpus_utils.get(str(num))
        utils = utils / int(vcpus_number) * 100
        return virt_inspector.CPUUtilStats(util=utils)

    def inspect_memory_usage(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        vm_ref = self._lookup_by_name(instance_name)
        metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref)
        metrics_rec = self._call_xenapi("VM_metrics.get_record",
                                        metrics_ref)
        # Stat provided from XenServer is in B, converting it to MB.
        memory = int(metrics_rec['memory_actual']) / units.Mi
        return virt_inspector.MemoryUsageStats(usage=memory)

    def inspect_vnic_rates(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        vm_ref = self._lookup_by_name(instance_name)
        vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref)
        if vif_refs:
            for vif_ref in vif_refs:
                vif_rec = self._call_xenapi("VIF.get_record", vif_ref)
                vif_metrics_ref = self._call_xenapi(
                    "VIF.get_metrics", vif_ref)
                vif_metrics_rec = self._call_xenapi(
                    "VIF_metrics.get_record", vif_metrics_ref)

                interface = virt_inspector.Interface(
                    name=vif_rec['uuid'],
                    mac=vif_rec['MAC'],
                    fref=None,
                    parameters=None)
                rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki
                tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki
                stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate)
                yield (interface, stats)

    def inspect_disk_rates(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        vm_ref = self._lookup_by_name(instance_name)
        vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref)
        if vbd_refs:
            for vbd_ref in vbd_refs:
                vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref)
                vbd_metrics_ref = self._call_xenapi("VBD.get_metrics",
                                                    vbd_ref)
                vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record",
                                                    vbd_metrics_ref)

                disk = virt_inspector.Disk(device=vbd_rec['device'])
                # Stats provided from XenServer are in KB/s,
                # converting them to B/s.
                read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki
                write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki
                disk_rate_info = virt_inspector.DiskRateStats(
                    read_bytes_rate=read_rate,
                    read_requests_rate=0,
                    write_bytes_rate=write_rate,
                    write_requests_rate=0)
                yield (disk, disk_rate_info)
@@ -1,229 +0,0 @@
#
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from oslo_config import cfg
from oslo_log import log
import retrying
import tooz.coordination

from ceilometer.i18n import _LE, _LI, _LW
from ceilometer import utils

LOG = log.getLogger(__name__)

OPTS = [
    cfg.StrOpt('backend_url',
               help='The backend URL to use for distributed coordination. If '
                    'left empty, per-deployment central agent and per-host '
                    'compute agent won\'t do workload '
                    'partitioning and will only function correctly if a '
                    'single instance of that service is running.'),
    cfg.FloatOpt('heartbeat',
                 default=1.0,
                 help='Number of seconds between heartbeats for distributed '
                      'coordination.'),
    cfg.FloatOpt('check_watchers',
                 default=10.0,
                 help='Number of seconds between checks to see if group '
                      'membership has changed.'),
    cfg.IntOpt('retry_backoff',
               default=1,
               help='Retry backoff factor when retrying to connect with '
                    'the coordination backend.'),
    cfg.IntOpt('max_retry_interval',
               default=30,
               help='Maximum number of seconds between retries to join the '
                    'partitioning group.')
]
cfg.CONF.register_opts(OPTS, group='coordination')
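
# A minimal deployment sketch (hypothetical values): workload partitioning is
# enabled by pointing the coordination backend at a tooz URL in
# ceilometer.conf, e.g.
#
#     [coordination]
#     backend_url = memcached://localhost:11211
#     heartbeat = 1.0
#
# With backend_url left empty, no partitioning is done and only a single
# instance of each agent may run, per the backend_url help text above.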


class ErrorJoiningPartitioningGroup(Exception):
    def __init__(self):
        super(ErrorJoiningPartitioningGroup, self).__init__(_LE(
            'Coordination join_group Error joining partitioning group'))


class MemberNotInGroupError(Exception):
    def __init__(self, group_id, members, my_id):
        super(MemberNotInGroupError, self).__init__(_LE(
            'Group ID: %(group_id)s, Members: %(members)s, Me: %(me)s: '
            'Current agent is not part of group and cannot take tasks') %
            {'group_id': group_id, 'members': members, 'me': my_id})


def retry_on_error_joining_partition(exception):
    return isinstance(exception, ErrorJoiningPartitioningGroup)


def retry_on_member_not_in_group(exception):
    return isinstance(exception, MemberNotInGroupError)


class PartitionCoordinator(object):
    """Workload partitioning coordinator.

    This class uses the `tooz` library to manage group membership.

    To ensure that the other agents know this agent is still alive,
    the `heartbeat` method should be called periodically.

    Coordination errors and reconnects are handled under the hood, so the
    service using the partition coordinator need not care whether the
    coordination backend is down. The `extract_my_subset` method will simply
    return an empty iterable in this case.
    """

    def __init__(self, my_id=None):
        self._coordinator = None
        self._groups = set()
        self._my_id = my_id or str(uuid.uuid4())

    def start(self):
        backend_url = cfg.CONF.coordination.backend_url
        if backend_url:
            try:
                self._coordinator = tooz.coordination.get_coordinator(
                    backend_url, self._my_id)
                self._coordinator.start()
                LOG.info(_LI('Coordination backend started successfully.'))
            except tooz.coordination.ToozError:
                LOG.exception(_LE('Error connecting to coordination '
                                  'backend.'))

    def stop(self):
        if not self._coordinator:
            return

        for group in list(self._groups):
            self.leave_group(group)

        try:
            self._coordinator.stop()
        except tooz.coordination.ToozError:
            LOG.exception(_LE('Error connecting to coordination backend.'))
        finally:
            self._coordinator = None

    def is_active(self):
        return self._coordinator is not None

    def heartbeat(self):
        if self._coordinator:
            if not self._coordinator.is_started:
                # re-connect
                self.start()
            try:
                self._coordinator.heartbeat()
            except tooz.coordination.ToozError:
                LOG.exception(_LE('Error sending a heartbeat to coordination '
                                  'backend.'))

    def watch_group(self, namespace, callback):
        if self._coordinator:
            self._coordinator.watch_join_group(namespace, callback)
            self._coordinator.watch_leave_group(namespace, callback)

    def run_watchers(self):
        if self._coordinator:
            self._coordinator.run_watchers()

    def join_group(self, group_id):
        if (not self._coordinator or not self._coordinator.is_started
                or not group_id):
            return

        retry_backoff = cfg.CONF.coordination.retry_backoff * 1000
        max_retry_interval = cfg.CONF.coordination.max_retry_interval * 1000

        @retrying.retry(
            wait_exponential_multiplier=retry_backoff,
            wait_exponential_max=max_retry_interval,
            retry_on_exception=retry_on_error_joining_partition,
            wrap_exception=True)
        def _inner():
            try:
                join_req = self._coordinator.join_group(group_id)
                join_req.get()
                LOG.info(_LI('Joined partitioning group %s'), group_id)
            except tooz.coordination.MemberAlreadyExist:
                return
            except tooz.coordination.GroupNotCreated:
                create_grp_req = self._coordinator.create_group(group_id)
                try:
                    create_grp_req.get()
                except tooz.coordination.GroupAlreadyExist:
|
||||
pass
|
||||
raise ErrorJoiningPartitioningGroup()
|
||||
except tooz.coordination.ToozError:
|
||||
LOG.exception(_LE('Error joining partitioning group %s,'
|
||||
' re-trying'), group_id)
|
||||
raise ErrorJoiningPartitioningGroup()
|
||||
self._groups.add(group_id)
|
||||
|
||||
return _inner()
|
||||
|
||||
def leave_group(self, group_id):
|
||||
if group_id not in self._groups:
|
||||
return
|
||||
if self._coordinator:
|
||||
self._coordinator.leave_group(group_id)
|
||||
self._groups.remove(group_id)
|
||||
LOG.info(_LI('Left partitioning group %s'), group_id)
|
||||
|
||||
def _get_members(self, group_id):
|
||||
if not self._coordinator:
|
||||
return [self._my_id]
|
||||
|
||||
while True:
|
||||
get_members_req = self._coordinator.get_members(group_id)
|
||||
try:
|
||||
return get_members_req.get()
|
||||
except tooz.coordination.GroupNotCreated:
|
||||
self.join_group(group_id)
|
||||
|
||||
@retrying.retry(stop_max_attempt_number=5, wait_random_max=2000,
|
||||
retry_on_exception=retry_on_member_not_in_group)
|
||||
def extract_my_subset(self, group_id, iterable, attempt=0):
|
||||
"""Filters an iterable, returning only objects assigned to this agent.
|
||||
|
||||
We have a list of objects and get a list of active group members from
|
||||
`tooz`. We then hash all the objects into buckets and return only
|
||||
the ones that hashed into *our* bucket.
|
||||
"""
|
||||
if not group_id:
|
||||
return iterable
|
||||
if group_id not in self._groups:
|
||||
self.join_group(group_id)
|
||||
try:
|
||||
members = self._get_members(group_id)
|
||||
LOG.debug('Members of group: %s, Me: %s', members, self._my_id)
|
||||
if self._my_id not in members:
|
||||
LOG.warning(_LW('Cannot extract tasks because agent failed to '
|
||||
'join group properly. Rejoining group.'))
|
||||
self.join_group(group_id)
|
||||
members = self._get_members(group_id)
|
||||
if self._my_id not in members:
|
||||
raise MemberNotInGroupError(group_id, members, self._my_id)
|
||||
hr = utils.HashRing(members)
|
||||
filtered = [v for v in iterable
|
||||
if hr.get_node(str(v)) == self._my_id]
|
||||
LOG.debug('My subset: %s', [str(f) for f in filtered])
|
||||
return filtered
|
||||
except tooz.coordination.ToozError:
|
||||
LOG.exception(_LE('Error getting group membership info from '
|
||||
'coordination backend.'))
|
||||
return []
|
|
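As an aside on the partitioning logic removed here: `extract_my_subset` hashes every task onto the current member list and keeps only the tasks that land on this agent. A minimal standalone sketch of that idea, assuming a toy ring in place of `ceilometer.utils.HashRing` (the real one uses replicated virtual nodes; md5-modulo bucketing here is only illustrative):

    import hashlib


    class TinyHashRing(object):
        """Toy stand-in for ceilometer.utils.HashRing (no virtual nodes)."""

        def __init__(self, members):
            self.members = sorted(members)

        def get_node(self, key):
            # Deterministically map a key onto one member.
            digest = hashlib.md5(key.encode('utf-8')).hexdigest()
            return self.members[int(digest, 16) % len(self.members)]


    members = ['agent-1', 'agent-2', 'agent-3']
    ring = TinyHashRing(members)
    resources = ['resource-%d' % i for i in range(10)]

    # Every agent runs the same filter over the same list and keeps a
    # disjoint subset -- the core of extract_my_subset.
    for me in members:
        print(me, [r for r in resources if ring.get_node(r) == me])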
@@ -1,188 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from jsonpath_rw_ext import parser
from oslo_config import cfg
from oslo_log import log
import six
import yaml

from ceilometer.i18n import _, _LI

LOG = log.getLogger(__name__)


class DefinitionException(Exception):
    def __init__(self, message, definition_cfg):
        msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message)
        super(DefinitionException, self).__init__(msg)
        self.brief_message = message


class MeterDefinitionException(DefinitionException):
    pass


class EventDefinitionException(DefinitionException):
    pass


class ResourceDefinitionException(DefinitionException):
    pass


class Definition(object):
    JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser()
    GETTERS_CACHE = {}

    def __init__(self, name, cfg, plugin_manager):
        self.cfg = cfg
        self.name = name
        self.plugin = None
        if isinstance(cfg, dict):
            if 'fields' not in cfg:
                raise DefinitionException(
                    _("The field 'fields' is required for %s") % name,
                    self.cfg)

            if 'plugin' in cfg:
                plugin_cfg = cfg['plugin']
                if isinstance(plugin_cfg, six.string_types):
                    plugin_name = plugin_cfg
                    plugin_params = {}
                else:
                    try:
                        plugin_name = plugin_cfg['name']
                    except KeyError:
                        raise DefinitionException(
                            _('Plugin specified, but no plugin name supplied '
                              'for %s') % name, self.cfg)
                    plugin_params = plugin_cfg.get('parameters')
                    if plugin_params is None:
                        plugin_params = {}
                try:
                    plugin_ext = plugin_manager[plugin_name]
                except KeyError:
                    raise DefinitionException(
                        _('No plugin named %(plugin)s available for '
                          '%(name)s') % dict(
                              plugin=plugin_name,
                              name=name), self.cfg)
                plugin_class = plugin_ext.plugin
                self.plugin = plugin_class(**plugin_params)

            fields = cfg['fields']
        else:
            # Simple definition "foobar: jsonpath"
            fields = cfg

        if isinstance(fields, list):
            # NOTE(mdragon): if not a string, we assume a list.
            if len(fields) == 1:
                fields = fields[0]
            else:
                fields = '|'.join('(%s)' % path for path in fields)

        if isinstance(fields, six.integer_types):
            self.getter = fields
        else:
            try:
                self.getter = self.make_getter(fields)
            except Exception as e:
                raise DefinitionException(
                    _("Parse error in JSONPath specification "
                      "'%(jsonpath)s' for %(name)s: %(err)s")
                    % dict(jsonpath=fields, name=name, err=e), self.cfg)

    def _get_path(self, match):
        if match.context is not None:
            for path_element in self._get_path(match.context):
                yield path_element
            yield str(match.path)

    def parse(self, obj, return_all_values=False):
        if callable(self.getter):
            values = self.getter(obj)
        else:
            return self.getter

        values = [match for match in values
                  if return_all_values or match.value is not None]

        if self.plugin is not None:
            if return_all_values and not self.plugin.support_return_all_values:
                raise DefinitionException("Plugin %s does not allow "
                                          "returning multiple values" %
                                          self.cfg["plugin"]["name"], self.cfg)
            values_map = [('.'.join(self._get_path(match)), match.value) for
                          match in values]
            values = [v for v in self.plugin.trait_values(values_map)
                      if v is not None]
        else:
            values = [match.value for match in values if match is not None]
        if return_all_values:
            return values
        else:
            return values[0] if values else None

    def make_getter(self, fields):
        if fields in self.GETTERS_CACHE:
            return self.GETTERS_CACHE[fields]
        else:
            getter = self.JSONPATH_RW_PARSER.parse(fields).find
            self.GETTERS_CACHE[fields] = getter
            return getter


def load_definitions(defaults, config_file, fallback_file=None):
    """Set up definitions from a YAML config file."""

    if not os.path.exists(config_file):
        config_file = cfg.CONF.find_file(config_file)
    if not config_file and fallback_file is not None:
        LOG.debug("No Definitions configuration file found! "
                  "Using default config.")
        config_file = fallback_file

    if config_file is not None:
        LOG.debug("Loading definitions configuration file: %s", config_file)

        with open(config_file) as cf:
            config = cf.read()

        try:
            definition_cfg = yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                errmsg = (_("Invalid YAML syntax in Definitions file "
                            "%(file)s at line: %(line)s, column: %(column)s.")
                          % dict(file=config_file,
                                 line=mark.line + 1,
                                 column=mark.column + 1))
            else:
                errmsg = (_("YAML error reading Definitions file "
                            "%(file)s") % dict(file=config_file))
            LOG.error(errmsg)
            raise

    else:
        LOG.debug("No Definitions configuration file found! "
                  "Using default config.")
        definition_cfg = defaults

    LOG.info(_LI("Definitions: %s"), definition_cfg)
    return definition_cfg

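For reference, the `Definition` class above reduces a list of field specs to a single JSONPath union and caches the compiled getter. A minimal sketch of the same lookup, assuming only `jsonpath_rw_ext` and no trait plugins (the notification payload is made up for illustration):

    from jsonpath_rw_ext import parser

    # 'fields' given as a list is OR'ed together, exactly as
    # Definition.__init__ does above.
    fields = ['payload.volume_id', 'payload.snapshot_id']
    jsonpath = '|'.join('(%s)' % path for path in fields)

    getter = parser.ExtentedJsonPathParser().parse(jsonpath).find

    notification = {'payload': {'volume_id': 'vol-42'}}
    matches = [m.value for m in getter(notification) if m.value is not None]
    print(matches[0] if matches else None)  # -> vol-42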
@@ -16,26 +16,8 @@
import abc

from oslo_config import cfg
from oslo_log import log
import six
from stevedore import named

from ceilometer.i18n import _LW


LOG = log.getLogger(__name__)

OPTS = [
    cfg.MultiStrOpt('meter_dispatchers',
                    deprecated_name='dispatcher',
                    default=['database'],
                    help='Dispatchers to process metering data.'),
    cfg.MultiStrOpt('event_dispatchers',
                    default=['database'],
                    deprecated_name='dispatcher',
                    help='Dispatchers to process event data.'),
]
cfg.CONF.register_opts(OPTS)

STORAGE_OPTS = [
    cfg.IntOpt('max_retries',

@@ -51,59 +33,11 @@ STORAGE_OPTS = [
cfg.CONF.register_opts(STORAGE_OPTS, group='storage')


def _load_dispatcher_manager(dispatcher_type):
    namespace = 'ceilometer.dispatcher.%s' % dispatcher_type
    conf_name = '%s_dispatchers' % dispatcher_type

    LOG.debug('loading dispatchers from %s', namespace)
    # set propagate_map_exceptions to True to enable stevedore
    # to propagate exceptions.
    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace,
        names=getattr(cfg.CONF, conf_name),
        invoke_on_load=True,
        invoke_args=[cfg.CONF],
        propagate_map_exceptions=True)
    if not list(dispatcher_manager):
        LOG.warning(_LW('Failed to load any dispatchers for %s'),
                    namespace)
    return dispatcher_manager


def load_dispatcher_manager():
    return (_load_dispatcher_manager('meter'),
            _load_dispatcher_manager('event'))


class Base(object):
@six.add_metaclass(abc.ABCMeta)
class EventDispatcherBase(object):
    def __init__(self, conf):
        self.conf = conf


@six.add_metaclass(abc.ABCMeta)
class MeterDispatcherBase(Base):
    @abc.abstractmethod
    def record_metering_data(self, data):
        """Recording metering data interface."""

    def verify_and_record_metering_data(self, datapoints):
        """Verify metering data's signature and record valid ones."""
        if not isinstance(datapoints, list):
            datapoints = [datapoints]

        valid_datapoints = []
        for datapoint in datapoints:
            if utils.verify_signature(datapoint,
                                      self.conf.publisher.telemetry_secret):
                valid_datapoints.append(datapoint)
            else:
                LOG.warning(_LW('Message signature is invalid, discarding '
                                'it: <%r>.'), datapoint)
        return self.record_metering_data(valid_datapoints)


@six.add_metaclass(abc.ABCMeta)
class EventDispatcherBase(Base):
    @abc.abstractmethod
    def record_events(self, events):
        """Record events."""

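To make the remaining contract concrete: after this change an event dispatcher only needs to subclass `EventDispatcherBase` and implement `record_events`. A hypothetical sketch (the base class is re-declared inline so the snippet is self-contained; `StdoutDispatcher` is illustrative and not part of the tree):

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class EventDispatcherBase(object):
        def __init__(self, conf):
            self.conf = conf

        @abc.abstractmethod
        def record_events(self, events):
            """Record events."""


    class StdoutDispatcher(EventDispatcherBase):
        """Illustrative dispatcher that just prints each event."""

        def record_events(self, events):
            if not isinstance(events, list):
                events = [events]
            for event in events:
                print(event)


    StdoutDispatcher(conf=None).record_events(
        {'event_type': 'compute.instance.exists'})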
@@ -24,8 +24,7 @@ from ceilometer import storage
LOG = log.getLogger(__name__)


class DatabaseDispatcher(dispatcher.MeterDispatcherBase,
                         dispatcher.EventDispatcherBase):
class DatabaseDispatcher(dispatcher.EventDispatcherBase):
    """Dispatcher class for recording metering data into database.

    The dispatcher class which records each meter into a database configured

@@ -41,59 +40,7 @@ class DatabaseDispatcher(dispatcher.MeterDispatcherBase,

    def __init__(self, conf):
        super(DatabaseDispatcher, self).__init__(conf)

        self._meter_conn = self._get_db_conn('metering', True)
        self._event_conn = self._get_db_conn('event', True)

    def _get_db_conn(self, purpose, ignore_exception=False):
        try:
            return storage.get_connection_from_config(self.conf, purpose)
        except Exception as err:
            params = {"purpose": purpose, "err": err}
            LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
                              "re-try later: %(err)s") % params)
            if not ignore_exception:
                raise

    @property
    def meter_conn(self):
        if not self._meter_conn:
            self._meter_conn = self._get_db_conn('metering')

        return self._meter_conn

    @property
    def event_conn(self):
        if not self._event_conn:
            self._event_conn = self._get_db_conn('event')

        return self._event_conn

    def record_metering_data(self, data):
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.debug(
                'metering data %(counter_name)s '
                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
                {'counter_name': meter['counter_name'],
                 'resource_id': meter['resource_id'],
                 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                 'counter_volume': meter['counter_volume']})
            try:
                # Convert the timestamp to a datetime instance.
                # Storage engines are responsible for converting
                # that value to something they can store.
                if meter.get('timestamp'):
                    ts = timeutils.parse_isotime(meter['timestamp'])
                    meter['timestamp'] = timeutils.normalize_time(ts)
                self.meter_conn.record_metering_data(meter)
            except Exception as err:
                LOG.error(_LE('Failed to record metering data: %s.'), err)
                # raise the exception to propagate it up in the chain.
                raise
        self.event_conn = storage.get_connection_from_config(self.conf)

    def record_events(self, events):
        if not isinstance(events, list):

@@ -1,85 +0,0 @@
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import logging.handlers

from oslo_config import cfg

from ceilometer import dispatcher

OPTS = [
    cfg.StrOpt('file_path',
               help='Name and the location of the file to record '
                    'meters.'),
    cfg.IntOpt('max_bytes',
               default=0,
               help='The max size of the file.'),
    cfg.IntOpt('backup_count',
               default=0,
               help='The max number of the files to keep.'),
]

cfg.CONF.register_opts(OPTS, group="dispatcher_file")


class FileDispatcher(dispatcher.MeterDispatcherBase,
                     dispatcher.EventDispatcherBase):
    """Dispatcher class for recording metering data to a file.

    The dispatcher class which logs each meter and/or event into a file
    configured in the ceilometer configuration file. An example configuration
    may look like the following:

    [dispatcher_file]
    file_path = /tmp/meters

    To enable this dispatcher, the following section needs to be present in
    the ceilometer.conf file:

    [DEFAULT]
    meter_dispatchers = file
    event_dispatchers = file
    """

    def __init__(self, conf):
        super(FileDispatcher, self).__init__(conf)
        self.log = None

        # if the directory and path are configured, then log to the file
        if self.conf.dispatcher_file.file_path:
            dispatcher_logger = logging.Logger('dispatcher.file')
            dispatcher_logger.setLevel(logging.INFO)
            # create rotating file handler which logs meters
            rfh = logging.handlers.RotatingFileHandler(
                self.conf.dispatcher_file.file_path,
                maxBytes=self.conf.dispatcher_file.max_bytes,
                backupCount=self.conf.dispatcher_file.backup_count,
                encoding='utf8')

            rfh.setLevel(logging.INFO)
            # Only want the meters to be saved in the file, not the
            # project root logger.
            dispatcher_logger.propagate = False
            dispatcher_logger.addHandler(rfh)
            self.log = dispatcher_logger

    def record_metering_data(self, data):
        if self.log:
            self.log.info(data)

    def record_events(self, events):
        if self.log:
            self.log.info(events)

@@ -1,469 +0,0 @@
#
# Copyright 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from hashlib import md5
import itertools
import operator
import re
import threading
import uuid

from gnocchiclient import client
from gnocchiclient import exceptions as gnocchi_exc
from gnocchiclient import utils as gnocchi_utils
from keystoneauth1 import exceptions as ka_exceptions
from keystoneauth1 import session as ka_session
from oslo_config import cfg
from oslo_log import log
from oslo_utils import fnmatch
import requests
import retrying
import six
from stevedore import extension

from ceilometer import declarative
from ceilometer import dispatcher
from ceilometer.i18n import _, _LE, _LW
from ceilometer import keystone_client

NAME_ENCODED = __name__.encode('utf-8')
CACHE_NAMESPACE = uuid.UUID(bytes=md5(NAME_ENCODED).digest())
LOG = log.getLogger(__name__)

dispatcher_opts = [
    cfg.BoolOpt('filter_service_activity',
                default=True,
                help='Filter out samples generated by Gnocchi '
                     'service activity'),
    cfg.StrOpt('filter_project',
               default='gnocchi',
               help='Gnocchi project used to filter out samples '
                    'generated by Gnocchi service activity'),
    cfg.StrOpt('url',
               deprecated_for_removal=True,
               help='URL to Gnocchi. default: autodetection'),
    cfg.StrOpt('archive_policy',
               help='The archive policy to use when the dispatcher '
                    'creates a new metric.'),
    cfg.StrOpt('resources_definition_file',
               default='gnocchi_resources.yaml',
               help=_('The YAML file that defines mapping between samples '
                      'and gnocchi resources/metrics')),
]

cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi")


def cache_key_mangler(key):
    """Construct an opaque cache key."""
    if six.PY2:
        key = key.encode('utf-8')
    return uuid.uuid5(CACHE_NAMESPACE, key).hex


class ResourcesDefinition(object):

    MANDATORY_FIELDS = {'resource_type': six.string_types,
                        'metrics': list}

    def __init__(self, definition_cfg, default_archive_policy,
                 plugin_manager):
        self._default_archive_policy = default_archive_policy
        self.cfg = definition_cfg

        for field, field_type in self.MANDATORY_FIELDS.items():
            if field not in self.cfg:
                raise declarative.ResourceDefinitionException(
                    _LE("Required field %s not specified") % field, self.cfg)
            if not isinstance(self.cfg[field], field_type):
                raise declarative.ResourceDefinitionException(
                    _LE("Required field %(field)s should be a %(type)s") %
                    {'field': field, 'type': field_type}, self.cfg)

        self._attributes = {}
        for name, attr_cfg in self.cfg.get('attributes', {}).items():
            self._attributes[name] = declarative.Definition(name, attr_cfg,
                                                            plugin_manager)

        self.metrics = {}
        for t in self.cfg['metrics']:
            archive_policy = self.cfg.get('archive_policy',
                                          self._default_archive_policy)
            if archive_policy is None:
                self.metrics[t] = {}
            else:
                self.metrics[t] = dict(archive_policy_name=archive_policy)

    def match(self, metric_name):
        for t in self.cfg['metrics']:
            if fnmatch.fnmatch(metric_name, t):
                return True
        return False

    def attributes(self, sample):
        attrs = {}
        for name, definition in self._attributes.items():
            value = definition.parse(sample)
            if value is not None:
                attrs[name] = value
        return attrs


def get_gnocchiclient(conf):
    requests_session = requests.session()
    for scheme in requests_session.adapters.keys():
        requests_session.mount(scheme, ka_session.TCPKeepAliveAdapter(
            pool_block=True))

    session = keystone_client.get_session(requests_session=requests_session)
    return client.Client('1', session,
                         interface=conf.service_credentials.interface,
                         region_name=conf.service_credentials.region_name,
                         endpoint_override=conf.dispatcher_gnocchi.url)


class LockedDefaultDict(defaultdict):
    """defaultdict with lock to handle threading

    The dictionary only deletes a key if nothing is accessing it and nothing
    is holding its lock; otherwise the delete is skipped.
    """
    def __init__(self, *args, **kwargs):
        self.lock = threading.Lock()
        super(LockedDefaultDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        with self.lock:
            return super(LockedDefaultDict, self).__getitem__(key)

    def pop(self, key, *args):
        with self.lock:
            key_lock = super(LockedDefaultDict, self).__getitem__(key)
            if key_lock.acquire(False):
                try:
                    super(LockedDefaultDict, self).pop(key, *args)
                finally:
                    key_lock.release()


class GnocchiDispatcher(dispatcher.MeterDispatcherBase):
    """Dispatcher class for recording metering data into the Gnocchi service.

    The dispatcher class records each meter into the gnocchi service
    configured in the ceilometer configuration file. An example configuration
    may look like the following:

    [dispatcher_gnocchi]
    url = http://localhost:8041
    archive_policy = low

    To enable this dispatcher, the following section needs to be present in
    the ceilometer.conf file:

    [DEFAULT]
    meter_dispatchers = gnocchi
    """
    def __init__(self, conf):
        super(GnocchiDispatcher, self).__init__(conf)
        self.conf = conf
        self.filter_service_activity = (
            conf.dispatcher_gnocchi.filter_service_activity)
        self._ks_client = keystone_client.get_client()
        self.resources_definition = self._load_resources_definitions(conf)

        self.cache = None
        try:
            import oslo_cache
            oslo_cache.configure(self.conf)
            # NOTE(cdent): The default cache backend is a real but
            # noop backend. We don't want to use that here because
            # we want to avoid the cache pathways entirely if the
            # cache has not been configured explicitly.
            if 'null' not in self.conf.cache.backend:
                cache_region = oslo_cache.create_region()
                self.cache = oslo_cache.configure_cache_region(
                    self.conf, cache_region)
                self.cache.key_mangler = cache_key_mangler
        except ImportError:
            pass
        except oslo_cache.exception.ConfigurationError as exc:
            LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

        self._gnocchi_project_id = None
        self._gnocchi_project_id_lock = threading.Lock()
        self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

        self._gnocchi = get_gnocchiclient(conf)
        # Convert retry_interval secs to msecs for retry decorator
        retries = conf.storage.max_retries

        @retrying.retry(wait_fixed=conf.storage.retry_interval * 1000,
                        stop_max_attempt_number=(retries if retries >= 0
                                                 else None))
        def _get_connection():
            self._gnocchi.capabilities.list()

        try:
            _get_connection()
        except Exception:
            LOG.error(_LE('Failed to connect to Gnocchi.'))
            raise

    @classmethod
    def _load_resources_definitions(cls, conf):
        plugin_manager = extension.ExtensionManager(
            namespace='ceilometer.event.trait_plugin')
        data = declarative.load_definitions(
            {}, conf.dispatcher_gnocchi.resources_definition_file)
        resource_defs = []
        for resource in data.get('resources', []):
            try:
                resource_defs.append(ResourcesDefinition(
                    resource,
                    conf.dispatcher_gnocchi.archive_policy, plugin_manager))
            except Exception as exc:
                LOG.error(_LE("Failed to load resource due to error %s") %
                          exc)
        return resource_defs

    @property
    def gnocchi_project_id(self):
        if self._gnocchi_project_id is not None:
            return self._gnocchi_project_id
        with self._gnocchi_project_id_lock:
            if self._gnocchi_project_id is None:
                try:
                    project = self._ks_client.projects.find(
                        name=self.conf.dispatcher_gnocchi.filter_project)
                except ka_exceptions.NotFound:
                    LOG.warning(_LW('gnocchi project not found in keystone,'
                                    ' ignoring the filter_service_activity '
                                    'option'))
                    self.filter_service_activity = False
                    return None
                except Exception:
                    LOG.exception('Failed to retrieve the project of the '
                                  'Gnocchi service')
                    raise
                self._gnocchi_project_id = project.id
                LOG.debug("gnocchi project found: %s",
                          self.gnocchi_project_id)
            return self._gnocchi_project_id

    def _is_swift_account_sample(self, sample):
        return bool([rd for rd in self.resources_definition
                     if rd.cfg['resource_type'] == 'swift_account'
                     and rd.match(sample['counter_name'])])

    def _is_gnocchi_activity(self, sample):
        return (self.filter_service_activity and self.gnocchi_project_id and (
            # avoid anything from the user used by gnocchi
            sample['project_id'] == self.gnocchi_project_id or
            # avoid anything in the swift account used by gnocchi
            (sample['resource_id'] == self.gnocchi_project_id and
             self._is_swift_account_sample(sample))
        ))

    def _get_resource_definition(self, metric_name):
        for rd in self.resources_definition:
            if rd.match(metric_name):
                return rd

    def record_metering_data(self, data):
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]
        # NOTE(sileht): skip samples generated by gnocchi itself
        data = [s for s in data if not self._is_gnocchi_activity(s)]

        # FIXME(sileht): This method batches the processing of samples
        # grouped by resource_id and metric_name, but this is not
        # efficient yet because the data received here doesn't often
        # contain a lot of different kinds of samples.
        # So perhaps the next step will be to pool the received data from
        # the message bus.
        data.sort(key=lambda s: (s['resource_id'], s['counter_name']))

        resource_grouped_samples = itertools.groupby(
            data, key=operator.itemgetter('resource_id'))

        gnocchi_data = {}
        measures = {}
        stats = dict(measures=0, resources=0, metrics=0)
        for resource_id, samples_of_resource in resource_grouped_samples:
            stats['resources'] += 1
            metric_grouped_samples = itertools.groupby(
                list(samples_of_resource),
                key=operator.itemgetter('counter_name'))

            # NOTE(sileht): We convert resource id to Gnocchi format
            # because batch_resources_metrics_measures exception
            # returns this id and not the ceilometer one
            gnocchi_id = gnocchi_utils.encode_resource_id(resource_id)
            res_info = gnocchi_data[gnocchi_id] = {}
            for metric_name, samples in metric_grouped_samples:
                stats['metrics'] += 1

                samples = list(samples)
                rd = self._get_resource_definition(metric_name)
                if rd is None:
                    LOG.warning(_LW("metric %s is not handled by Gnocchi") %
                                metric_name)
                    continue
                if rd.cfg.get("ignore"):
                    continue

                res_info['resource_type'] = rd.cfg['resource_type']
                res_info.setdefault("resource", {}).update({
                    "id": resource_id,
                    "user_id": samples[0]['user_id'],
                    "project_id": samples[0]['project_id'],
                    "metrics": rd.metrics,
                })

                for sample in samples:
                    res_info.setdefault("resource_extra", {}).update(
                        rd.attributes(sample))
                    m = measures.setdefault(gnocchi_id, {}).setdefault(
                        metric_name, [])
                    m.append({'timestamp': sample['timestamp'],
                              'value': sample['counter_volume']})
                    unit = sample['counter_unit']
                    metric = sample['counter_name']
                    res_info['resource']['metrics'][metric]['unit'] = unit

                stats['measures'] += len(measures[gnocchi_id][metric_name])
            res_info["resource"].update(res_info["resource_extra"])

        try:
            self.batch_measures(measures, gnocchi_data, stats)
        except gnocchi_exc.ClientException as e:
            LOG.error(six.text_type(e))
        except Exception as e:
            LOG.error(six.text_type(e), exc_info=True)

        for gnocchi_id, info in gnocchi_data.items():
            resource = info["resource"]
            resource_type = info["resource_type"]
            resource_extra = info["resource_extra"]
            if not resource_extra:
                continue
            try:
                self._if_not_cached("update", resource_type, resource,
                                    self._update_resource, resource_extra)
            except gnocchi_exc.ClientException as e:
                LOG.error(six.text_type(e))
            except Exception as e:
                LOG.error(six.text_type(e), exc_info=True)

    RE_UNKNOW_METRICS = re.compile(r"Unknown metrics: (.*) \(HTTP 400\)")
    RE_UNKNOW_METRICS_LIST = re.compile(r"([^/ ,]*)/([^,]*)")

    def batch_measures(self, measures, resource_infos, stats):
        # NOTE(sileht): We don't care about errors here; we want
        # resource metadata to always be updated
        try:
            self._gnocchi.metric.batch_resources_metrics_measures(measures)
        except gnocchi_exc.BadRequest as e:
            m = self.RE_UNKNOW_METRICS.match(six.text_type(e))
            if m is None:
                raise

            # NOTE(sileht): Create all missing resources and metrics
            metric_list = self.RE_UNKNOW_METRICS_LIST.findall(m.group(1))
            gnocchi_ids_freshly_handled = set()
            for gnocchi_id, metric_name in metric_list:
                if gnocchi_id in gnocchi_ids_freshly_handled:
                    continue
                resource = resource_infos[gnocchi_id]['resource']
                resource_type = resource_infos[gnocchi_id]['resource_type']
                try:
                    self._if_not_cached("create", resource_type, resource,
                                        self._create_resource)
                except gnocchi_exc.ResourceAlreadyExists:
                    metric = {'resource_id': resource['id'],
                              'name': metric_name}
                    metric.update(resource["metrics"][metric_name])
                    try:
                        self._gnocchi.metric.create(metric)
                    except gnocchi_exc.NamedMetricAlreadyExists:
                        # NOTE(sileht): metric created in the meantime
                        pass
                    except gnocchi_exc.ClientException as e:
                        LOG.error(six.text_type(e))
                        # We cannot post measures for this metric
                        del measures[gnocchi_id][metric_name]
                        if not measures[gnocchi_id]:
                            del measures[gnocchi_id]
                except gnocchi_exc.ClientException as e:
                    LOG.error(six.text_type(e))
                    # We cannot post measures for this resource
                    del measures[gnocchi_id]
                    gnocchi_ids_freshly_handled.add(gnocchi_id)
                else:
                    gnocchi_ids_freshly_handled.add(gnocchi_id)

            # NOTE(sileht): we have created missing resources/metrics,
            # now retry to post measures
            self._gnocchi.metric.batch_resources_metrics_measures(measures)

        # FIXME(sileht): take care of measures removed in stats
        LOG.debug("%(measures)d measures posted against %(metrics)d "
                  "metrics through %(resources)d resources", stats)

    def _create_resource(self, resource_type, resource):
        self._gnocchi.resource.create(resource_type, resource)
        LOG.debug('Resource %s created', resource["id"])

    def _update_resource(self, resource_type, resource, resource_extra):
        self._gnocchi.resource.update(resource_type,
                                      resource["id"],
                                      resource_extra)
        LOG.debug('Resource %s updated', resource["id"])

    def _if_not_cached(self, operation, resource_type, resource, method,
                       *args, **kwargs):
        if self.cache:
            cache_key = resource['id']
            attribute_hash = self._check_resource_cache(cache_key, resource)
            hit = False
            if attribute_hash:
                with self._gnocchi_resource_lock[cache_key]:
                    # NOTE(luogangyi): there is a possibility that the
                    # resource was already built in cache by another
                    # ceilometer-collector when we get the lock here.
                    attribute_hash = self._check_resource_cache(cache_key,
                                                                resource)
                    if attribute_hash:
                        method(resource_type, resource, *args, **kwargs)
                        self.cache.set(cache_key, attribute_hash)
                    else:
                        hit = True
                        LOG.debug('resource cache recheck hit for '
                                  '%s %s', operation, cache_key)
                self._gnocchi_resource_lock.pop(cache_key, None)
            else:
                hit = True
                LOG.debug('Resource cache hit for %s %s', operation,
                          cache_key)
            if hit and operation == "create":
                raise gnocchi_exc.ResourceAlreadyExists()
        else:
            method(resource_type, resource, *args, **kwargs)

    def _check_resource_cache(self, key, resource_data):
        cached_hash = self.cache.get(key)
        attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics",
                                               resource_data.items())))
        if not cached_hash or cached_hash != attribute_hash:
            return attribute_hash
        else:
            return None

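A standalone sketch of the `_check_resource_cache` idea above: hash everything except the 'metrics' key and only touch the Gnocchi API when that hash changes. A plain dict stands in for the oslo_cache region (an assumption of this sketch), and the resource values are made up:

    def attribute_hash(resource_data):
        # Same trick as _check_resource_cache: skip the 'metrics' key and
        # hash the remaining attribute pairs.
        return hash(frozenset(filter(lambda x: x[0] != "metrics",
                                     resource_data.items())))


    cache = {}  # stand-in for the oslo_cache region


    def needs_update(resource):
        cached = cache.get(resource['id'])
        current = attribute_hash(resource)
        if cached != current:
            cache[resource['id']] = current
            return True
        return False


    res = {'id': 'inst-1', 'flavor': 'm1.small', 'metrics': {'cpu': {}}}
    print(needs_update(res))  # True: first sighting
    print(needs_update(res))  # False: attributes unchanged
    res['flavor'] = 'm1.large'
    print(needs_update(res))  # True: an attribute changed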
@@ -1,118 +0,0 @@
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from oslo_config import cfg
from oslo_log import log
import requests

from ceilometer import dispatcher
from ceilometer.i18n import _LE

LOG = log.getLogger(__name__)

http_dispatcher_opts = [
    cfg.StrOpt('target',
               default='',
               help='The target where the http request will be sent. '
                    'If this is not set, no data will be posted. For '
                    'example: target = http://hostname:1234/path'),
    cfg.StrOpt('event_target',
               help='The target for event data where the http request '
                    'will be sent to. If this is not set, it will default '
                    'to same as Sample target.'),
    cfg.IntOpt('timeout',
               default=5,
               help='The max time in seconds to wait for a request to '
                    'timeout.'),
]

cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http")


class HttpDispatcher(dispatcher.MeterDispatcherBase,
                     dispatcher.EventDispatcherBase):
    """Dispatcher class for posting metering/event data into a http target.

    To enable this dispatcher, the following option needs to be present in
    ceilometer.conf file::

        [DEFAULT]
        meter_dispatchers = http
        event_dispatchers = http

    Dispatcher specific options can be added as follows::

        [dispatcher_http]
        target = www.example.com
        event_target = www.example.com
        timeout = 2
    """

    def __init__(self, conf):
        super(HttpDispatcher, self).__init__(conf)
        self.headers = {'Content-type': 'application/json'}
        self.timeout = self.conf.dispatcher_http.timeout
        self.target = self.conf.dispatcher_http.target
        self.event_target = (self.conf.dispatcher_http.event_target or
                             self.target)

    def record_metering_data(self, data):
        if self.target == '':
            # if the target was not set, do not do anything
            LOG.error(_LE('Dispatcher target was not set, no meter will '
                          'be posted. Set the target in the ceilometer.conf '
                          'file.'))
            return

        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.debug(
                'metering data %(counter_name)s '
                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
                {'counter_name': meter['counter_name'],
                 'resource_id': meter['resource_id'],
                 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                 'counter_volume': meter['counter_volume']})
            try:
                # Every meter should be posted to the target
                res = requests.post(self.target,
                                    data=json.dumps(meter),
                                    headers=self.headers,
                                    timeout=self.timeout)
                LOG.debug('Message posting finished with status code '
                          '%d.', res.status_code)
            except Exception as err:
                LOG.exception(_LE('Failed to record metering data: %s.'), err)

    def record_events(self, events):
        if not isinstance(events, list):
            events = [events]

        for event in events:
            res = None
            try:
                res = requests.post(self.event_target, data=event,
                                    headers=self.headers,
                                    timeout=self.timeout)
                res.raise_for_status()
            except Exception:
                error_code = res.status_code if res else 'unknown'
                LOG.exception(_LE('Status Code: %(code)s. Failed to '
                                  'dispatch event: %(event)s'),
                              {'code': error_code, 'event': event})

@@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import exceptions
from oslo_config import cfg
from oslo_log import log
import requests
import six

from ceilometer.agent import plugin_base
from ceilometer import keystone_client
from ceilometer import sample


LOG = log.getLogger(__name__)

SERVICE_OPTS = [
    cfg.StrOpt('kwapi',
               default='energy',
               help='Kwapi service type.'),
]

cfg.CONF.register_opts(SERVICE_OPTS, group='service_types')


class KwapiClient(object):
    """Kwapi API client."""

    def __init__(self, url, token=None):
        """Initializes client."""
        self.url = url
        self.token = token

    def iter_probes(self):
        """Returns a list of dicts describing all probes."""
        probes_url = self.url + '/probes/'
        headers = {}
        if self.token is not None:
            headers = {'X-Auth-Token': self.token}
        timeout = cfg.CONF.http_timeout
        request = requests.get(probes_url, headers=headers, timeout=timeout)
        message = request.json()
        probes = message['probes']
        for key, value in six.iteritems(probes):
            probe_dict = value
            probe_dict['id'] = key
            yield probe_dict


class _Base(plugin_base.PollsterBase):
    """Base class for the Kwapi pollster, derived from PollsterBase."""

    @property
    def default_discovery(self):
        return 'endpoint:%s' % cfg.CONF.service_types.kwapi

    @staticmethod
    def get_kwapi_client(ksclient, endpoint):
        """Returns a KwapiClient configured with the proper url and token."""
        return KwapiClient(endpoint, keystone_client.get_auth_token(ksclient))

    CACHE_KEY_PROBE = 'kwapi.probes'

    def _iter_probes(self, ksclient, cache, endpoint):
        """Iterate over all probes."""
        key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE)
        if key not in cache:
            cache[key] = self._get_probes(ksclient, endpoint)
        return iter(cache[key])

    def _get_probes(self, ksclient, endpoint):
        try:
            client = self.get_kwapi_client(ksclient, endpoint)
        except exceptions.EndpointNotFound:
            LOG.debug("Kwapi endpoint not found")
            return []
        return list(client.iter_probes())


class EnergyPollster(_Base):
    """Measures energy consumption."""
    def get_samples(self, manager, cache, resources):
        """Returns all samples."""
        for endpoint in resources:
            for probe in self._iter_probes(manager.keystone, cache, endpoint):
                yield sample.Sample(
                    name='energy',
                    type=sample.TYPE_CUMULATIVE,
                    unit='kWh',
                    volume=probe['kwh'],
                    user_id=None,
                    project_id=None,
                    resource_id=probe['id'],
                    resource_metadata={}
                )


class PowerPollster(_Base):
    """Measures power consumption."""
    def get_samples(self, manager, cache, resources):
        """Returns all samples."""
        for endpoint in resources:
            for probe in self._iter_probes(manager.keystone, cache, endpoint):
                yield sample.Sample(
                    name='power',
                    type=sample.TYPE_GAUGE,
                    unit='W',
                    volume=probe['w'],
                    user_id=None,
                    project_id=None,
                    resource_id=probe['id'],
                    resource_metadata={}
                )

@ -1,294 +0,0 @@
|
|||
#
|
||||
# Copyright 2013 Rackspace Hosting.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import fnmatch
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
|
||||
from ceilometer import declarative
|
||||
from ceilometer.event.storage import models
|
||||
from ceilometer.i18n import _
|
||||
|
||||
OPTS = [
|
||||
cfg.StrOpt('definitions_cfg_file',
|
||||
default="event_definitions.yaml",
|
||||
help="Configuration file for event definitions."
|
||||
),
|
||||
cfg.BoolOpt('drop_unmatched_notifications',
|
||||
default=False,
|
||||
help='Drop notifications if no event definition matches. '
|
||||
'(Otherwise, we convert them with just the default traits)'),
|
||||
cfg.MultiStrOpt('store_raw',
|
||||
default=[],
|
||||
help='Store the raw notification for select priority '
|
||||
'levels (info and/or error). By default, raw details are '
|
||||
'not captured.')
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(OPTS, group='event')
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class TraitDefinition(declarative.Definition):
|
||||
def __init__(self, name, trait_cfg, plugin_manager):
|
||||
super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager)
|
||||
type_name = (trait_cfg.get('type', 'text')
|
||||
if isinstance(trait_cfg, dict) else 'text')
|
||||
self.trait_type = models.Trait.get_type_by_name(type_name)
|
||||
if self.trait_type is None:
|
||||
raise declarative.EventDefinitionException(
|
||||
_("Invalid trait type '%(type)s' for trait %(trait)s")
|
||||
% dict(type=type_name, trait=name), self.cfg)
|
||||
|
||||
def to_trait(self, notification_body):
|
||||
value = self.parse(notification_body)
|
||||
if value is None:
|
||||
return None
|
||||
|
||||
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
|
||||
# for null fields for things like dates.
|
||||
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
|
||||
return None
|
||||
|
||||
value = models.Trait.convert_value(self.trait_type, value)
|
||||
return models.Trait(self.name, self.trait_type, value)
|
||||
|
||||
|
||||
class EventDefinition(object):
|
||||
|
||||
DEFAULT_TRAITS = dict(
|
||||
service=dict(type='text', fields='publisher_id'),
|
||||
request_id=dict(type='text', fields='_context_request_id'),
|
||||
project_id=dict(type='text', fields=['payload.tenant_id',
|
||||
'_context_tenant']),
|
||||
user_id=dict(type='text', fields=['payload.user_id',
|
||||
'_context_user_id']),
|
||||
# TODO(dikonoor):tenant_id is old terminology and should
|
||||
# be deprecated
|
||||
tenant_id=dict(type='text', fields=['payload.tenant_id',
|
||||
'_context_tenant']),
|
||||
)
|
||||
|
||||
def __init__(self, definition_cfg, trait_plugin_mgr):
|
||||
self._included_types = []
|
||||
self._excluded_types = []
|
||||
self.traits = dict()
|
||||
self.cfg = definition_cfg
|
||||
self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw]
|
||||
|
||||
try:
|
||||
event_type = definition_cfg['event_type']
|
||||
traits = definition_cfg['traits']
|
||||
except KeyError as err:
|
||||
raise declarative.EventDefinitionException(
|
||||
_("Required field %s not specified") % err.args[0], self.cfg)
|
||||
|
||||
if isinstance(event_type, six.string_types):
|
||||
event_type = [event_type]
|
||||
|
||||
for t in event_type:
|
||||
if t.startswith('!'):
|
||||
self._excluded_types.append(t[1:])
|
||||
else:
|
||||
self._included_types.append(t)
|
||||
|
||||
if self._excluded_types and not self._included_types:
|
||||
self._included_types.append('*')
|
||||
|
||||
for trait_name in self.DEFAULT_TRAITS:
|
||||
self.traits[trait_name] = TraitDefinition(
|
||||
trait_name,
|
||||
self.DEFAULT_TRAITS[trait_name],
|
||||
trait_plugin_mgr)
|
||||
for trait_name in traits:
|
||||
self.traits[trait_name] = TraitDefinition(
|
||||
trait_name,
|
||||
traits[trait_name],
|
||||
trait_plugin_mgr)
|
||||
|
||||
def included_type(self, event_type):
|
||||
for t in self._included_types:
|
||||
if fnmatch.fnmatch(event_type, t):
|
||||
return True
|
||||
return False
|
||||
|
||||
def excluded_type(self, event_type):
|
||||
for t in self._excluded_types:
|
||||
if fnmatch.fnmatch(event_type, t):
|
||||
return True
|
||||
return False
|
||||
|
||||
def match_type(self, event_type):
|
||||
return (self.included_type(event_type)
|
||||
and not self.excluded_type(event_type))
|
||||
|
||||
@property
|
||||
def is_catchall(self):
|
||||
return '*' in self._included_types and not self._excluded_types
|
||||
|
||||
@staticmethod
|
||||
def _extract_when(body):
|
||||
"""Extract the generated datetime from the notification."""
|
||||
# NOTE: I am keeping the logic the same as it was in the collector,
|
||||
# However, *ALL* notifications should have a 'timestamp' field, it's
|
||||
# part of the notification envelope spec. If this was put here because
|
||||
# some openstack project is generating notifications without a
|
||||
# timestamp, then that needs to be filed as a bug with the offending
|
||||
# project (mdragon)
|
||||
when = body.get('timestamp', body.get('_context_timestamp'))
|
||||
if when:
|
||||
return timeutils.normalize_time(timeutils.parse_isotime(when))
|
||||
|
||||
return timeutils.utcnow()
|
||||
|
||||
def to_event(self, notification_body):
|
||||
event_type = notification_body['event_type']
|
        message_id = notification_body['message_id']
        when = self._extract_when(notification_body)

        traits = (self.traits[t].to_trait(notification_body)
                  for t in self.traits)
        # Only accept non-None value traits ...
        traits = [trait for trait in traits if trait is not None]
        raw = (notification_body
               if notification_body.get('priority') in self.raw_levels else {})
        event = models.Event(message_id, event_type, when, traits, raw)
        return event


class NotificationEventsConverter(object):
    """Notification Event Converter

    The NotificationEventsConverter handles the conversion of Notifications
    from openstack systems into Ceilometer Events.

    The conversion is handled according to event definitions in a config file.

    The config is a list of event definitions. Order is significant; a
    notification will be processed according to the LAST definition that
    matches its event_type. (We use the last matching definition because that
    allows you to use YAML merge syntax in the definitions file.)
    Each definition is a dictionary with the following keys (all are
    required):

    - event_type: this is a list of notification event_types this definition
      will handle. These can be wildcarded with unix shell glob (not regex!)
      wildcards.
      An exclusion listing (starting with a '!') will exclude any types listed
      from matching. If ONLY exclusions are listed, the definition will match
      anything not matching the exclusions.
      This item can also be a string, which will be taken as equivalent to a
      1-item list.

      Examples:

      * ['compute.instance.exists'] will only match
        compute.instance.exists notifications
      * "compute.instance.exists" Same as above.
      * ["image.create", "image.delete"] will match
        image.create and image.delete, but not anything else.
      * "compute.instance.*" will match
        compute.instance.create.start but not image.upload
      * ['*.start', '*.end', '!scheduler.*'] will match
        compute.instance.create.start and image.delete.end,
        but NOT compute.instance.exists or
        scheduler.run_instance.start
      * '!image.*' matches any notification except image
        notifications.
      * ['*', '!image.*'] same as above.

    - traits: (dict) The keys are trait names, the values are the trait
      definitions. Each trait definition is a dictionary with the following
      keys:

      - type (optional): The data type for this trait. (as a string)
        Valid options are: 'text', 'int', 'float' and 'datetime'; defaults to
        'text' if not specified.
      - fields: a path specification for the field(s) in the notification you
        wish to extract. The paths can be specified with a dot syntax
        (e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
        also supported.
        In either case, if the key for the field you are looking for contains
        special characters, like '.', it will need to be quoted (with double
        or single quotes) like so::

            "payload.image_meta.'org.openstack__1__architecture'"

        The syntax used for the field specification is a variant of JSONPath,
        and is fairly flexible.
        (see: https://github.com/kennknowles/python-jsonpath-rw for more info)
        Specifications can be written to match multiple possible fields; the
        value for the trait will be derived from the matching fields that
        exist and have a non-null (i.e. is not None) value in the
        notification.
        By default the value will be the first such field. (plugins can alter
        that, if they wish)

        This configuration value is normally a string; for convenience, it
        can be specified as a list of specifications, which will be OR'ed
        together (a union query in jsonpath terms)
      - plugin (optional): (dictionary) with the following keys:

        - name: (string) name of a plugin to load
        - parameters: (optional) Dictionary of keyword args to pass
          to the plugin on initialization. See documentation on each plugin to
          see what arguments it accepts.

        For convenience, this value can also be specified as a string, which
        is interpreted as a plugin name, which will be loaded with no
        parameters.

    """

    def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
        self.definitions = [
            EventDefinition(event_def, trait_plugin_mgr)
            for event_def in reversed(events_config)]
        if add_catchall and not any(d.is_catchall for d in self.definitions):
            event_def = dict(event_type='*', traits={})
            self.definitions.append(EventDefinition(event_def,
                                                    trait_plugin_mgr))

    def to_event(self, notification_body):
        event_type = notification_body['event_type']
        message_id = notification_body['message_id']
        edef = None
        for d in self.definitions:
            if d.match_type(event_type):
                edef = d
                break

        if edef is None:
            msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
                   % dict(type=event_type, msgid=message_id))
            if cfg.CONF.event.drop_unmatched_notifications:
                LOG.debug(msg)
            else:
                # If drop_unmatched_notifications is False, this should
                # never happen. (mdragon)
                LOG.error(msg)
            return None

        return edef.to_event(notification_body)


def setup_events(trait_plugin_mgr):
    """Set up the event definitions from the yaml config file."""
    return NotificationEventsConverter(
        declarative.load_definitions([], cfg.CONF.event.definitions_cfg_file),
        trait_plugin_mgr,
        add_catchall=not cfg.CONF.event.drop_unmatched_notifications)
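To make the event_type matching rules above concrete, here is a small,
self-contained sketch of the glob/exclusion semantics described in the
docstring. It is an illustration only, not the EventDefinition
implementation:

    from fnmatch import fnmatch

    def match_type(event_type, patterns):
        if isinstance(patterns, str):      # a bare string is a 1-item list
            patterns = [patterns]
        included = [p for p in patterns if not p.startswith('!')]
        excluded = [p[1:] for p in patterns if p.startswith('!')]
        if any(fnmatch(event_type, p) for p in excluded):
            return False
        # If ONLY exclusions are listed, match anything not excluded.
        return not included or any(fnmatch(event_type, p) for p in included)

    assert match_type('compute.instance.create.start',
                      ['*.start', '*.end', '!scheduler.*'])
    assert not match_type('scheduler.run_instance.start',
                          ['*.start', '*.end', '!scheduler.*'])
    assert match_type('volume.create.end', '!image.*')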
@ -1,67 +0,0 @@
# Copyright 2012-2014 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from stevedore import extension

from ceilometer.event import converter as event_converter
from ceilometer.i18n import _LE
from ceilometer import messaging

LOG = log.getLogger(__name__)


class EventsNotificationEndpoint(object):
    def __init__(self, manager):
        super(EventsNotificationEndpoint, self).__init__()
        LOG.debug('Loading event definitions')
        self.event_converter = event_converter.setup_events(
            extension.ExtensionManager(
                namespace='ceilometer.event.trait_plugin'))
        self.manager = manager

    def info(self, notifications):
        """Convert message at info level to Ceilometer Event.

        :param notifications: list of notifications
        """
        return self.process_notification('info', notifications)

    def error(self, notifications):
        """Convert message at error level to Ceilometer Event.

        :param notifications: list of notifications
        """
        return self.process_notification('error', notifications)

    def process_notification(self, priority, notifications):
        for notification in notifications:
            # NOTE: the rpc layer currently rips out the notification
            # delivery_info, which is critical to determining the
            # source of the notification. This will have to get added back
            # later.
            notification = messaging.convert_to_old_notification_format(
                priority, notification)
            try:
                event = self.event_converter.to_event(notification)
                if event is not None:
                    with self.manager.publisher() as p:
                        p(event)
            except Exception:
                if not cfg.CONF.notification.ack_on_event_error:
                    return oslo_messaging.NotificationResult.REQUEUE
                LOG.error(_LE('Failed to process a notification'),
                          exc_info=True)
        return oslo_messaging.NotificationResult.HANDLED
@ -18,7 +18,6 @@ import pymongo
from ceilometer.event.storage import pymongo_base
from ceilometer import storage
from ceilometer.storage import impl_mongodb
from ceilometer.storage.mongo import utils as pymongo_utils

LOG = log.getLogger(__name__)
@ -52,6 +51,31 @@ class Connection(pymongo_base.Connection):
        # needed.
        self.upgrade()

    @staticmethod
    def update_ttl(ttl, ttl_index_name, index_field, coll):
        """Update or create time_to_live indexes.

        :param ttl: time to live in seconds.
        :param ttl_index_name: name of the index we want to update or create.
        :param index_field: field with the index that we need to update.
        :param coll: collection whose indexes need to be updated.
        """
        indexes = coll.index_information()
        if ttl <= 0:
            if ttl_index_name in indexes:
                coll.drop_index(ttl_index_name)
            return

        if ttl_index_name in indexes:
            return coll.database.command(
                'collMod', coll.name,
                index={'keyPattern': {index_field: pymongo.ASCENDING},
                       'expireAfterSeconds': ttl})

        coll.create_index([(index_field, pymongo.ASCENDING)],
                          expireAfterSeconds=ttl,
                          name=ttl_index_name)

    def upgrade(self):
        # create collection if not present
        if 'event' not in self.db.conn.collection_names():
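The TTL handling above reduces to three pymongo operations: drop_index when
ttl <= 0, a collMod command when the index already exists, and create_index
otherwise. A minimal standalone sketch (a reachable mongod is assumed; the
'demo' database name is illustrative):

    import pymongo

    coll = pymongo.MongoClient('mongodb://localhost:27017').demo.event
    ttl, ttl_index_name, index_field = 3600, 'event_ttl', 'timestamp'

    if ttl_index_name in coll.index_information():
        # Retarget the existing TTL index in place.
        coll.database.command(
            'collMod', coll.name,
            index={'keyPattern': {index_field: pymongo.ASCENDING},
                   'expireAfterSeconds': ttl})
    else:
        coll.create_index([(index_field, pymongo.ASCENDING)],
                          expireAfterSeconds=ttl, name=ttl_index_name)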
@ -65,8 +89,7 @@ class Connection(pymongo_base.Connection):
            name='event_type_idx'
        )
        ttl = cfg.CONF.database.event_time_to_live
        impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp',
                                           self.db.event)
        self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event)

    def clear(self):
        self.conn.drop_database(self.db.name)
@ -15,7 +15,6 @@
from __future__ import absolute_import
import datetime
import os

from oslo_config import cfg
from oslo_db import exception as dbexc
@ -137,12 +136,8 @@ class Connection(base.Connection):
        self._engine_facade = db_session.EngineFacade(url, **options)

    def upgrade(self):
        # NOTE(gordc): to minimise memory, only import migration when needed
        from oslo_db.sqlalchemy import migration
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            '..', '..', 'storage', 'sqlalchemy',
                            'migrate_repo')
        migration.db_sync(self._engine_facade.get_engine(), path)
        engine = self._engine_facade.get_engine()
        models.Base.metadata.create_all(engine)

    def clear(self):
        engine = self._engine_facade.get_engine()
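The replacement upgrade path above relies on metadata.create_all rather than
a migration run. A minimal sketch of that approach against an in-memory
SQLite engine (SQLAlchemy 1.x assumed; the Event model here is illustrative,
not ceilometer's real event schema):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Event(Base):
        __tablename__ = 'event'
        id = sa.Column(sa.Integer, primary_key=True)
        message_id = sa.Column(sa.String(50), unique=True)

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)   # idempotent: safe to call on upgrade
    assert 'event' in sa.inspect(engine).get_table_names()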
@ -1,230 +0,0 @@
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from debtcollector import moves
from oslo_log import log
from oslo_utils import timeutils
import six

from ceilometer.i18n import _LW

LOG = log.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
    """Base class for plugins.

    It converts notification fields to Trait values.
    """

    support_return_all_values = False
    """If True, an exception will be raised if the user expects
    the plugin to return one trait per match_list, but
    the plugin doesn't allow/support that.
    """

    def __init__(self, **kw):
        """Setup the trait plugin.

        For each Trait definition a plugin is used on in a conversion
        definition, a new instance of the plugin will be created, and
        initialized with the parameters (if any) specified in the
        config file.

        :param kw: the parameters specified in the event definitions file.

        """
        super(TraitPluginBase, self).__init__()

    @moves.moved_method('trait_values', version=6.0, removal_version="?")
    def trait_value(self, match_list):
        pass

    def trait_values(self, match_list):
        """Convert a set of fields to one or multiple Trait values.

        This method is called each time a trait is attempted to be extracted
        from a notification. It will be called *even if* no matching fields
        are found in the notification (in that case, the match_list will be
        empty). If this method returns None, the trait *will not* be added to
        the event. Any other value returned by this method will be used as
        the value for the trait. Values returned will be coerced to the
        appropriate type for the trait.

        :param match_list: A list (may be empty if no matches) of *tuples*.
          Each tuple is (field_path, value) where field_path is the jsonpath
          for that specific field.

        Example::

            trait's fields definition: ['payload.foobar',
                                        'payload.baz',
                                        'payload.thing.*']
            notification body:
            {
                'message_id': '12345',
                'publisher': 'someservice.host',
                'payload': {
                    'foobar': 'test',
                    'thing': {
                        'bar': 12,
                        'boing': 13,
                    }
                }
            }
            match_list will be: [('payload.foobar', 'test'),
                                 ('payload.thing.bar', 12),
                                 ('payload.thing.boing', 13)]

        Here is a plugin that emulates the default (no plugin) behavior:

        .. code-block:: python

          class DefaultPlugin(TraitPluginBase):
              "Plugin that returns the first field value."

              def __init__(self, **kw):
                  super(DefaultPlugin, self).__init__()

              def trait_value(self, match_list):
                  if not match_list:
                      return None
                  return match_list[0][1]
        """

        # For backwards compatibility for the renamed method.
        return [self.trait_value(match_list)]


class SplitterTraitPlugin(TraitPluginBase):
    """Plugin that splits a piece off of a string value."""

    support_return_all_values = True

    def __init__(self, separator=".", segment=0, max_split=None, **kw):
        """Set up how to split the field.

        :param separator: String to split on. default "."
        :param segment: Which segment to return. (int) default 0
        :param max_split: Limit number of splits. Default: None (no limit)
        """
        LOG.warning(_LW('split plugin is deprecated, '
                        'add ".`split(%(sep)s, %(segment)d, '
                        '%(max_split)d)`" to your jsonpath instead') %
                    dict(sep=separator,
                         segment=segment,
                         max_split=(-1 if max_split is None
                                    else max_split)))

        self.separator = separator
        self.segment = segment
        self.max_split = max_split
        super(SplitterTraitPlugin, self).__init__(**kw)

    def trait_values(self, match_list):
        return [self._trait_value(match)
                for match in match_list]

    def _trait_value(self, match):
        value = six.text_type(match[1])
        if self.max_split is not None:
            values = value.split(self.separator, self.max_split)
        else:
            values = value.split(self.separator)
        try:
            return values[self.segment]
        except IndexError:
            return None


class BitfieldTraitPlugin(TraitPluginBase):
    """Plugin to set flags on a bitfield."""
    def __init__(self, initial_bitfield=0, flags=None, **kw):
        """Setup bitfield trait.

        :param initial_bitfield: (int) initial value for the bitfield
                                 Flags that are set will be OR'ed with this.
        :param flags: List of dictionaries defining bitflags to set depending
                      on data in the notification. Each one has the following
                      keys:
                      path: jsonpath of field to match.
                      bit: (int) number of bit to set (lsb is bit 0)
                      value: set bit if corresponding field's value
                             matches this. If value is not provided,
                             bit will be set if the field exists (and
                             is non-null), regardless of its value.

        """
        self.initial_bitfield = initial_bitfield
        if flags is None:
            flags = []
        self.flags = flags
        super(BitfieldTraitPlugin, self).__init__(**kw)

    def trait_values(self, match_list):
        matches = dict(match_list)
        bitfield = self.initial_bitfield
        for flagdef in self.flags:
            path = flagdef['path']
            bit = 2 ** int(flagdef['bit'])
            if path in matches:
                if 'value' in flagdef:
                    if matches[path] == flagdef['value']:
                        bitfield |= bit
                else:
                    bitfield |= bit
        return [bitfield]


class TimedeltaPluginMissedFields(Exception):
    def __init__(self):
        msg = ('It is required to use two timestamp fields with the '
               'Timedelta plugin.')
        super(TimedeltaPluginMissedFields, self).__init__(msg)


class TimedeltaPlugin(TraitPluginBase):
    """Setup timedelta meter volume of two timestamps fields.

    Example::

        trait's fields definition: ['payload.created_at',
                                    'payload.launched_at']
        value is created as the total seconds between the 'launched_at'
        and 'created_at' timestamps.
    """
    # TODO(idegtiarov): refactor code to have meter_plugins separate from
    # trait_plugins

    def trait_value(self, match_list):
        if len(match_list) != 2:
            LOG.warning(_LW('Timedelta plugin requires two timestamp fields'
                            ' to create a timedelta value.'))
            return
        start, end = match_list
        try:
            start_time = timeutils.parse_isotime(start[1])
            end_time = timeutils.parse_isotime(end[1])
        except Exception as err:
            LOG.warning(_LW('Failed to parse date from set fields, both '
                            'fields %(start)s and %(end)s must be datetime: '
                            '%(err)s') %
                        dict(start=start[0], end=end[0], err=err)
                        )
            return
        return abs((end_time - start_time).total_seconds())
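The bitfield computation can be exercised standalone. This sketch mirrors
BitfieldTraitPlugin.trait_values above with hand-built match data (the paths
and flag definitions are illustrative):

    flags = [
        {'path': 'payload.foo', 'bit': 0, 'value': 'on'},  # bit 0 if foo == 'on'
        {'path': 'payload.bar', 'bit': 1},                 # bit 1 if bar exists
    ]
    matches = dict([('payload.foo', 'on'), ('payload.bar', 12)])
    bitfield = 0
    for flagdef in flags:
        bit = 2 ** int(flagdef['bit'])
        if flagdef['path'] in matches:
            if 'value' not in flagdef or \
                    matches[flagdef['path']] == flagdef['value']:
                bitfield |= bit
    assert bitfield == 0b11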
@ -1,47 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

EXCHANGE_OPTS = [
    cfg.StrOpt('heat_control_exchange',
               default='heat',
               help="Exchange name for Heat notifications."),
    cfg.StrOpt('glance_control_exchange',
               default='glance',
               help="Exchange name for Glance notifications."),
    cfg.StrOpt('keystone_control_exchange',
               default='keystone',
               help="Exchange name for Keystone notifications."),
    cfg.StrOpt('cinder_control_exchange',
               default='cinder',
               help="Exchange name for Cinder notifications."),
    cfg.StrOpt('sahara_control_exchange',
               default='sahara',
               help="Exchange name for Data Processing notifications."),
    cfg.StrOpt('swift_control_exchange',
               default='swift',
               help="Exchange name for Swift notifications."),
    cfg.StrOpt('magnum_control_exchange',
               default='magnum',
               help="Exchange name for Magnum notifications."),
    cfg.StrOpt('trove_control_exchange',
               default='trove',
               help="Exchange name for DBaaS notifications."),
    cfg.StrOpt('zaqar_control_exchange',
               default='zaqar',
               help="Exchange name for Messaging service notifications."),
    cfg.StrOpt('dns_control_exchange',
               default='central',
               help="Exchange name for DNS service notifications."),
]
@ -1,98 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils

from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer import nova_client


LOG = log.getLogger(__name__)

OPTS = [
    cfg.StrOpt('url_scheme',
               default='snmp://',
               help='URL scheme to use for hardware nodes.'),
    cfg.StrOpt('readonly_user_name',
               default='ro_snmp_user',
               help='SNMPd user name of all nodes running in the cloud.'),
    cfg.StrOpt('readonly_user_password',
               default='password',
               help='SNMPd password of all the nodes running in the cloud.',
               secret=True),
]
cfg.CONF.register_opts(OPTS, group='hardware')


class NodesDiscoveryTripleO(plugin_base.DiscoveryBase):
    def __init__(self):
        super(NodesDiscoveryTripleO, self).__init__()
        self.nova_cli = nova_client.Client()
        self.last_run = None
        self.instances = {}

    @staticmethod
    def _address(instance, field):
        return instance.addresses['ctlplane'][0].get(field)

    def discover(self, manager, param=None):
        """Discover resources to monitor.

        instance_get_all will return all instances if last_run is None,
        and will return only the instances changed since the last_run time.
        """
        try:
            instances = self.nova_cli.instance_get_all(self.last_run)
        except Exception:
            # NOTE(zqfan): instance_get_all is wrapped and will log the
            # exception when there is any error. There is no need to raise
            # it again or print it one more time.
            return []

        for instance in instances:
            if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted',
                                                                  'error']:
                self.instances.pop(instance.id, None)
            else:
                self.instances[instance.id] = instance
        self.last_run = timeutils.utcnow(True).isoformat()

        resources = []
        for instance in self.instances.values():
            try:
                ip_address = self._address(instance, 'addr')
                final_address = (
                    cfg.CONF.hardware.url_scheme +
                    cfg.CONF.hardware.readonly_user_name + ':' +
                    cfg.CONF.hardware.readonly_user_password + '@' +
                    ip_address)

                resource = {
                    'resource_id': instance.id,
                    'resource_url': final_address,
                    'mac_addr': self._address(instance,
                                              'OS-EXT-IPS-MAC:mac_addr'),
                    'image_id': instance.image['id'],
                    'flavor_id': instance.flavor['id']
                }

                resources.append(resource)
            except KeyError:
                LOG.error(_("Couldn't obtain IP address of "
                            "instance %s") % instance.id)

        return resources
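The discovery above boils down to composing an SNMP resource URL from the
[hardware] config options. A sketch with the default option values
substituted in (the address is illustrative):

    url_scheme, user, password = 'snmp://', 'ro_snmp_user', 'password'
    ip_address = '192.0.2.10'   # example ctlplane address
    final_address = url_scheme + user + ':' + password + '@' + ip_address
    assert final_address == 'snmp://ro_snmp_user:password@192.0.2.10'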
@ -1,26 +0,0 @@
#
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from stevedore import driver


def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'):
    """Get inspector driver and load it.

    :param parsed_url: urlparse.SplitResult object for the inspector
    :param namespace: Namespace to use to look for drivers.
    """
    loaded_driver = driver.DriverManager(namespace, parsed_url.scheme)
    return loaded_driver.driver()
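Hypothetical usage of get_inspector, resolving the driver registered for a
URL scheme (assumes ceilometer's 'ceilometer.hardware.inspectors' entry
points are installed):

    from oslo_utils import netutils

    parsed_url = netutils.urlsplit('snmp://ro_snmp_user:password@10.0.0.5')
    inspector = get_inspector(parsed_url)   # loads the driver named 'snmp'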
@ -1,47 +0,0 @@
#
# Copyright 2014 ZHAW SoE
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
#          Toni Zehnder <zehndton@students.zhaw.ch>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hardware components"""

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class Inspector(object):
    @abc.abstractmethod
    def inspect_generic(self, host, cache, extra_metadata, param):
        """A generic inspect function.

        :param host: the target host
        :param cache: cache passed from the pollster
        :param extra_metadata: extra dict to be used as metadata
        :param param: a dict of inspector specific param
        :return: an iterator of (value, metadata, extra)
        :return value: the sample value
        :return metadata: dict to construct sample's metadata
        :return extra: dict of extra metadata to help constructing sample
        """

    def prepare_params(self, param):
        """Parse the params to a format which the inspector itself recognizes.

        :param param: inspector params from the meter definition file
        :return: a dict of params which the inspector recognizes
        """
        return {}
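A toy Inspector implementation satisfying the contract above (illustrative
only; it yields one constant reading regardless of host or param):

    class StaticInspector(Inspector):
        def inspect_generic(self, host, cache, extra_metadata, param):
            yield (42, {'device': 'demo'}, extra_metadata or {})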
@ -1,313 +0,0 @@
#
# Copyright 2014 ZHAW SoE
# Copyright 2014 Intel Corp
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
#          Toni Zehnder <zehndton@students.zhaw.ch>
#          Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector for collecting data over SNMP"""

import copy

from pysnmp.entity.rfc3413.oneliner import cmdgen
import six

from ceilometer.hardware.inspector import base


class SNMPException(Exception):
    pass


def parse_snmp_return(ret, is_bulk=False):
    """Check the return value of snmp operations

    :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data)
                returned by pysnmp
    :param is_bulk: True if the ret value is from GetBulkRequest
    :return: a tuple of (err, data)
             err: True if error found, or False if no error found
             data: a string of error description if error found, or the
                   actual return data of the snmp operation
    """
    err = True
    (errIndication, errStatus, errIdx, varBinds) = ret
    if errIndication:
        data = errIndication
    elif errStatus:
        if is_bulk:
            varBinds = varBinds[-1]
        data = "%s at %s" % (errStatus.prettyPrint(),
                             errIdx and varBinds[int(errIdx) - 1] or "?")
    else:
        err = False
        data = varBinds
    return err, data


EXACT = 'type_exact'
PREFIX = 'type_prefix'


class SNMPInspector(base.Inspector):
    # Default port
    _port = 161

    _CACHE_KEY_OID = "snmp_cached_oid"

    # NOTE: The following mapping has been moved to the yaml file identified
    # by the config option hardware.meter_definitions_file. However, we still
    # keep the description here for code reading purposes.

    """

    The following mapping defines how to construct the
    (value, metadata, extra) returned by inspect_generic:
    MAPPING = {
        'identifier: {
            'matching_type': EXACT or PREFIX,
            'metric_oid': (oid, value_converter)
            'metadata': {
                metadata_name1: (oid1, value_converter),
                metadata_name2: (oid2, value_converter),
            },
            'post_op': special func to modify the return data,
        },
    }

    For a matching_type of EXACT, each item in the above mapping will
    return exactly one (value, metadata, extra) tuple. The value would be
    returned from an SNMP GetRequest for the oid of 'metric_oid'; the
    metadata dict would be constructed based on the returns from SNMP
    GetRequests for the oids of 'metadata'.

    For a matching_type of PREFIX, an SNMP GetBulkRequest
    would be sent to get values for the oids of 'metric_oid' and
    'metadata' of each item in the above mapping. And each item might
    return multiple (value, metadata, extra) tuples, e.g.
    Suppose we have the following mapping:
    MAPPING = {
        'disk.size.total': {
            'matching_type': PREFIX,
            'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int)
            'metadata': {
                'device': ("1.3.6.1.4.1.2021.9.1.3", str),
                'path': ("1.3.6.1.4.1.2021.9.1.2", str),
            },
            'post_op': None,
        },
    }
    and the SNMP agent has the following oid/value(s):
    {
        '1.3.6.1.4.1.2021.9.1.6.1': 19222656,
        '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2",
        '1.3.6.1.4.1.2021.9.1.2.1': "/"
        '1.3.6.1.4.1.2021.9.1.6.2': 808112,
        '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs",
        '1.3.6.1.4.1.2021.9.1.2.2': "/run",
    }
    So here we'll return 2 instances of (value, metadata, extra):
    (19222656, {'device': "/dev/sda2", 'path': "/"}, None)
    (808112, {'device': "tmpfs", 'path': "/run"}, None)

    The post_op is assumed to be implemented by the new metric developer. It
    could be used to add additional special metadata (e.g. ip address), or
    it could be used to add information into the extra dict, which is
    returned to tell the pollster how to build the final sample, e.g.
    extra.update('project_id': xy, 'user_id': zw)
    """

    def __init__(self):
        super(SNMPInspector, self).__init__()
        self._cmdGen = cmdgen.CommandGenerator()

    def _query_oids(self, host, oids, cache, is_bulk):
        # send GetRequest or GetBulkRequest to get oid values and
        # populate the values into the cache
        authData = self._get_auth_strategy(host)
        transport = cmdgen.UdpTransportTarget((host.hostname,
                                               host.port or self._port))
        oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})

        if is_bulk:
            ret = self._cmdGen.bulkCmd(authData,
                                       transport,
                                       0, 100,
                                       *oids,
                                       lookupValues=True)
        else:
            ret = self._cmdGen.getCmd(authData,
                                      transport,
                                      *oids,
                                      lookupValues=True)
        (error, data) = parse_snmp_return(ret, is_bulk)
        if error:
            raise SNMPException("An error occurred, oids %(oid)s, "
                                "host %(host)s, %(err)s" %
                                dict(oid=oids,
                                     host=host.hostname,
                                     err=data))
        # save result into cache
        if is_bulk:
            for var_bind_table_row in data:
                for name, val in var_bind_table_row:
                    oid_cache[str(name)] = val
        else:
            for name, val in data:
                oid_cache[str(name)] = val

    @staticmethod
    def find_matching_oids(oid_cache, oid, match_type, find_one=True):
        matched = []
        if match_type == PREFIX:
            for key in oid_cache.keys():
                if key.startswith(oid):
                    matched.append(key)
                    if find_one:
                        break
        else:
            if oid in oid_cache:
                matched.append(oid)
        return matched

    @staticmethod
    def get_oid_value(oid_cache, oid_def, suffix=''):
        oid, converter = oid_def
        value = oid_cache[oid + suffix]
        if converter:
            value = converter(value)
        return value

    @classmethod
    def construct_metadata(cls, oid_cache, meta_defs, suffix=''):
        metadata = {}
        for key, oid_def in six.iteritems(meta_defs):
            metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix)
        return metadata

    @classmethod
    def _find_missing_oids(cls, meter_def, cache):
        # find oids that have not been queried and cached yet
        new_oids = []
        oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {})
        # check metric_oid
        if not cls.find_matching_oids(oid_cache,
                                      meter_def['metric_oid'][0],
                                      meter_def['matching_type']):
            new_oids.append(meter_def['metric_oid'][0])
        for metadata in meter_def['metadata'].values():
            if not cls.find_matching_oids(oid_cache,
                                          metadata[0],
                                          meter_def['matching_type']):
                new_oids.append(metadata[0])
        return new_oids

    def inspect_generic(self, host, cache, extra_metadata, param):
        # the snmp definition for the corresponding meter
        meter_def = param
        # collect oids that need to be queried
        oids_to_query = self._find_missing_oids(meter_def, cache)
        # query oids and populate into caches
        if oids_to_query:
            self._query_oids(host, oids_to_query, cache,
                             meter_def['matching_type'] == PREFIX)
        # construct (value, metadata, extra)
        oid_cache = cache[self._CACHE_KEY_OID]
        # find all oids needed to construct the final sample values;
        # for a matching type of EXACT, only 1 sample would be generated,
        # for a matching type of PREFIX, multiple samples could be generated
        oids_for_sample_values = self.find_matching_oids(
            oid_cache,
            meter_def['metric_oid'][0],
            meter_def['matching_type'],
            False)
        input_extra_metadata = extra_metadata

        for oid in oids_for_sample_values:
            suffix = oid[len(meter_def['metric_oid'][0]):]
            value = self.get_oid_value(oid_cache,
                                       meter_def['metric_oid'],
                                       suffix)
            # get the metadata for this sample value
            metadata = self.construct_metadata(oid_cache,
                                               meter_def['metadata'],
                                               suffix)
            extra_metadata = copy.deepcopy(input_extra_metadata) or {}
            # call post_op for special cases
            if meter_def['post_op']:
                func = getattr(self, meter_def['post_op'], None)
                if func:
                    value = func(host, cache, meter_def,
                                 value, metadata, extra_metadata,
                                 suffix)
            yield (value, metadata, extra_metadata)

    def _post_op_memory_avail_to_used(self, host, cache, meter_def,
                                      value, metadata, extra, suffix):
        _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0"
        if _memory_total_oid not in cache[self._CACHE_KEY_OID]:
            self._query_oids(host, [_memory_total_oid], cache, False)
        value = int(cache[self._CACHE_KEY_OID][_memory_total_oid]) - value
        return value

    def _post_op_net(self, host, cache, meter_def,
                     value, metadata, extra, suffix):
        # add ip address into metadata
        _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2"
        oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})
        if not self.find_matching_oids(oid_cache,
                                       _interface_ip_oid,
                                       PREFIX):
            # populate the oid into cache
            self._query_oids(host, [_interface_ip_oid], cache, True)
        ip_addr = ''
        for k, v in six.iteritems(oid_cache):
            if k.startswith(_interface_ip_oid) and v == int(suffix[1:]):
                ip_addr = k.replace(_interface_ip_oid + ".", "")
        metadata.update(ip=ip_addr)
        # update resource_id for each nic interface
        self._suffix_resource_id(host, metadata, 'name', extra)
        return value

    def _post_op_disk(self, host, cache, meter_def,
                      value, metadata, extra, suffix):
        self._suffix_resource_id(host, metadata, 'device', extra)
        return value

    @staticmethod
    def _suffix_resource_id(host, metadata, key, extra):
        prefix = metadata.get(key)
        if prefix:
            res_id = extra.get('resource_id') or host.hostname
            res_id = res_id + ".%s" % metadata.get(key)
            extra.update(resource_id=res_id)

    @staticmethod
    def _get_auth_strategy(host):
        if host.password:
            auth_strategy = cmdgen.UsmUserData(host.username,
                                               authKey=host.password)
        else:
            auth_strategy = cmdgen.CommunityData(host.username or 'public')
        return auth_strategy

    def prepare_params(self, param):
        processed = {}
        processed['matching_type'] = param['matching_type']
        processed['metric_oid'] = (param['oid'], eval(param['type']))
        processed['post_op'] = param.get('post_op', None)
        processed['metadata'] = {}
        for k, v in six.iteritems(param.get('metadata', {})):
            processed['metadata'][k] = (v['oid'], eval(v['type']))
        return processed
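To make prepare_params concrete, this sketch performs the same
transformation on one definition by hand (the OIDs come from the
disk.size.total entry in the snmp.yaml that follows; the eval of the type
string mirrors the code above):

    param = {
        'matching_type': 'type_prefix',
        'oid': '1.3.6.1.4.1.2021.9.1.6',
        'type': 'int',
        'metadata': {'device': {'oid': '1.3.6.1.4.1.2021.9.1.3',
                                'type': 'str'}},
    }
    processed = {
        'matching_type': param['matching_type'],
        'metric_oid': (param['oid'], eval(param['type'])),
        'post_op': param.get('post_op'),
        'metadata': {k: (v['oid'], eval(v['type']))
                     for k, v in param.get('metadata', {}).items()},
    }
    assert processed['metric_oid'][1]('42') == 42   # eval('int') -> int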
@ -1,189 +0,0 @@
---

metric:
  # cpu
  - name: hardware.cpu.load.1min
    unit: process
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.10.1.3.1"
      type: "lambda x: float(str(x))"

  - name: hardware.cpu.load.5min
    unit: process
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.10.1.3.2"
      type: "lambda x: float(str(x))"

  - name: hardware.cpu.load.15min
    unit: process
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.10.1.3.3"
      type: "lambda x: float(str(x))"

  - name: hardware.cpu.util
    unit: "%"
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.11.9.0"
      type: "int"

  # disk
  - name: hardware.disk.size.total
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_prefix"
      oid: "1.3.6.1.4.1.2021.9.1.6"
      type: "int"
      metadata: &disk_metadata
        path:
          oid: "1.3.6.1.4.1.2021.9.1.2"
          type: "str"
        device:
          oid: "1.3.6.1.4.1.2021.9.1.3"
          type: "str"
      post_op: "_post_op_disk"

  - name: hardware.disk.size.used
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_prefix"
      oid: "1.3.6.1.4.1.2021.9.1.8"
      type: "int"
      metadata: *disk_metadata
      post_op: "_post_op_disk"

  # memory
  - name: hardware.memory.total
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.5.0"
      type: "int"

  - name: hardware.memory.used
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.6.0"
      type: "int"
      post_op: "_post_op_memory_avail_to_used"

  - name: hardware.memory.swap.total
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.3.0"
      type: "int"

  - name: hardware.memory.swap.avail
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.4.0"
      type: "int"

  - name: hardware.memory.buffer
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.14.0"
      type: "int"

  - name: hardware.memory.cached
    unit: KB
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.4.15.0"
      type: "int"

  # network interface
  - name: hardware.network.incoming.bytes
    unit: B
    type: cumulative
    snmp_inspector:
      matching_type: "type_prefix"
      oid: "1.3.6.1.2.1.2.2.1.10"
      type: "int"
      metadata: &net_metadata
        name:
          oid: "1.3.6.1.2.1.2.2.1.2"
          type: "str"
        speed:
          oid: "1.3.6.1.2.1.2.2.1.5"
          type: "lambda x: int(x) / 8"
        mac:
          oid: "1.3.6.1.2.1.2.2.1.6"
          type: "lambda x: x.prettyPrint().replace('0x', '')"
      post_op: "_post_op_net"

  - name: hardware.network.outgoing.bytes
    unit: B
    type: cumulative
    snmp_inspector:
      matching_type: "type_prefix"
      oid: "1.3.6.1.2.1.2.2.1.16"
      type: "int"
      metadata: *net_metadata
      post_op: "_post_op_net"

  - name: hardware.network.outgoing.errors
    unit: packet
    type: cumulative
    snmp_inspector:
      matching_type: "type_prefix"
      oid: "1.3.6.1.2.1.2.2.1.20"
      type: "int"
      metadata: *net_metadata
      post_op: "_post_op_net"

  # network aggregate
  - name: hardware.network.ip.outgoing.datagrams
    unit: datagrams
    type: cumulative
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.2.1.4.10.0"
      type: "int"

  - name: hardware.network.ip.incoming.datagrams
    unit: datagrams
    type: cumulative
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.2.1.4.3.0"
      type: "int"

  # system stats
  - name: hardware.system_stats.cpu.idle
    unit: "%"
    type: gauge
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.11.11.0"
      type: "int"

  - name: hardware.system_stats.io.outgoing.blocks
    unit: blocks
    type: cumulative
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.11.57.0"
      type: "int"

  - name: hardware.system_stats.io.incoming.blocks
    unit: blocks
    type: cumulative
    snmp_inspector:
      matching_type: "type_exact"
      oid: "1.3.6.1.4.1.2021.11.58.0"
      type: "int"
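A hedged sketch of consuming a definition file of the shape above with
PyYAML (the inline snippet stands in for the real file):

    import textwrap

    import yaml

    snippet = textwrap.dedent("""
        metric:
          - name: hardware.cpu.load.1min
            unit: process
            type: gauge
            snmp_inspector:
              matching_type: "type_exact"
              oid: "1.3.6.1.4.1.2021.10.1.3.1"
              type: "lambda x: float(str(x))"
    """)
    for meter in yaml.safe_load(snippet).get('metric', []):
        print(meter['name'], meter['snmp_inspector']['oid'])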
@ -1,218 +0,0 @@
#
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools
import pkg_resources

from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
import six

from ceilometer.agent import plugin_base
from ceilometer import declarative
from ceilometer.hardware import inspector as insloader
from ceilometer.hardware.pollsters import util
from ceilometer.i18n import _LE, _LW
from ceilometer import sample

OPTS = [
    cfg.StrOpt('meter_definitions_file',
               default="snmp.yaml",
               help="Configuration file for defining hardware snmp meters."
               ),
]

cfg.CONF.register_opts(OPTS, group='hardware')

LOG = log.getLogger(__name__)


class MeterDefinition(object):
    required_fields = ['name', 'unit', 'type']

    def __init__(self, definition_cfg):
        self.cfg = definition_cfg
        for fname, fval in self.cfg.items():
            if (isinstance(fname, six.string_types) and
                    (fname in self.required_fields or
                     fname.endswith('_inspector'))):
                setattr(self, fname, fval)
            else:
                LOG.warning(_LW("Ignore unrecognized field %s"), fname)
        for fname in self.required_fields:
            if not getattr(self, fname, None):
                raise declarative.MeterDefinitionException(
                    _LE("Missing field %s") % fname, self.cfg)
        if self.type not in sample.TYPES:
            raise declarative.MeterDefinitionException(
                _LE("Unrecognized type value %s") % self.type, self.cfg)


class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
    CACHE_KEY = 'hardware.generic'
    mapping = None

    def __init__(self):
        super(GenericHardwareDeclarativePollster, self).__init__()
        self.inspectors = {}

    def _update_meter_definition(self, definition):
        self.meter_definition = definition
        self.cached_inspector_params = {}

    @property
    def default_discovery(self):
        return 'tripleo_overcloud_nodes'

    @staticmethod
    def _parse_resource(res):
        """Parse resource from discovery.

        Either a URL or a dict can be given. The dict has to contain at
        least the keys 'resource_id' and 'resource_url'; all the dict keys
        will be stored as metadata.

        :param res: URL or dict containing all resource info.
        :return parsed_url, resource_id, metadata: Returns the parsed URL
            used for the SNMP query, the unique identifier of the resource
            and the metadata of the resource.
        """
        parsed_url, resource_id, metadata = (None, None, None)
        if isinstance(res, dict):
            if 'resource_url' not in res or 'resource_id' not in res:
                LOG.error(_LE('Passed resource dict must contain keys '
                              'resource_id and resource_url.'))
            else:
                metadata = res
                parsed_url = netutils.urlsplit(res['resource_url'])
                resource_id = res['resource_id']
        else:
            metadata = {}
            parsed_url = netutils.urlsplit(res)
            resource_id = res

        return parsed_url, resource_id, metadata

    def _get_inspector(self, parsed_url):
        if parsed_url.scheme not in self.inspectors:
            try:
                driver = insloader.get_inspector(parsed_url)
                self.inspectors[parsed_url.scheme] = driver
            except Exception as err:
                LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"),
                              dict(name=parsed_url.scheme,
                                   err=err))
                raise err
        return self.inspectors[parsed_url.scheme]

    def get_samples(self, manager, cache, resources=None):
        """Return an iterable of Sample instances from polling the resources.

        :param manager: The service manager invoking the plugin
        :param cache: A dictionary for passing data between plugins
        :param resources: end point to poll data from
        """
        resources = resources or []
        h_cache = cache.setdefault(self.CACHE_KEY, {})
        sample_iters = []

        # Get the meter identifier to poll
        identifier = self.meter_definition.name

        for resource in resources:
            parsed_url, res, extra_metadata = self._parse_resource(resource)
            if parsed_url is None:
                LOG.error(_LE("Skip invalid resource %s"), resource)
                continue
            ins = self._get_inspector(parsed_url)
            try:
                # Call hardware inspector to poll for the data
                i_cache = h_cache.setdefault(res, {})

                # Prepare inspector parameters and cache them for performance
                param_key = parsed_url.scheme + '.' + identifier
                inspector_param = self.cached_inspector_params.get(param_key)
                if not inspector_param:
                    param = getattr(self.meter_definition,
                                    parsed_url.scheme + '_inspector', {})
                    inspector_param = ins.prepare_params(param)
                    self.cached_inspector_params[param_key] = inspector_param

                if identifier not in i_cache:
                    i_cache[identifier] = list(ins.inspect_generic(
                        host=parsed_url,
                        cache=i_cache,
                        extra_metadata=extra_metadata,
                        param=inspector_param))
                # Generate samples
                if i_cache[identifier]:
                    sample_iters.append(self.generate_samples(
                        parsed_url,
                        i_cache[identifier]))
            except Exception as err:
                LOG.exception(_LE('inspector call failed for %(ident)s '
                                  'host %(host)s: %(err)s'),
                              dict(ident=identifier,
                                   host=parsed_url.hostname,
                                   err=err))
        return itertools.chain(*sample_iters)

    def generate_samples(self, host_url, data):
        """Generate a list of Samples from the data returned by the inspector

        :param host_url: host url of the endpoint
        :param data: list of data returned by the corresponding inspector
        """
        samples = []
        definition = self.meter_definition
        for (value, metadata, extra) in data:
            s = util.make_sample_from_host(host_url,
                                           name=definition.name,
                                           sample_type=definition.type,
                                           unit=definition.unit,
                                           volume=value,
                                           res_metadata=metadata,
                                           extra=extra,
                                           name_prefix=None)
            samples.append(s)
        return samples

    @classmethod
    def build_pollsters(cls):
        if not cls.mapping:
            definition_cfg = declarative.load_definitions(
                {}, cfg.CONF.hardware.meter_definitions_file,
                pkg_resources.resource_filename(__name__, "data/snmp.yaml"))
            cls.mapping = load_definition(definition_cfg)

        pollsters = []
        for name in cls.mapping:
            pollster = cls()
            pollster._update_meter_definition(cls.mapping[name])
            pollsters.append((name, pollster))
        return pollsters


def load_definition(config_def):
    mappings = {}
    for meter_def in config_def.get('metric', []):
        try:
            meter = MeterDefinition(meter_def)
            mappings[meter.name] = meter
        except declarative.DefinitionException as e:
            errmsg = _LE("Error loading meter definition: %s")
            LOG.error(errmsg, e.brief_message)
    return mappings
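A minimal sketch of the URL-or-dict resource handling described in
_parse_resource, using urllib's urlsplit as a stand-in for oslo's
netutils.urlsplit:

    from urllib.parse import urlsplit

    def parse_resource(res):
        if isinstance(res, dict):
            if 'resource_url' not in res or 'resource_id' not in res:
                return None, None, None
            return urlsplit(res['resource_url']), res['resource_id'], res
        return urlsplit(res), res, {}

    url, res_id, meta = parse_resource(
        {'resource_id': 'node-1',
         'resource_url': 'snmp://ro_snmp_user:password@10.0.0.5'})
    assert url.scheme == 'snmp' and res_id == 'node-1'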
@ -1,63 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
#          Toni Zehnder <zehndton@students.zhaw.ch>
#          Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from six.moves.urllib import parse as urlparse

from ceilometer import sample


def get_metadata_from_host(host_url):
    return {'resource_url': urlparse.urlunsplit(host_url)}


def make_resource_metadata(res_metadata=None, host_url=None):
    resource_metadata = dict()
    if res_metadata is not None:
        metadata = copy.copy(res_metadata)
        resource_metadata.update(metadata)
    resource_metadata.update(get_metadata_from_host(host_url))
    return resource_metadata


def make_sample_from_host(host_url, name, sample_type, unit, volume,
                          project_id=None, user_id=None, resource_id=None,
                          res_metadata=None, extra=None,
                          name_prefix='hardware'):

    extra = extra or {}
    resource_metadata = make_resource_metadata(res_metadata, host_url)
    resource_metadata.update(extra)

    res_id = resource_id or extra.get('resource_id') or host_url.hostname
    if name_prefix:
        name = name_prefix + '.' + name
    return sample.Sample(
        name=name,
        type=sample_type,
        unit=unit,
        volume=volume,
        user_id=user_id or extra.get('user_id'),
        project_id=project_id or extra.get('project_id'),
        resource_id=res_id,
        resource_metadata=resource_metadata,
        source='hardware',
    )
@ -1,129 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common code for working with images
"""

from __future__ import absolute_import

import glanceclient
from oslo_config import cfg

from ceilometer.agent import plugin_base
from ceilometer import keystone_client
from ceilometer import sample


OPTS = [
    cfg.IntOpt('glance_page_size',
               default=0,
               help="Number of items to request in "
                    "each paginated Glance API request "
                    "(parameter used by glanceclient). "
                    "If this is less than or equal to 0, "
                    "page size is not specified "
                    "(default value in glanceclient is used)."),
]

SERVICE_OPTS = [
    cfg.StrOpt('glance',
               default='image',
               help='Glance service type.'),
]

cfg.CONF.register_opts(OPTS)
cfg.CONF.register_opts(SERVICE_OPTS, group='service_types')


class _Base(plugin_base.PollsterBase):

    @property
    def default_discovery(self):
        return 'endpoint:%s' % cfg.CONF.service_types.glance

    @staticmethod
    def get_glance_client(ksclient, endpoint):
        # hard-code v1 glance API version selection while v2 API matures
        return glanceclient.Client('1',
                                   session=keystone_client.get_session(),
                                   endpoint=endpoint,
                                   auth=ksclient.session.auth)

    def _get_images(self, ksclient, endpoint):
        client = self.get_glance_client(ksclient, endpoint)
        page_size = cfg.CONF.glance_page_size
        kwargs = {}
        if page_size > 0:
            kwargs['page_size'] = page_size
        return client.images.list(filters={"is_public": None}, **kwargs)

    def _iter_images(self, ksclient, cache, endpoint):
        """Iterate over all images."""
        key = '%s-images' % endpoint
        if key not in cache:
            cache[key] = list(self._get_images(ksclient, endpoint))
        return iter(cache[key])

    @staticmethod
    def extract_image_metadata(image):
        return dict((k, getattr(image, k))
                    for k in
                    [
                        "status",
                        "is_public",
                        "name",
                        "deleted",
                        "container_format",
                        "created_at",
                        "disk_format",
                        "updated_at",
                        "properties",
                        "min_disk",
                        "protected",
                        "checksum",
                        "deleted_at",
                        "min_ram",
                        "size", ])


class ImagePollster(_Base):
    def get_samples(self, manager, cache, resources):
        for endpoint in resources:
            for image in self._iter_images(manager.keystone, cache, endpoint):
                yield sample.Sample(
                    name='image',
                    type=sample.TYPE_GAUGE,
                    unit='image',
                    volume=1,
                    user_id=None,
                    project_id=image.owner,
                    resource_id=image.id,
                    resource_metadata=self.extract_image_metadata(image),
                )


class ImageSizePollster(_Base):
    def get_samples(self, manager, cache, resources):
        for endpoint in resources:
            for image in self._iter_images(manager.keystone, cache, endpoint):
                yield sample.Sample(
                    name='image.size',
                    type=sample.TYPE_GAUGE,
                    unit='B',
                    volume=image.size,
                    user_id=None,
                    project_id=image.owner,
                    resource_id=image.id,
                    resource_metadata=self.extract_image_metadata(image),
                )
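Usage sketch for extract_image_metadata; types.SimpleNamespace stands in
for a glanceclient image object, and the attribute values are illustrative:

    from types import SimpleNamespace

    image = SimpleNamespace(
        status='active', is_public=True, name='cirros', deleted=False,
        container_format='bare', created_at='2015-01-01T00:00:00',
        disk_format='qcow2', updated_at='2015-01-02T00:00:00', properties={},
        min_disk=0, protected=False, checksum='abc123', deleted_at=None,
        min_ram=0, size=13287936)
    meta = ImagePollster.extract_image_metadata(image)
    assert meta['name'] == 'cirros' and meta['size'] == 13287936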
@ -1,174 +0,0 @@
#
# Copyright 2014 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing hardware sensor data sample messages from
notification events.
"""

from oslo_config import cfg
from oslo_log import log
import oslo_messaging as messaging

from ceilometer.agent import plugin_base
from ceilometer import sample

LOG = log.getLogger(__name__)

OPTS = [
    cfg.StrOpt('ironic_exchange',
               default='ironic',
               help='Exchange name for Ironic notifications.'),
]


cfg.CONF.register_opts(OPTS)


# Map unit name to SI
UNIT_MAP = {
    'Watts': 'W',
    'Volts': 'V',
}


def validate_reading(data):
    """Some sensors read "Disabled"."""
    return data != 'Disabled'


def transform_id(data):
    return data.lower().replace(' ', '_')


def parse_reading(data):
    try:
        volume, unit = data.split(' ', 1)
        unit = unit.rsplit(' ', 1)[-1]
        return float(volume), UNIT_MAP.get(unit, unit)
    except ValueError:
        raise InvalidSensorData('unable to parse sensor reading: %s' %
                                data)


class InvalidSensorData(ValueError):
    pass
|
||||
|
||||
|
||||
class SensorNotification(plugin_base.NotificationBase):
|
||||
"""A generic class for extracting samples from sensor data notifications.
|
||||
|
||||
A notification message can contain multiple samples from multiple
|
||||
sensors, all with the same basic structure: the volume for the sample
|
||||
is found as part of the value of a 'Sensor Reading' key. The unit
|
||||
is in the same value.
|
||||
|
||||
Subclasses exist solely to allow flexibility with stevedore configuration.
|
||||
"""
|
||||
|
||||
event_types = ['hardware.ipmi.*']
|
||||
metric = None
|
||||
|
||||
def get_targets(self, conf):
|
||||
"""oslo.messaging.TargetS for this plugin."""
|
||||
return [messaging.Target(topic=topic,
|
||||
exchange=conf.ironic_exchange)
|
||||
for topic in self.get_notification_topics(conf)]
|
||||
|
||||
def _get_sample(self, message):
|
||||
try:
|
||||
return (payload for _, payload
|
||||
in message['payload'][self.metric].items())
|
||||
except KeyError:
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def _package_payload(message, payload):
|
||||
# NOTE(chdent): How much of the payload should we keep?
|
||||
payload['node'] = message['payload']['node_uuid']
|
||||
info = {'publisher_id': message['publisher_id'],
|
||||
'timestamp': message['payload']['timestamp'],
|
||||
'event_type': message['payload']['event_type'],
|
||||
'user_id': message['payload'].get('user_id'),
|
||||
'project_id': message['payload'].get('project_id'),
|
||||
'payload': payload}
|
||||
return info
|
||||
|
||||
def process_notification(self, message):
|
||||
"""Read and process a notification.
|
||||
|
||||
The guts of a message are in dict value of a 'payload' key
|
||||
which then itself has a payload key containing a dict of
|
||||
multiple sensor readings.
|
||||
|
||||
If expected keys in the payload are missing or values
|
||||
are not in the expected form for transformations,
|
||||
KeyError and ValueError are caught and the current
|
||||
sensor payload is skipped.
|
||||
"""
|
||||
payloads = self._get_sample(message['payload'])
|
||||
for payload in payloads:
|
||||
try:
|
||||
# Provide a fallback resource_id in case parts are missing.
|
||||
resource_id = 'missing id'
|
||||
try:
|
||||
resource_id = '%(nodeid)s-%(sensorid)s' % {
|
||||
'nodeid': message['payload']['node_uuid'],
|
||||
'sensorid': transform_id(payload['Sensor ID'])
|
||||
}
|
||||
except KeyError as exc:
|
||||
raise InvalidSensorData('missing key in payload: %s' % exc)
|
||||
|
||||
info = self._package_payload(message, payload)
|
||||
|
||||
try:
|
||||
sensor_reading = info['payload']['Sensor Reading']
|
||||
except KeyError as exc:
|
||||
raise InvalidSensorData(
|
||||
"missing 'Sensor Reading' in payload"
|
||||
)
|
||||
|
||||
if validate_reading(sensor_reading):
|
||||
volume, unit = parse_reading(sensor_reading)
|
||||
yield sample.Sample.from_notification(
|
||||
name='hardware.ipmi.%s' % self.metric.lower(),
|
||||
type=sample.TYPE_GAUGE,
|
||||
unit=unit,
|
||||
volume=volume,
|
||||
resource_id=resource_id,
|
||||
message=info,
|
||||
user_id=info['user_id'],
|
||||
project_id=info['project_id'])
|
||||
|
||||
except InvalidSensorData as exc:
|
||||
LOG.warning(
|
||||
'invalid sensor data for %(resource)s: %(error)s' %
|
||||
dict(resource=resource_id, error=exc)
|
||||
)
|
||||
continue
|
||||
|
||||
|
||||
class TemperatureSensorNotification(SensorNotification):
|
||||
metric = 'Temperature'
|
||||
|
||||
|
||||
class CurrentSensorNotification(SensorNotification):
|
||||
metric = 'Current'
|
||||
|
||||
|
||||
class FanSensorNotification(SensorNotification):
|
||||
metric = 'Fan'
|
||||
|
||||
|
||||
class VoltageSensorNotification(SensorNotification):
|
||||
metric = 'Voltage'
|
|
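For reference, a hedged walkthrough of what the parsing helpers above produce; the reading strings are invented but follow the ipmitool format the module expects.

# parse_reading('358 (+/- 0) Watts') -> (358.0, 'W')     # 'Watts' mapped via UNIT_MAP
# parse_reading('4704 (+/- 0) RPM')  -> (4704.0, 'RPM')  # unknown units pass through
# parse_reading('Disabled')          -> raises InvalidSensorData
# transform_id('Fan 1 Tach')         -> 'fan_1_tach'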
@@ -1,22 +0,0 @@
# Copyright 2014 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class NodeManagerException(Exception):
    pass


class IPMIException(Exception):
    pass
@@ -1,342 +0,0 @@
# Copyright 2014 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Node manager engine to collect power and temperature of compute node.

Intel Node Manager Technology enables the datacenter IT to monitor and control
actual server power, thermal and compute utilization behavior through industry
defined standard IPMI. This file provides the Node Manager engine to get
simple system power and temperature data based on ipmitool.
"""

import binascii
import collections
import tempfile
import time

from oslo_config import cfg
import six

from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as nmexcept
from ceilometer.ipmi.platform import ipmitool


OPTS = [
    cfg.IntOpt('node_manager_init_retry',
               default=3,
               help='Number of retries upon Intel Node '
                    'Manager initialization failure')
]


CONF = cfg.CONF
CONF.register_opts(OPTS, group='ipmi')

IPMICMD = {"sdr_dump": "sdr dump",
           "sdr_info": "sdr info",
           "sensor_dump": "sdr -v"}
IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01",
              "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00",
              "init_sensor_agent": "raw 0x0a 0x2c 0x01",
              "init_complete": "raw 0x0a 0x2c 0x00",
              "init_sensor_agent_status": "raw 0x0a 0x2c 0x00",
              "read_power_all": "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00",
              "read_inlet_temperature":
              "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00",
              "read_outlet_temperature":
              "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00",
              "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00",
              "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05",
              "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"}

MANUFACTURER_ID_INTEL = ['57', '01', '00']
INTEL_PREFIX = '5701000d01'

# The template dicts below are made according to the spec. Each contains the
# expected length of every item and is used to parse IPMI command output.

ONE_RETURN_TEMPLATE = {"ret": 1}

BMC_INFO_TEMPLATE = collections.OrderedDict()
BMC_INFO_TEMPLATE['Device_ID'] = 1
BMC_INFO_TEMPLATE['Device_Revision'] = 1
BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1
BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1
BMC_INFO_TEMPLATE['IPMI_Version'] = 1
BMC_INFO_TEMPLATE['Additional_Device_support'] = 1
BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3
BMC_INFO_TEMPLATE['Product_ID'] = 2
BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4

NM_STATISTICS_TEMPLATE = collections.OrderedDict()
NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3
NM_STATISTICS_TEMPLATE['Current_value'] = 2
NM_STATISTICS_TEMPLATE['Minimum_value'] = 2
NM_STATISTICS_TEMPLATE['Maximum_value'] = 2
NM_STATISTICS_TEMPLATE['Average_value'] = 2
NM_STATISTICS_TEMPLATE['Time_stamp'] = 4
NM_STATISTICS_TEMPLATE['Report_period'] = 4
NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1

NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict()
NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1
NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Additional_Device_support'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3
NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1
NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1

NM_GET_VERSION_TEMPLATE = collections.OrderedDict()
NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3
NM_GET_VERSION_TEMPLATE['NM_Version'] = 1
NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1
NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1
NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1
NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1

NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict()
NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3
NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8
NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8
NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8

NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict()
NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3
NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2


def _hex(values=None):
    """Convert a little-endian list of hex byte strings into an int."""

    values = values or []
    if values:
        values.reverse()
        return int(''.join(values), 16)

    return 0


class NodeManager(object):
    """The python implementation of Intel Node Manager engine using ipmitool

    The class implements the engine to read power and temperature of
    compute node. It uses ipmitool to execute the IPMI command and parse
    the output into dict.
    """
    _inited = False
    _instance = None

    def __new__(cls, *args, **kwargs):
        """Singleton to avoid duplicated initialization."""
        if not cls._instance:
            cls._instance = super(NodeManager, cls).__new__(cls, *args,
                                                            **kwargs)
        return cls._instance

    def __init__(self):
        if not (self._instance and self._inited):
            # As a singleton, only the first NM pollster triggers the
            # initialization. nm_version indicates the init result and is
            # shared across all pollsters.
            self._inited = True
            self.nm_version = 0
            self.channel_slave = ''

            self.nm_version = self.check_node_manager()

    @staticmethod
    def _parse_slave_and_channel(file_path):
        """Parse the dumped file to get slave address and channel number.

        :param file_path: file path of dumped SDR file.
        :return: slave address and channel number of target device or None if
                 not found.
        """
        prefix = INTEL_PREFIX
        # According to Intel Node Manager spec, section 4.5, for Intel NM
        # discovery OEM SDR records are type C0h. It contains manufacture ID
        # and OEM data in the record body.
        # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6
        # is Intel NM device slave address and channel number/sensor owner LUN.
        with open(file_path, 'rb') as bin_fp:
            data_str = binascii.hexlify(bin_fp.read())

        if six.PY3:
            data_str = data_str.decode('ascii')
        oem_id_index = data_str.find(prefix)
        if oem_id_index != -1:
            ret = data_str[oem_id_index + len(prefix):
                           oem_id_index + len(prefix) + 4]
            # Byte 5 is slave address. [7:4] from byte 6 is channel
            # number, so just pick ret[2] here.
            return (ret[0:2], ret[2])

    @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE)
    def get_device_id(self):
        """IPMI command GET_DEVICE_ID."""
        return IPMIRAWCMD["get_device_id"]

    @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE)
    def _init_sensor_agent(self):
        """Run initialization agent."""
        return IPMIRAWCMD["init_sensor_agent"]

    @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE)
    def _init_sensor_agent_process(self):
        """Check the status of initialization agent."""
        return IPMIRAWCMD["init_sensor_agent_status"]

    @ipmitool.execute_ipmi_cmd()
    def _dump_sdr_file(self, data_file=""):
        """Dump SDR into a file."""
        return IPMICMD["sdr_dump"] + " " + data_file

    @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE)
    def _node_manager_get_device_id(self):
        """GET_DEVICE_ID command in Intel Node Manager

        Different from IPMI command GET_DEVICE_ID, it contains more
        information about Intel Node Manager.
        """
        return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"]

    @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE)
    def _node_manager_get_version(self):
        """GET_NODE_MANAGER_VERSION command in Intel Node Manager

        Byte 4 of the response:
        01h - Intel NM 1.0
        02h - Intel NM 1.5
        03h - Intel NM 2.0
        04h - Intel NM 2.5
        05h - Intel NM 3.0
        """
        return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"]

    @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE)
    def _read_power_all(self):
        """Get the power consumption of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all']

    @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE)
    def _read_inlet_temperature(self):
        """Get the inlet temperature info of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature']

    @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE)
    def _read_outlet_temperature(self):
        """Get the outlet temperature info of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature']

    @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE)
    def _read_airflow(self):
        """Get the volumetric airflow of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow']

    @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE)
    def _read_cups_utilization(self):
        """Get the average CUPS utilization of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization']

    @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE)
    def _read_cups_index(self):
        """Get the CUPS Index of the whole platform."""
        return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index']

    def read_power_all(self):
        return self._read_power_all() if self.nm_version > 0 else {}

    def read_inlet_temperature(self):
        return self._read_inlet_temperature() if self.nm_version > 0 else {}

    def read_outlet_temperature(self):
        # only available after NM 3.0
        return self._read_outlet_temperature() if self.nm_version >= 5 else {}

    def read_airflow(self):
        # only available after NM 3.0
        return self._read_airflow() if self.nm_version >= 5 else {}

    def read_cups_utilization(self):
        # only available after NM 3.0
        return self._read_cups_utilization() if self.nm_version >= 5 else {}

    def read_cups_index(self):
        # only available after NM 3.0
        return self._read_cups_index() if self.nm_version >= 5 else {}

    def init_node_manager(self):
        if self._init_sensor_agent_process()['ret'] == ['01']:
            return
        # Run sensor initialization agent
        for i in range(CONF.ipmi.node_manager_init_retry):
            self._init_sensor_agent()
            time.sleep(1)
            if self._init_sensor_agent_process()['ret'] == ['01']:
                return

        raise nmexcept.NodeManagerException(_('Node Manager init failed'))

    def discover_slave_channel(self):
        """Discover target slave address and channel number."""
        file_path = tempfile.mkstemp()[1]
        self._dump_sdr_file(data_file=file_path)
        ret = self._parse_slave_and_channel(file_path)
        slave_address = ''.join(['0x', ret[0]])
        channel = ''.join(['0x', ret[1]])
        # String of channel and slave_address
        self.channel_slave = '-b ' + channel + ' -t ' + slave_address

    def node_manager_version(self):
        """Intel Node Manager capability checking.

        This function detects whether the compute node supports Intel Node
        Manager (returning its version number, or 0 if unsupported) and
        parses out the slave address and channel number of Node Manager.
        """
        self.manufacturer_id = self.get_device_id()['Manufacturer_ID']
        if MANUFACTURER_ID_INTEL != self.manufacturer_id:
            # If the manufacturer is not Intel, there is no NM support.
            return 0

        self.discover_slave_channel()
        support = self._node_manager_get_device_id()['Implemented_firmware']
        # According to Intel Node Manager spec, return value of GET_DEVICE_ID,
        # bits 3 to 0 shows if Intel NM implemented or not.
        if int(support[0], 16) & 0xf == 0:
            return 0

        return _hex(self._node_manager_get_version()['NM_Version'])

    def check_node_manager(self):
        """Intel Node Manager init and check.

        This function is used to initialize Intel Node Manager and check the
        capability without throwing an exception. It's safe to call it on a
        non-NodeManager platform.
        """
        try:
            self.init_node_manager()
            nm_version = self.node_manager_version()
        except (nmexcept.NodeManagerException, nmexcept.IPMIException):
            return 0
        return nm_version
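To make the template mechanism above concrete, here is a small self-contained sketch of the slicing that _parse_output performs and the little-endian decoding done by _hex; the raw bytes are invented.

import collections

template = collections.OrderedDict([('Manufacturer_ID', 3),
                                    ('CUPS_Index', 2)])
output = '57 01 00 0a 00'.split(' ')  # hypothetical ipmitool raw reply

ret, index = {}, 0
for name, length in template.items():
    ret[name] = output[index:index + length]
    index += length

assert ret['Manufacturer_ID'] == ['57', '01', '00']  # the Intel OEM ID
# Multi-byte values are little-endian, hence the reverse() in _hex():
assert int(''.join(reversed(ret['CUPS_Index'])), 16) == 10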
@@ -1,113 +0,0 @@
# Copyright 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""IPMI sensor to collect various sensor data of compute node"""

from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as ipmiexcept
from ceilometer.ipmi.platform import ipmitool

IPMICMD = {"sdr_dump": "sdr dump",
           "sdr_info": "sdr info",
           "sensor_dump": "sdr -v",
           "sensor_dump_temperature": "sdr -v type Temperature",
           "sensor_dump_current": "sdr -v type Current",
           "sensor_dump_fan": "sdr -v type Fan",
           "sensor_dump_voltage": "sdr -v type Voltage"}

# Requires translation of output into dict
DICT_TRANSLATE_TEMPLATE = {"translate": 1}


class IPMISensor(object):
    """The python implementation of IPMI sensor using ipmitool

    The class implements the IPMI sensor to get various sensor data of
    compute node. It uses ipmitool to execute the IPMI command and parse
    the output into dict.
    """
    _inited = False
    _instance = None

    def __new__(cls, *args, **kwargs):
        """Singleton to avoid duplicated initialization."""
        if not cls._instance:
            cls._instance = super(IPMISensor, cls).__new__(cls, *args,
                                                           **kwargs)
        return cls._instance

    def __init__(self):
        if not (self._instance and self._inited):
            self.ipmi_support = False
            self._inited = True

            self.ipmi_support = self.check_ipmi()

    @ipmitool.execute_ipmi_cmd()
    def _get_sdr_info(self):
        """Get the SDR info."""
        return IPMICMD['sdr_info']

    @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
    def _read_sensor_all(self):
        """Get the sensor data for all types."""
        return IPMICMD['sensor_dump']

    @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
    def _read_sensor_temperature(self):
        """Get the sensor data for Temperature."""
        return IPMICMD['sensor_dump_temperature']

    @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
    def _read_sensor_voltage(self):
        """Get the sensor data for Voltage."""
        return IPMICMD['sensor_dump_voltage']

    @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
    def _read_sensor_current(self):
        """Get the sensor data for Current."""
        return IPMICMD['sensor_dump_current']

    @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
    def _read_sensor_fan(self):
        """Get the sensor data for Fan."""
        return IPMICMD['sensor_dump_fan']

    def read_sensor_any(self, sensor_type=''):
        """Get the sensor data for the given type."""
        if not self.ipmi_support:
            return {}

        mapping = {'': self._read_sensor_all,
                   'Temperature': self._read_sensor_temperature,
                   'Fan': self._read_sensor_fan,
                   'Voltage': self._read_sensor_voltage,
                   'Current': self._read_sensor_current}

        try:
            return mapping[sensor_type]()
        except KeyError:
            raise ipmiexcept.IPMIException(_('Wrong sensor type'))

    def check_ipmi(self):
        """IPMI capability checking.

        This function is used to detect whether the compute node is an
        IPMI-capable platform. It just runs a simple IPMI command to get
        SDR info as the check.
        """
        try:
            self._get_sdr_info()
        except ipmiexcept.IPMIException:
            return False
        return True
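Both IPMISensor and NodeManager rely on the same __new__-based singleton, so the (slow) capability probe runs once per process no matter how many pollsters construct the class. A minimal sketch of the pattern:

class Single(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(Single, cls).__new__(cls)
        return cls._instance

assert Single() is Single()  # every construction returns the same object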
@@ -1,132 +0,0 @@
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utils to run ipmitool for data collection"""
from oslo_concurrency import processutils

from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as ipmiexcept
from ceilometer import utils


# Following 2 functions are copied from ironic project to handle ipmitool's
# sensor data output. Need code clean and sharing in future.
# Check ironic/drivers/modules/ipmitool.py


def _get_sensor_type(sensor_data_dict):
    # Have only three sensor type name IDs: 'Sensor Type (Analog)'
    # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'

    for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
                'Sensor Type (Threshold)'):
        try:
            return sensor_data_dict[key].split(' ', 1)[0]
        except KeyError:
            continue

    raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed,"
                                     "unknown sensor type"))


def _process_sensor(sensor_data):
    sensor_data_fields = sensor_data.split('\n')
    sensor_data_dict = {}
    for field in sensor_data_fields:
        if not field:
            continue
        kv_value = field.split(':')
        if len(kv_value) != 2:
            continue
        sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()

    return sensor_data_dict


def _translate_output(output):
    """Translate the return value into JSON dict

    :param output: output of the execution of IPMI command(sensor reading)
    """
    sensors_data_dict = {}

    sensors_data_array = output.split('\n\n')
    for sensor_data in sensors_data_array:
        sensor_data_dict = _process_sensor(sensor_data)
        if not sensor_data_dict:
            continue

        sensor_type = _get_sensor_type(sensor_data_dict)

        # ignore the sensors which have no current 'Sensor Reading' data
        sensor_id = sensor_data_dict['Sensor ID']
        if 'Sensor Reading' in sensor_data_dict:
            sensors_data_dict.setdefault(sensor_type,
                                         {})[sensor_id] = sensor_data_dict

    # get nothing, no valid sensor data
    if not sensors_data_dict:
        raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed,"
                                         "No data retrieved from given input"))
    return sensors_data_dict


def _parse_output(output, template):
    """Parse the return value of IPMI command into dict

    :param output: output of the execution of IPMI command
    :param template: a dict that contains the expected items of
                     IPMI command and its length.
    """
    ret = {}
    index = 0
    if not (output and template):
        return ret

    if "translate" in template:
        ret = _translate_output(output)
    else:
        output_list = output.strip().replace('\n', '').split(' ')
        if sum(template.values()) != len(output_list):
            raise ipmiexcept.IPMIException(_("ipmitool output "
                                             "length mismatch"))
        for item in template.items():
            index_end = index + item[1]
            update_value = output_list[index: index_end]
            ret[item[0]] = update_value
            index = index_end
    return ret


def execute_ipmi_cmd(template=None):
    """Decorator for the execution of IPMI command.

    It parses the output of IPMI command into dictionary.
    """

    template = template or []

    def _execute_ipmi_cmd(f):
        def _execute(self, **kwargs):
            args = ['ipmitool']
            command = f(self, **kwargs)
            args.extend(command.split(" "))
            try:
                (out, __) = utils.execute(*args, run_as_root=True)
            except processutils.ProcessExecutionError:
                raise ipmiexcept.IPMIException(_("running ipmitool failure"))
            return _parse_output(out, template)
        return _execute

    return _execute_ipmi_cmd
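A hedged example of the record parsing above, using an invented fragment of 'ipmitool sdr -v' output; _process_sensor keeps only 'key : value' lines.

record = ("Sensor ID              : Temp (0x1)\n"
          "Sensor Type (Threshold): Temperature\n"
          "Sensor Reading         : 25 (+/- 0) degrees C")

parsed = {}
for field in record.split('\n'):
    kv = field.split(':')
    if len(kv) == 2:
        parsed[kv[0].strip()] = parsed.get(kv[0].strip()) or kv[1].strip()

assert parsed['Sensor Reading'] == '25 (+/- 0) degrees C'
# _get_sensor_type() would classify this record as 'Temperature'.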
@@ -1,29 +0,0 @@
# Copyright 2014 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Pollsters for IPMI and Intel Node Manager
"""

from oslo_config import cfg

OPTS = [
    cfg.IntOpt('polling_retry',
               default=3,
               help='Tolerance of IPMI/NM polling failures '
                    'before disabling this pollster. '
                    'Negative indicates retrying forever.')
]

cfg.CONF.register_opts(OPTS, group='ipmi')
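The pollsters that consume this option (later in this diff) disable themselves using the predicate 0 <= polling_retry < failures, so a negative value really does mean retry forever. A tiny sketch:

def should_disable(polling_retry, failures):
    return 0 <= polling_retry < failures

assert should_disable(3, 4)          # tolerance exceeded, disable
assert not should_disable(3, 3)      # still within tolerance
assert not should_disable(-1, 1000)  # negative: never disable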
@@ -1,180 +0,0 @@
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_config import cfg
from oslo_log import log
import six

from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as nmexcept
from ceilometer.ipmi.platform import intel_node_manager as node_manager
from ceilometer import sample

CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')
CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters',
                group='ipmi')

LOG = log.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class _Base(plugin_base.PollsterBase):

    def setup_environment(self):
        super(_Base, self).setup_environment()
        self.nodemanager = node_manager.NodeManager()
        self.polling_failures = 0

        # Do not load this extension if no NM support
        if self.nodemanager.nm_version == 0:
            raise plugin_base.ExtensionLoadError()

    @property
    def default_discovery(self):
        return 'local_node'

    def get_value(self, stats):
        """Get value from statistics."""
        return node_manager._hex(stats["Current_value"])

    @abc.abstractmethod
    def read_data(self, cache):
        """Return data sample for IPMI."""

    def get_samples(self, manager, cache, resources):
        # Only one resource for Node Manager pollster
        try:
            stats = self.read_data(cache)
        except nmexcept.IPMIException:
            self.polling_failures += 1
            LOG.warning(_('Polling %(name)s failed for %(cnt)s times!')
                        % ({'name': self.NAME,
                            'cnt': self.polling_failures}))
            if 0 <= CONF.ipmi.polling_retry < self.polling_failures:
                LOG.warning(_('Pollster for %s is disabled!') % self.NAME)
                raise plugin_base.PollsterPermanentError(resources)
            else:
                return

        self.polling_failures = 0

        metadata = {
            'node': CONF.host
        }

        if stats:
            data = self.get_value(stats)

            yield sample.Sample(
                name=self.NAME,
                type=self.TYPE,
                unit=self.UNIT,
                volume=data,
                user_id=None,
                project_id=None,
                resource_id=CONF.host,
                resource_metadata=metadata)


class InletTemperaturePollster(_Base):
    # Note(ildikov): The new meter name should be
    # "hardware.ipmi.node.inlet_temperature". As currently there
    # is no meter deprecation support in the code, we should use the
    # old name in order to avoid confusion.
    NAME = "hardware.ipmi.node.temperature"
    TYPE = sample.TYPE_GAUGE
    UNIT = "C"

    def read_data(self, cache):
        return self.nodemanager.read_inlet_temperature()


class OutletTemperaturePollster(_Base):
    NAME = "hardware.ipmi.node.outlet_temperature"
    TYPE = sample.TYPE_GAUGE
    UNIT = "C"

    def read_data(self, cache):
        return self.nodemanager.read_outlet_temperature()


class PowerPollster(_Base):
    NAME = "hardware.ipmi.node.power"
    TYPE = sample.TYPE_GAUGE
    UNIT = "W"

    def read_data(self, cache):
        return self.nodemanager.read_power_all()


class AirflowPollster(_Base):
    NAME = "hardware.ipmi.node.airflow"
    TYPE = sample.TYPE_GAUGE
    UNIT = "CFM"

    def read_data(self, cache):
        return self.nodemanager.read_airflow()


class CUPSIndexPollster(_Base):
    NAME = "hardware.ipmi.node.cups"
    TYPE = sample.TYPE_GAUGE
    UNIT = "CUPS"

    def read_data(self, cache):
        return self.nodemanager.read_cups_index()

    def get_value(self, stats):
        return node_manager._hex(stats["CUPS_Index"])


class _CUPSUtilPollsterBase(_Base):
    CACHE_KEY_CUPS = 'CUPS'

    def read_data(self, cache):
        i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {})
        if not i_cache:
            i_cache.update(self.nodemanager.read_cups_utilization())
        return i_cache


class CPUUtilPollster(_CUPSUtilPollsterBase):
    NAME = "hardware.ipmi.node.cpu_util"
    TYPE = sample.TYPE_GAUGE
    UNIT = "%"

    def get_value(self, stats):
        return node_manager._hex(stats["CPU_Utilization"])


class MemUtilPollster(_CUPSUtilPollsterBase):
    NAME = "hardware.ipmi.node.mem_util"
    TYPE = sample.TYPE_GAUGE
    UNIT = "%"

    def get_value(self, stats):
        return node_manager._hex(stats["Mem_Utilization"])


class IOUtilPollster(_CUPSUtilPollsterBase):
    NAME = "hardware.ipmi.node.io_util"
    TYPE = sample.TYPE_GAUGE
    UNIT = "%"

    def get_value(self, stats):
        return node_manager._hex(stats["IO_Utilization"])
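The three CUPS utilization pollsters above share one per-cycle cache entry, so the comparatively expensive read_cups_utilization() call happens once per polling interval. An illustrative reduction of that logic, with an invented statistics payload:

cache = {}

def read_data(cache, reader):
    i_cache = cache.setdefault('CUPS', {})
    if not i_cache:
        i_cache.update(reader())
    return i_cache

read_data(cache, lambda: {'CPU_Utilization': ['32'] + ['00'] * 7})
stats = read_data(cache, lambda: {'unused': None})  # cache hit, reader skipped
assert 'CPU_Utilization' in stats
# node_manager._hex(stats['CPU_Utilization']) would yield 50 (0x32), i.e. 50 %.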
@@ -1,130 +0,0 @@
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log

from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer.ipmi.notifications import ironic as parser
from ceilometer.ipmi.platform import exception as ipmiexcept
from ceilometer.ipmi.platform import ipmi_sensor
from ceilometer import sample

CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')
CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters',
                group='ipmi')

LOG = log.getLogger(__name__)


class InvalidSensorData(ValueError):
    pass


class SensorPollster(plugin_base.PollsterBase):
    METRIC = None

    def setup_environment(self):
        super(SensorPollster, self).setup_environment()
        self.ipmi = ipmi_sensor.IPMISensor()
        self.polling_failures = 0

        # Do not load this extension if no IPMI support
        if not self.ipmi.ipmi_support:
            raise plugin_base.ExtensionLoadError()

    @property
    def default_discovery(self):
        return 'local_node'

    @staticmethod
    def _get_sensor_types(data, sensor_type):
        try:
            return (sensor_type_data for _, sensor_type_data
                    in data[sensor_type].items())
        except KeyError:
            return []

    def get_samples(self, manager, cache, resources):
        # Only one resource for IPMI pollster
        try:
            stats = self.ipmi.read_sensor_any(self.METRIC)
        except ipmiexcept.IPMIException:
            self.polling_failures += 1
            LOG.warning(_(
                'Polling %(mtr)s sensor failed for %(cnt)s times!')
                % ({'mtr': self.METRIC,
                    'cnt': self.polling_failures}))
            if 0 <= CONF.ipmi.polling_retry < self.polling_failures:
                LOG.warning(_('Pollster for %s is disabled!') % self.METRIC)
                raise plugin_base.PollsterPermanentError(resources)
            else:
                return

        self.polling_failures = 0

        sensor_type_data = self._get_sensor_types(stats, self.METRIC)

        for sensor_data in sensor_type_data:
            # Continue if sensor_data is not parseable.
            try:
                sensor_reading = sensor_data['Sensor Reading']
                sensor_id = sensor_data['Sensor ID']
            except KeyError:
                continue

            if not parser.validate_reading(sensor_reading):
                continue

            try:
                volume, unit = parser.parse_reading(sensor_reading)
            except parser.InvalidSensorData:
                continue

            resource_id = '%(host)s-%(sensor-id)s' % {
                'host': CONF.host,
                'sensor-id': parser.transform_id(sensor_id)
            }

            metadata = {
                'node': CONF.host
            }

            yield sample.Sample(
                name='hardware.ipmi.%s' % self.METRIC.lower(),
                type=sample.TYPE_GAUGE,
                unit=unit,
                volume=volume,
                user_id=None,
                project_id=None,
                resource_id=resource_id,
                resource_metadata=metadata)


class TemperatureSensorPollster(SensorPollster):
    METRIC = 'Temperature'


class CurrentSensorPollster(SensorPollster):
    METRIC = 'Current'


class FanSensorPollster(SensorPollster):
    METRIC = 'Fan'


class VoltageSensorPollster(SensorPollster):
    METRIC = 'Voltage'
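For clarity, the resource_id built above joins the configured host with the normalized sensor ID; with hypothetical values:

# '%(host)s-%(sensor-id)s' % {'host': 'compute-1',
#                             'sensor-id': parser.transform_id('Fan 1 Tach')}
# -> 'compute-1-fan_1_tach'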
@@ -1,78 +0,0 @@
#
# Copyright 2015 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from keystoneauth1 import loading as ka_loading
from keystoneclient.v3 import client as ks_client_v3
from oslo_config import cfg
from oslo_log import log

LOG = log.getLogger(__name__)

CFG_GROUP = "service_credentials"


def get_session(requests_session=None):
    """Get a ceilometer service credentials auth session."""
    auth_plugin = ka_loading.load_auth_from_conf_options(cfg.CONF, CFG_GROUP)
    session = ka_loading.load_session_from_conf_options(
        cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session
    )
    return session


def get_client(trust_id=None, requests_session=None):
    """Return a client for keystone v3 endpoint, optionally using a trust."""
    session = get_session(requests_session=requests_session)
    return ks_client_v3.Client(session=session, trust_id=trust_id)


def get_service_catalog(client):
    return client.session.auth.get_access(client.session).service_catalog


def get_auth_token(client):
    return client.session.auth.get_access(client.session).auth_token


CLI_OPTS = [
    cfg.StrOpt('region-name',
               deprecated_group="DEFAULT",
               deprecated_name="os-region-name",
               default=os.environ.get('OS_REGION_NAME'),
               help='Region name to use for OpenStack service endpoints.'),
    cfg.StrOpt('interface',
               default=os.environ.get(
                   'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE',
                                                  'public')),
               deprecated_name="os-endpoint-type",
               choices=('public', 'internal', 'admin', 'auth', 'publicURL',
                        'internalURL', 'adminURL'),
               help='Type of endpoint in Identity service catalog to use for '
                    'communication with OpenStack services.'),
]

cfg.CONF.register_cli_opts(CLI_OPTS, group=CFG_GROUP)


def register_keystoneauth_opts(conf):
    ka_loading.register_auth_conf_options(conf, CFG_GROUP)
    ka_loading.register_session_conf_options(
        conf, CFG_GROUP,
        deprecated_opts={'cacert': [
            cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP),
            cfg.DeprecatedOpt('os-cacert', group="DEFAULT")]
        })
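A hedged usage sketch for the helpers above; nothing is contacted here, and 'image' is only an example service type.

# client = get_client()  # session built from [service_credentials] options
# catalog = get_service_catalog(client)
# urls = catalog.get_urls(
#     service_type='image',
#     interface=cfg.CONF.service_credentials.interface,
#     region_name=cfg.CONF.service_credentials.region_name)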
@@ -1,138 +0,0 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-03 03:36+0000\n"
"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
"Language-Team: German\n"
"Language: de\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

#, python-format
msgid "Cannot load inspector %(name)s: %(err)s"
msgstr "Inspector %(name)s kann nicht geladen werden: %(err)s"

#, python-format
msgid "Could not get Resident Memory Usage for %(id)s: %(e)s"
msgstr ""
"Die Verwendung des residenten Speichers für %(id)s konnte nicht abgerufen "
"werden: %(e)s"

#, python-format
msgid "Dispatcher failed to handle the %s, requeue it."
msgstr ""
"Dispatcher konnte %s nicht verarbeiten. Erneut in Warteschlange stellen."

msgid "Error connecting to coordination backend."
msgstr "Fehler beim Herstellen einer Verbindung zum Koordinierungs-Back-End."

msgid "Error getting group membership info from coordination backend."
msgstr ""
"Fehler beim Abrufen von Mitgliedschaftsinformationen vom Koordinierungs-Back-"
"End."

#, python-format
msgid "Error joining partitioning group %s, re-trying"
msgstr ""
"Fehler beim Beitreten zur Partitionierungsgruppe %s. Operation wird "
"wiederholt."

#, python-format
msgid "Error processing event and it will be dropped: %s"
msgstr "Fehler beim Verarbeiten des Ereignisses und es wird gelöscht: %s"

msgid "Error sending a heartbeat to coordination backend."
msgstr ""
"Fehler beim Senden eines Überwachungssignals an das Koordinierungs-Back-End."

msgid "Fail to process a notification"
msgstr "Eine Benachrichtigung konnte nicht verarbeitet werden."

msgid "Fail to process notification"
msgstr "Benachrichtigung konnte nicht verarbeitet werden."

msgid "Failed to connect to Gnocchi."
msgstr "Fehler beim Herstellen einer Verbindung zu Gnocchi."

#, python-format
msgid "Failed to connect to Kafka service: %s"
msgstr "Fehler beim Herstellen einer Verbindung zum Kafka-Service: %s"

#, python-format
msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s"
msgstr ""
"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s "
"Später erneut versuchen: %(err)s"

#, python-format
msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s"
msgstr ""
"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s "
"Später erneut versuchen: %(err)s"

#, python-format
msgid "Failed to load resource due to error %s"
msgstr "Fehler beim Laden der Ressource aufgrund des folgenden Fehlers %s"

#, python-format
msgid "Failed to record event: %s"
msgstr "Das Ereignis konnte nicht aufgezeichnet werden: %s"

#, python-format
msgid "Invalid type %s specified"
msgstr "Ungültigen Typ %s angegeben"

#, python-format
msgid "Missing field %s"
msgstr "Fehlendes Feld %s"

msgid "Passed resource dict must contain keys resource_id and resource_url."
msgstr ""
"Das übergebene Ressourcenwörterverzeichnis muss die Schlüssel für "
"resource_id und resource_url enthalten."

#, python-format
msgid "Required field %(field)s should be a %(type)s"
msgstr "Erforderliches Feld %(field)s muss %(type)s sein."

#, python-format
msgid "Required field %s not specified"
msgstr "Erforderliches Feld %s nicht angegeben."

#, python-format
msgid "Required fields %s not specified"
msgstr "Erforderliche Felder %s nicht angegeben."

#, python-format
msgid "Skip invalid resource %s"
msgstr "Ungültige Ressource %s überspringen"

#, python-format
msgid "Skipping %(name)s, keystone issue: %(exc)s"
msgstr "%(name)s wird übersprungen, Keystone-Problem: %(exc)s"

msgid "Status Code: %{code}s. Failed todispatch event: %{event}s"
msgstr "Statuscode: %{code}s. Fehler beim Versenden des Ereignisses: %{event}s"

#, python-format
msgid "Unable to load changed event pipeline: %s"
msgstr "Die geänderte Ereignispipeline konnte nicht geladen werden: %s"

#, python-format
msgid "Unable to load changed pipeline: %s"
msgstr "Die geänderte Pipeline konnte nicht geladen werden: %s"

#, python-format
msgid "Unrecognized type value %s"
msgstr "Nicht erkannter Typwert %s"

#, python-format
msgid "inspector call failed for %(ident)s host %(host)s: %(err)s"
msgstr "Inspector-Aufruf fehlgeschlagen für %(ident)s Host %(host)s: %(err)s"
@@ -1,145 +0,0 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Frank Kloeker <eumel@arcor.de>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-06-06 06:20+0000\n"
"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language-Team: German\n"
"Language: de\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

#, python-format
msgid "%d events are removed from database"
msgstr "%d Ereignisse aus Datenbank entfernt"

#, python-format
msgid "%d samples removed from database"
msgstr "%d Beispiele aus Datenbank entfernt"

msgid "Configuration:"
msgstr "Konfiguration:"

#, python-format
msgid "Connecting to %(db)s on %(nodelist)s"
msgstr "Verbindung mit %(db)s auf %(nodelist)s wird hergestellt"

msgid "Coordination backend started successfully."
msgstr "Das Koordinierungs-Back-End wurde erfolgreich gestartet."

#, python-format
msgid "Definitions: %s"
msgstr "Definitionen: %s"

msgid "Detected change in pipeline configuration."
msgstr "Es wurde eine Änderung in der Pipelinekonfiguration festgestellt."

#, python-format
msgid "Dropping event data with TTL %d"
msgstr "Löschen von Ereignisdaten mit TTL %d"

#, python-format
msgid "Dropping metering data with TTL %d"
msgstr "Löschen von Messdaten mit TTL %d"

#, python-format
msgid "Duplicate event detected, skipping it: %s"
msgstr "Doppeltes Ereignis erkannt. Wird übersprungen: %s"

msgid "Expired residual resource and meter definition data"
msgstr "Abgelaufene Daten für residente Ressource und für Messdefinition"

#, python-format
msgid "Index %s will be recreate."
msgstr "Index %s wird erneut erstellt. "

#, python-format
msgid "Joined partitioning group %s"
msgstr "Partitionierungsgruppe %s beigetreten."

#, python-format
msgid "Left partitioning group %s"
msgstr "Partitionierungsgruppe %s verlassen."

#, python-format
msgid "No limit value provided, result set will be limited to %(limit)d."
msgstr ""
"Es wurde kein Grenzwert angegeben. Der Ergebnissatz wird auf %(limit)d "
"beschränkt."

msgid "Nothing to clean, database event time to live is disabled"
msgstr ""
"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankereignisdaten ist "
"deaktiviert."

msgid "Nothing to clean, database metering time to live is disabled"
msgstr ""
"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankstichprobendaten "
"ist deaktiviert."

#, python-format
msgid ""
"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter "
"%(param)s"
msgstr ""
"Pipeline %(pipeline)s: Konfiguration von Transformerinstanz %(name)s mit "
"Parameter %(param)s"

#, python-format
msgid "Pipeline config: %s"
msgstr "Pipelinekonfiguration: %s"

msgid "Pipeline configuration file has been updated."
msgstr "Die Pipelinekonfigurationsdatei wurde aktualisiert."

#, python-format
msgid "Polling pollster %(poll)s in the context of %(src)s"
msgstr "Abfrage von Pollster %(poll)s im Kontext von %(src)s"

#, python-format
msgid "Publishing policy set to %s"
msgstr "Veröffentlichungsrichtlinie auf %s gesetzt"

msgid "Reconfiguring polling tasks."
msgstr "Polling-Tasks werden neu konfiguriert."

msgid "Reloading notification agent and listeners."
msgstr "Benachrichtigungsagent und Listener werden erneut geladen."

#, python-format
msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle"
msgstr ""
"Pollster %(name)s überspringen, keine %(p_context)sressourcen in diesem "
"Zyklus gefunden."

#, python-format
msgid "Starting server in PID %s"
msgstr "Starten von Server in PID %s"

#, python-format
msgid "Swift endpoint not found: %s"
msgstr "Swift-Endpunkt konnte nicht gefunden werden: %s"

msgid "detected decoupled pipeline config format"
msgstr "entkoppeltes Pipeline-Konfigurationsformat erkannt"

#, python-format
msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s"
msgstr ""
"Messung von Daten %(counter_name)s für %(resource_id)s: %(counter_volume)s"

#, python-format
msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s"
msgstr ""
"Bereitstellung auf 0.0.0.0:%(sport)s, Ansicht unter http://127.0.0.1:"
"%(vport)s"

#, python-format
msgid "serving on http://%(host)s:%(port)s"
msgstr "Bereitstellung auf http://%(host)s:%(port)s"
@@ -1,125 +0,0 @@
# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-04 10:08+0000\n"
"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
"Language-Team: German\n"
"Language: de\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

msgid ""
"Cannot extract tasks because agent failed to join group properly. Rejoining "
"group."
msgstr ""
"Extrahieren der Tasks nicht möglich, da der Agent nicht ordnungsgemäß in die "
"Gruppe eingebunden werden konnte. Operation zum Wiedereinbinden in die "
"Gruppe wird durchgeführt."

#, python-format
msgid ""
"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: "
"%(exc)s"
msgstr ""
"Die %(pollster)s-Daten für %(instance_id)s können nicht untersucht werden. "
"Behebbare Ursache: %(exc)s"

#, python-format
msgid "Dropping out of time order sample: %s"
msgstr ""
"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s"

#, python-format
msgid "Dropping sample with no predecessor: %s"
msgstr "Beispiel ohne Vorgänger wird gelöscht: %s"

#, python-format
msgid "Failed to load any dispatchers for %s"
msgstr "Es konnten keine Dispatcher für %s geladen werden."

#, python-format
msgid "Ignore unrecognized field %s"
msgstr "Nicht erkanntes Feld %s ignorieren"

#, python-format
msgid "Invalid status, skipping IP address %s"
msgstr "Ungültiger Status. IP-Adresse %s wird übersprungen."

msgid "Negative delta detected, dropping value"
msgstr "Negatives Delta erkannt. Wert wird verworfen."

#, python-format
msgid "No endpoints found for service %s"
msgstr "Es wurden keine Endpunkte für den Service %s gefunden."

msgid ""
"Non-metric meters may be collected. It is highly advisable to disable these "
"meters using ceilometer.conf or the pipeline.yaml"
msgstr ""
"Es werden möglicherweise nicht metrische Daten erfasst. Es wird dringend "
"empfohlen, diese Zähler über die Datei ceilometer.conf oder pipeline.yaml zu "
"inaktivieren."

#, python-format
msgid ""
"Skipping %(name)s, %(service_type)s service is not registered in keystone"
msgstr ""
"%(name)s wird übersprungen. Der Service %(service_type)s ist nicht in "
"Keystone registriert."

#, python-format
msgid "Skipping duplicate meter definition %s"
msgstr "Doppelte Messdefinition %s wird übersprungen."

msgid ""
"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to "
"aodh endpoint."
msgstr ""
"Die ceilometer-api wurde mit aktiviertem aodh gestartet. Alarm-URLs werden "
"an den aodh-Endpunkt umgeleitet. "

msgid ""
"ceilometer-api started with gnocchi enabled. The resources/meters/samples "
"URLs are disabled."
msgstr ""
"Die ceilometer-api wurde mit aktiviertem Gnocchi gestartet. Die URLs für "
"resources/meters/samples sind inaktiviert."

#, python-format
msgid "event signature invalid, discarding event: %s"
msgstr "Ereignissignatur ungültig. Ereignis wird verworfen: %s"

#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no "
"volume (volume: None), the sample will be dropped"
msgstr ""
"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s "
"enthält keinen Datenträger (volume: None). Die Stichprobe wird gelöscht."

#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has "
"volume which is not a number (volume: %(counter_volume)s), the sample will "
"be dropped"
msgstr ""
"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s "
"enthält einen Datenträger ohne Zahl (volume: %(counter_volume)s). Die "
"Stichprobe wird gelöscht."

msgid ""
"pecan_debug cannot be enabled, if workers is > 1, the value is overrided "
"with False"
msgstr ""
"pecan_debug kann nicht aktiviert werden, wenn Worker > 1 ist. Der Wert wird "
"mit False überschrieben."

#, python-format
msgid "unable to configure oslo_cache: %s"
msgstr "Konfigurieren von oslo_cache nicht möglich: %s"
@@ -1,522 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Carsten Duch <cad@teuto.net>, 2014
# Christian Berendt <berendt@b1-systems.de>, 2014
# Ettore Atalan <atalanttore@googlemail.com>, 2014
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Frank Kloeker <eumel@arcor.de>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-06-06 06:19+0000\n"
"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: German\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s nicht gefunden"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr ""
"Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' "
"verwenden"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr ""
"Tabelle %(table_name)s kann nicht erstellt werden, da sie bereits vorhanden "
"ist. Fehler wird ignoriert"

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "Fortfahren nach Fehler von %(name)s: %(error)s"

#, python-format
msgid "Could not connect slave host: %s "
msgstr ""
"Es konnte keine Verbindung zum untergeordneten Host hergestellt werden: %s "

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "Abruf von CPU-Auslastung nicht möglich für %(id)s: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr "Abruf von Speicherbelegung nicht möglich für %(id)s: %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "CPU-Auslastung für VM %s konnte nicht abgerufen werden"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "IP-Adresse von Instanz %s konnte nicht abgerufen werden"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"Fehler von libvirt während Suche nach Instanz <name=%(name)s, id=%(id)s>: "
"[Fehlercode %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "Fehler bei Auswertung der HTTP-Antwort %s"

msgid "Error stopping pollster."
msgstr "Fehler beim Stoppen des Pollster."

msgid "Event"
msgstr "Ereignis"

msgid "Expression evaluated to a NaN value!"
msgstr "Ausdruck ergab einen NaN-Wert!"

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "Fehler beim Importieren der Erweiterung für %(name)s: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
"is SHUTOFF."
msgstr ""
"Fehler beim Überprüfen von Daten der Instanz <name=%(name)s, id=%(id)s>, "
"Domänenstatus ist ABGESCHALTET."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"Fehler beim Überprüfen der Speicherbelegung von %(instance_uuid)s, "
"Informationen können nicht von libvirt abgerufen werden: %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
"not get info from libvirt."
msgstr ""
"Fehler beim Überprüfen der Speicherbelegung von Instanz <name=%(name)s, id="
"%(id)s>, Informationen können nicht von libvirt abgerufen werden."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "Es konnten keine Benachrichtigungshandler für %s geladen werden"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "Zeitmarkenwert %s konnte nicht analysiert werden"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr ""
"%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange "
"einreihen"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "Filterausdruck nicht gültig: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "Instanz %(name)s (%(instance_id)s) wird ignoriert: %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "Instanz %(name)s wird ignoriert: %(error)s"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "Loadbalancer %(loadbalancer_id)s wird ignoriert."

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "Pool %(pool_id)s wird ignoriert."

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"Ungültige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, "
"Spalte: %(column)s."

#, python-format
msgid "Invalid aggregation function: %s"
msgstr "Ungültige Aggreation Funktion: %s"

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "Ungültiger Zeitraum %(period)s: %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "Ungültiger Traittyp '%(type)s' für Trait %(trait)s"

msgid "Limit must be positive"
msgstr "Grenzwert muss positiv sein"

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr "Mehr als ein Ereignis mit der ID %s vom Speichertreiber zurückgegeben"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "Mehrere VMs %s in XenServer gefunden"

msgid "Must specify connection_url, and connection_password to use"
msgstr ""
"Angabe von connection_url und connection_password für die Verwendung "
"erforderlich"

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "Kein Plug-in mit dem Namen %(plugin)s verfügbar für %(name)s."

msgid "Node Manager init failed"
msgstr "Initialisierung von Knoten-Manager fehlgeschlagen"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s"

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "OpenDaylight-API hat Folgendes zurückgegeben: %(status)s %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "Opencontrail-API hat Folgendes zurückgegeben: %(status)s %(reason)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only equality operator is available "
"for field %(field)s"
msgstr ""
"Operator %(operator)s wird nicht unterstützt. Für das Feld %(field)s ist "
"nur der Gleichheitsoperator verfügbar."

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"Operator %(operator)s wird nicht unterstützt. Unterstützte Operatoren: "
"%(supported)s"

#, python-format
msgid "Order-by expression not valid: %s"
msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s"

#, python-format
msgid ""
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
msgstr ""
"Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s"

msgid "Period must be positive."
msgstr "Zeitraum muss positiv sein."

#, python-format
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
msgstr "Pipeline %(pipeline)s: %(status)s nach Fehler von Publisher %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
msgstr "Pipeline %(pipeline)s: Fortsetzen nach Fehler von Publisher %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
msgstr ""
"Pipeline %(pipeline)s: Fehler bei Flushoperation für Transformer %(trans)s"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
"%(smp)s"
msgstr ""
"Pipeline %(pipeline)s: Beendigung nach Fehler von Transformer %(trans)s für "
"%(smp)s"

#, python-format
msgid "Plugin specified, but no plugin name supplied for %s"
msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben."

#, python-format
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!"

#, python-format
msgid "Polling %(name)s failed for %(cnt)s times!"
msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!"

#, python-format
msgid "Pollster for %s is disabled!"
msgstr "Pollster für %s ist inaktiviert!"

#, python-format
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
msgstr ""
"Verhindern Sie, dass Pollster %(name)s Quelle %(source)s weiterhin abfragt!"

#, python-format
msgid ""
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
msgstr ""
"Maximale Länge von local_queue für Publisher ist überschritten, die %d "
"ältesten Beispiele werden gelöscht"

#, python-format
msgid "Publishing policy is unknown (%s) force to default"
msgstr ""
"Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung "
"setzen"

#, python-format
msgid "RGW AdminOps API returned %(status)s %(reason)s"
msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s"

msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
msgstr ""
"Anforderung konnte keine Verbindung mit OpenDaylight über NorthBound REST-"
"API herstellen"

#, python-format
msgid "Required field %s not specified"
msgstr "Erforderliches Feld %s nicht angegeben"

msgid "Resource"
msgstr "Resource"

msgid "Sample"
msgstr "Beispiel"

msgid "Samples should be included in request body"
msgstr "Beispiele sollten in Anforderungshauptteil enthalten sein"

#, python-format
msgid "Skip loading extension for %s"
msgstr "Laden der Ausnahme für %s überspringen"

#, python-format
msgid "String %s is not a valid isotime"
msgstr "Zeichenfolge %s ist kein gültiger Wert für 'isotime'"

msgid ""
"The Yaml file that defines mapping between samples and gnocchi resources/"
"metrics"
msgstr ""
"Die YAML-Datei mit der Definition der Zuordnung zwischen Beispielen und "
"gnocchi-Ressourcen/Metriken"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"Der Datentyp %(type)s wird nicht unterstützt. Die Liste der unterstützten "
"Datentypen lautet: %(supported)s"

#, python-format
msgid "The field 'fields' is required for %s"
msgstr "Das Feld 'fields' ist erforderlich für %s"

msgid "The path for the file publisher is required"
msgstr "Der Pfad für den Datei-Publisher ist erforderlich"

#, python-format
msgid "UDP: Cannot decode data sent by %s"
msgstr "UPD: Von %s gesendete Daten konnten nicht dekodiert werden"

msgid "UDP: Unable to store meter"
msgstr "UDP: Messgröße kann nicht gespeichert werden"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr ""
"Es kann keine Verbindung zum Datenbankserver hergestellt werden: %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr ""
"Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt "
"werden."

#, python-format
msgid "Unable to discover resources: %s"
msgstr "Ressourcen können nicht gefunden werden: %s"

#, python-format
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
msgstr "Auswertung nicht möglich für Ausdruck %(expr)s: %(exc)s"

#, python-format
msgid "Unable to load publisher %s"
msgstr "Publisher %s kann nicht geladen werden"

#, python-format
msgid "Unable to load the hypervisor inspector: %s"
msgstr "Hypervisorinspector %s kann nicht geladen werden"

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
"up."
msgstr ""
"Es kann keine erneute Verbindung zur primären mongodb nach %(retries)d "
"Versuchen hergestellt werden. Abbruch."

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
"%(retry_interval)d seconds."
msgstr ""
"Es kann keine erneute Verbindung zur primären mongodb hergestellt werden: "
"%(errmsg)s. Erneuter Versuch in %(retry_interval)d Sekunden."

msgid "Unable to send sample over UDP"
msgstr "Beispiel kann nicht über UDP gesendet werden"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten "
"Datentyp %(type)s."

#, python-format
msgid "Unknown discovery extension: %s"
msgstr "Unbekannte Erkennungserweiterung: %s"

#, python-format
msgid "Unknown metadata type. Key (%s) will not be queryable."
msgstr "Unbekannter Metadatentyp. Schlüssel (%s) wird nicht abfragbar sein."

#, python-format
msgid ""
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für Loadbalancer %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für Listener %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für Mitglied %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für Pool %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für VIP %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
msgstr ""
"Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird "
"übersprungen"

#, python-format
msgid "VM %s not found in VMware vSphere"
msgstr "VM %s in VMware vSphere nicht gefunden"

#, python-format
msgid "VM %s not found in XenServer"
msgstr "VM %s in XenServer nicht gefunden"

msgid "Wrong sensor type"
msgstr "Falscher Sensortyp"

msgid "XenAPI not installed"
msgstr "XenAPI nicht installiert"

#, python-format
msgid "YAML error reading Definitions file %(file)s"
msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s."

msgid "alarms URLs is unavailable when Aodh is disabled or unavailable."
msgstr ""
"Alarm-URLs sind nicht verfügbar, wenn Aodh inaktiviert oder nicht verfügbar "
"ist."

#, python-format
msgid "could not get CPU time for %(id)s: %(e)s"
msgstr "Abruf von CPU-Zeit nicht möglich für %(id)s: %(e)s"

msgid "direct option cannot be true when Gnocchi is enabled."
msgstr ""
"Wenn Gnocci aktiviert ist, kann die Option 'direct' nicht den Wert 'true' "
"haben. "

#, python-format
msgid "dropping out of time order sample: %s"
msgstr ""
"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s"

#, python-format
msgid "dropping sample with no predecessor: %s"
msgstr "Beispiel ohne Vorgänger wird gelöscht: %s"

msgid "ipmitool output length mismatch"
msgstr "Abweichung bei ipmitool-Ausgabelänge"

msgid "max_bytes and backup_count should be numbers."
msgstr "max_bytes und backup_count sollten Zahlen sein."

msgid "parse IPMI sensor data failed,No data retrieved from given input"
msgstr ""
"Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener "
"Eingabe abgerufen"

msgid "parse IPMI sensor data failed,unknown sensor type"
msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp"

msgid "running ipmitool failure"
msgstr "Fehler beim Ausführen von ipmitool"
@@ -1,132 +0,0 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Eugènia Torrella <tester03@es.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-18 11:52+0000\n"
"Last-Translator: Eugènia Torrella <tester03@es.ibm.com>\n"
"Language-Team: Spanish\n"
"Language: es\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

#, python-format
msgid "Cannot load inspector %(name)s: %(err)s"
msgstr "No se ha podido cargar el inspector %(name)s: %(err)s"

#, python-format
msgid "Could not get Resident Memory Usage for %(id)s: %(e)s"
msgstr "No se ha podido obtener el uso de memoria residente para %(id)s: %(e)s"

#, python-format
msgid "Dispatcher failed to handle the %s, requeue it."
msgstr "El asignador no ha podido manejar el %s, vuelva a ponerlo en la cola."

msgid "Error connecting to coordination backend."
msgstr "Error de conexión con el servidor coordinador."

msgid "Error getting group membership info from coordination backend."
msgstr ""
"Error al obtener información de pertenencia a grupos del servidor "
"coordinador."

#, python-format
msgid "Error joining partitioning group %s, re-trying"
msgstr "Error al unirse al grupo de partición %s, se está reintentando"

#, python-format
msgid "Error processing event and it will be dropped: %s"
msgstr "Se ha producido un error al procesar el suceso y se descartará: %s"

msgid "Error sending a heartbeat to coordination backend."
msgstr "Error al enviar una señal de latido al servidor coordinador."

msgid "Fail to process a notification"
msgstr "Error al procesar una notificación"

msgid "Fail to process notification"
msgstr "No se ha podido procesar la notificación"

msgid "Failed to connect to Gnocchi."
msgstr "No se ha podido conectar con Gnocchi."

#, python-format
msgid "Failed to connect to Kafka service: %s"
msgstr "No se ha podido conectar con el servicio Kafka: %s"

#, python-format
msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s"
msgstr ""
"No se ha podido establecer conexión con la base de datos con el propósito "
"%(purpose)s. Vuelva a intentarlo más tarde: %(err)s"

#, python-format
msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s"
msgstr ""
"No se ha podido establecer conexión con la base de datos con el propósito "
"%(purpose)s. Vuelva a intentarlo más tarde: %(err)s"

#, python-format
msgid "Failed to load resource due to error %s"
msgstr "No se ha podido cargar el recurso debido a un error: %s"

#, python-format
msgid "Failed to record event: %s"
msgstr "No se ha podido registrar el suceso: %s"

msgid "Failed to retry to send sample data with max_retry times"
msgstr ""
"No se ha podido volver a intentar enviar datos de ejemplo max_retry veces"

msgid ""
"Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is "
"not part of group and cannot take tasks"
msgstr ""
"ID de grupo: %{group_id}s, Miembros: %{members}s, Yo: %{me}s: El agente "
"actual no forma parte del grupo y no puede coger tareas"

#, python-format
msgid "Invalid type %s specified"
msgstr "Se ha especificado un tipo no válido: %s"

#, python-format
msgid "Missing field %s"
msgstr "Falta el campo %s"

msgid "Passed resource dict must contain keys resource_id and resource_url."
msgstr ""
"El dicionario de recursos que se pase debe contener las claves resource_id y "
"resource_url"

#, python-format
msgid "Required field %(field)s should be a %(type)s"
msgstr "El campo obligatorio %(field)s s debería ser un %(type)s"

#, python-format
msgid "Required field %s not specified"
msgstr "No se ha especificado el campo obligatorio %s"

#, python-format
msgid "Required fields %s not specified"
msgstr "No se han especificado los campos obligatorios %s"

#, python-format
msgid "Skip invalid resource %s"
msgstr "Omitir el recurso no válido %s"

msgid "Status Code: %{code}s. Failed todispatch event: %{event}s"
msgstr ""
"Código de estado: %{code}s. No se ha podido asignar el suceso: %{event}s"

#, python-format
msgid "Unrecognized type value %s"
msgstr "Valor de tipo no reconocido %s"

#, python-format
msgid "inspector call failed for %(ident)s host %(host)s: %(err)s"
msgstr "Error en la llamada al inspector del host %(ident)s %(host)s: %(err)s"
@@ -1,139 +0,0 @@
# Eugènia Torrella <tester03@es.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev57\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-04-18 02:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-18 02:45+0000\n"
"Last-Translator: Eugènia Torrella <tester03@es.ibm.com>\n"
"Language-Team: Spanish\n"
"Language: es\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

#, python-format
msgid "%d events are removed from database"
msgstr "Se han eliminado %d sucesos de la base de datos"

#, python-format
msgid "%d samples removed from database"
msgstr "Se han eliminado %d ejemplos de la base de datos"

msgid "Configuration:"
msgstr "Configuración:"

#, python-format
msgid "Connecting to %(db)s on %(nodelist)s"
msgstr "Se está estableciendo conexión con %(db)s en %(nodelist)s"

msgid "Coordination backend started successfully."
msgstr "El servidor coordinador se ha iniciado satisfactoriamente."

#, python-format
msgid "Definitions: %s"
msgstr "Definiciones: %s"

msgid "Detected change in pipeline configuration."
msgstr "Se ha detectado un cambio en la configuración de la interconexión."

#, python-format
msgid "Dropping event data with TTL %d"
msgstr "Descartando datos de sucesos con TTL %d"

#, python-format
msgid "Dropping metering data with TTL %d"
msgstr "Descartando datos de calibración con TTL %d"

#, python-format
msgid "Duplicate event detected, skipping it: %s"
msgstr "Se ha detectado un suceso duplicado, se omitirá: %s"

msgid "Expired residual resource and meter definition data"
msgstr "El recurso residual y los datos de definición del medidor han caducado"

#, python-format
msgid "Index %s will be recreate."
msgstr "Se volverá a crear el índice %s."

#, python-format
msgid "Joined partitioning group %s"
msgstr "Se ha unido al grupo de partición %s"

#, python-format
msgid "Left partitioning group %s"
msgstr "Ha dejado el grupo de partición %s"

#, python-format
msgid "No limit value provided, result set will be limited to %(limit)d."
msgstr ""
"No se ha proporcionado ningún valor límite, el conjunto de resultados estará "
"limitado a %(limit)d."

msgid "Nothing to clean, database event time to live is disabled"
msgstr ""
"No hay nada que limpiar, el tiempo de vida de sucesos de base de datos está "
"inhabilitado"

msgid "Nothing to clean, database metering time to live is disabled"
msgstr ""
"No hay nada que limpiar, el tiempo de vida de medición de base de datos está "
"inhabilitado"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter "
"%(param)s"
msgstr ""
"Interconexión %(pipeline)s: Configure la instancia de transformador %(name)s "
"con el parámetro %(param)s"

#, python-format
msgid "Pipeline config: %s"
msgstr "Configuración de interconexión: %s"

msgid "Pipeline configuration file has been updated."
msgstr "Se ha actualizado el archivo de configuración de la interconexión."

#, python-format
msgid "Polling pollster %(poll)s in the context of %(src)s"
msgstr "Sondeando pollster %(poll)s en el contexto de %(src)s"

#, python-format
msgid "Publishing policy set to %s"
msgstr "Política de publicación establecida en %s"

msgid "Reconfiguring polling tasks."
msgstr "Reconfigurando las tareas de sondeo."

msgid "Reloading notification agent and listeners."
msgstr "Recargando la notificación, el agente y los escuchas."

#, python-format
msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle"
msgstr ""
"Omitir pollster %(name)s, ningún recurso de %(p_context)s ha encontrado "
"este ciclo"

#, python-format
msgid "Starting server in PID %s"
msgstr "Iniciando servidor en PID %s"

msgid "detected decoupled pipeline config format"
msgstr ""
"se ha detectado un formato de configuración de interconexión desacoplado"

#, python-format
msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s"
msgstr ""
"datos de medición %(counter_name)s para %(resource_id)s: %(counter_volume)s"

#, python-format
msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s"
msgstr "sirviendo en 0.0.0.0:%(sport)s, vista en http://127.0.0.1:%(vport)s"

#, python-format
msgid "serving on http://%(host)s:%(port)s"
msgstr "sirviendo en http://%(host)s:%(port)s"
@@ -1,511 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Rafael Rivero <rafael@cloudscaling.com>, 2015
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-24 11:03+0000\n"
"Last-Translator: Eugènia Torrella <tester03@es.ibm.com>\n"
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Spanish\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s No encontrado"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr ""
"El transformador aritmético debe utilizar al menos un medidor en la "
"expresión '%s'"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr ""
"No se puede crear la tabla %(table_name)s, ya existe. Se ignorará el error."

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "Continuar después de error desde %(name)s: %(error)s"

#, python-format
msgid "Could not connect slave host: %s "
msgstr "No se ha podido conectar con el host esclavo: %s"

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "No se puede conectar a XenAPI: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "No se ha podido obtener CPU Util para %(id)s: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr "No se ha podido obtener el uso de memoria para %(id)s: %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "No se puede obtener la utilización de CPU de VM %s"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "No se ha podido obtener la dirección IP de la instancia %s"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"Error de libvirt al buscar la instancia <name=%(name)s, id=%(id)s>: [Código "
"de error %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "Error analizándo respuesta HTTP: %s."

msgid "Error stopping pollster."
msgstr "Error al detener el pollster."

msgid "Event"
msgstr "Suceso"

msgid "Expression evaluated to a NaN value!"
msgstr "La expresión se ha evaluado en un valor NaN."

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "No se ha podido importar la extensión para %(name)s: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
"is SHUTOFF."
msgstr ""
"No se han podido analizar los datos de la instancia <name=%(name)s, id="
"%(id)s>, el estado del dominio es SHUTOFF."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"No se ha podido analizar el uso de memoria de %(instance_uuid)s, no se puede "
"obtener información de libvirt: %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
"not get info from libvirt."
msgstr ""
"No se ha podido analizar el uso de memoria de la instancia <name=%(name)s, "
"id=%(id)s>, no se puede obtener información de libvirt."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "No se ha podido cargar ningún manejador de notificación para %s"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "No se han podido publicar los puntos de datos %d, descartándolos"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "Expresión de filtro no válida: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "Ignorando la instancia %(name)s (%(instance_id)s) : %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "Ignorando la instancia %(name)s: %(error)s"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "Se ignorará el equilibrador de carga %(loadbalancer_id)s"

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "Se ignorará la agrupación %(pool_id)s"

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"Sintaxis de YAML no válida en archivo de definiciones %(file)s en la línea: "
"%(line)s, columna: %(column)s."

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "Periodo no válido %(period)s: %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s"

msgid "Limit must be positive"
msgstr "El límite debe ser positivo"

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr ""
"Se ha devuelto más de un suceso con el %s del controlador de almacenamiento"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "Se han encontrado varias VM %s en XenServer"

msgid "Must specify connection_url, and connection_password to use"
msgstr ""
"Debe especificar el url_conexión y la contraseña_conexión para utilizar"

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s"

msgid "Node Manager init failed"
msgstr "El inicio de Gestor de nodos ha fallado"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "No está autorizado para acceder a %(aspect)s %(id)s"

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "La API OpenDaylitght ha devuelto %(status)s %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only equality operator is available "
"for field %(field)s"
msgstr ""
"El operador %(operator)s no se admite. Solo hay disponible el operador de "
"igualdad para el campo %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"El operador %(operator)s no está admitido. Los operadores admitidos son: "
"%(supported)s"

#, python-format
msgid "Order-by expression not valid: %s"
msgstr "Expresión de ordenar por no válida: %s"

#, python-format
msgid ""
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
msgstr ""
"Error de análisis en especificación de JSONPath '%(jsonpath)s' para "
"%(name)s: %(err)s"

msgid "Period must be positive."
msgstr "El período debe ser positivo."

#, python-format
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
msgstr ""
"Interconexión %(pipeline)s: %(status)s tras el error de la aplicación de "
"publicación %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
msgstr ""
"Interconexión %(pipeline)s: Continúe tras el error de la aplicación de "
"publicación %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
msgstr "Interconexión %(pipeline)s: Error al vaciar el transformador %(trans)s"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
"%(smp)s"
msgstr ""
"Interconexión %(pipeline)s: Salga tras error del transformador %(trans)s "
"para %(smp)s"

#, python-format
msgid "Plugin specified, but no plugin name supplied for %s"
msgstr ""
"Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de "
"plug-in para %s"

#, python-format
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!"

#, python-format
msgid "Polling %(name)s failed for %(cnt)s times!"
msgstr "El sondeo %(name)s ha fallado %(cnt)s veces."

#, python-format
msgid "Pollster for %s is disabled!"
msgstr "¡El Pollster para %s está inhabilitado!"

#, python-format
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
msgstr "¡Impedir pollster %(name)s para el origen de sondeo %(source)s ahora!"

#, python-format
msgid ""
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
msgstr ""
"Se supera la longitud máxima de aplicación de publicación local_queue, "
"descartando los ejemplos más antiguos %d"

#, python-format
msgid "Publishing policy is unknown (%s) force to default"
msgstr ""
"No se conoce la política de publicación (%s) forzar para tomar el valor "
"predeterminado"

#, python-format
msgid "RGW AdminOps API returned %(status)s %(reason)s"
msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s"

msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
msgstr ""
"La solicitud no ha podido conectar con OpenDaylight con la API REST "
"NorthBound"

#, python-format
msgid "Required field %s not specified"
msgstr "Campo necesario %s no especificado"

msgid "Resource"
msgstr "Recurso"

msgid "Sample"
msgstr "Muestra"

msgid "Samples should be included in request body"
msgstr "Los ejemplos se deben incluir en el cuerpo de la solicitud"

#, python-format
msgid "Skip loading extension for %s"
msgstr "Omitir la extensión de carga para %s"

#, python-format
msgid "String %s is not a valid isotime"
msgstr "La serie %s no es una hora iso válida"

msgid ""
"The Yaml file that defines mapping between samples and gnocchi resources/"
"metrics"
msgstr ""
"El archivo Yaml que define la correlación entre los ejemplos y recursos/"
"métricas gnocchi"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"El tipo de datos %(type)s no es compatible. La lista de tipo de datos "
"admitido es: %(supported)s"

#, python-format
msgid "The field 'fields' is required for %s"
msgstr "El campo 'campos' es obligatorio para %s"

msgid "The path for the file publisher is required"
msgstr ""
"La vía de acceso para la aplicación de publicación de archivos es necesaria"

#, python-format
msgid "UDP: Cannot decode data sent by %s"
msgstr "UDP: no se pueden decodificar los datos enviados por %s"

msgid "UDP: Unable to store meter"
msgstr "UDP: no se puede almacenar el medidor"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "No se ha podido conectar con el servidor de base de datos: %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr ""
"No se ha podido convertir el valor %(value)s al tipo de datos esperado "
"%(type)s."

#, python-format
msgid "Unable to discover resources: %s"
msgstr "No se pueden descubrir recursos: %s"

#, python-format
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
msgstr "No se puede evaluar la expresión %(expr)s: %(exc)s"

#, python-format
msgid "Unable to load publisher %s"
msgstr "No se puede cargar la aplicación de publicación %s"

#, python-format
msgid "Unable to load the hypervisor inspector: %s"
msgstr "No se puede cargar el inspector de hipervisor: %s"

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
"up."
msgstr ""
"No se ha podido volver a conectar con la mongodb primaria después de "
"%(retries)d intentos. Se va a abandonar."

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
"%(retry_interval)d seconds."
msgstr ""
"No se ha podido volver a conectar con la mongodb primaria: %(errmsg)s. Se "
"volverá a intentar en %(retry_interval)d segundos."

msgid "Unable to send sample over UDP"
msgstr "No se ha podido enviar una muestra sobre UDP"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"Excepción inesperada al convertir %(value)s al tipo de dato esperado "
"%(type)s."

#, python-format
msgid "Unknown discovery extension: %s"
msgstr "Extensión de descubrimiento desconocida: %s"

#, python-format
msgid "Unknown metadata type. Key (%s) will not be queryable."
msgstr "Tipo de metadatos desconocido. La clave (%s) no se podrá consultar."

#, python-format
msgid ""
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga "
"%(id)s, se omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en la agrupación %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en vip %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "VM %s not found in VMware vSphere"
msgstr "VM %s no se ha encontrado en VMware vSphere"

#, python-format
msgid "VM %s not found in XenServer"
msgstr "No se han encontrado VM %s en XenServer"

msgid "Wrong sensor type"
msgstr "Tipo de sensor incorrecto"

msgid "XenAPI not installed"
msgstr "XenAPI no está instalado"

#, python-format
msgid "YAML error reading Definitions file %(file)s"
msgstr "Error de YAML al leer el archivo de definiciones %(file)s"

#, python-format
msgid "could not get CPU time for %(id)s: %(e)s"
msgstr "no se ha podido obtener tiempo de CPU para %(id)s: %(e)s"

msgid "direct option cannot be true when Gnocchi is enabled."
msgstr ""
"la opción directo no puede estar definida como true cuando Gnocchi esté "
"habilitado."

#, python-format
msgid "dropping out of time order sample: %s"
msgstr "saliendo del ejemplo de orden de tiempo: %s"

#, python-format
msgid "dropping sample with no predecessor: %s"
msgstr "eliminando la muestra sin predecesor: %s"

msgid "ipmitool output length mismatch"
msgstr "la longitud de salida de ipmitool no coincide"

msgid "max_bytes and backup_count should be numbers."
msgstr "max_bytes y backup_count deben ser números."

msgid "parse IPMI sensor data failed,No data retrieved from given input"
msgstr ""
"ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningún "
"dato de la entrada"

msgid "parse IPMI sensor data failed,unknown sensor type"
msgstr ""
"ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido"

msgid "running ipmitool failure"
msgstr "fallo de ejecución de ipmitool"
@@ -1,516 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Corinne Verheyde <cverheyd@hotmail.com>, 2013
# CHABERT Loic <chabert.loic.74@gmail.com>, 2013
# Christophe kryskool <christophe.chauvet@gmail.com>, 2013
# Corinne Verheyde <cverheyd@hotmail.com>, 2013-2014
# EVEILLARD <stephane.eveillard@gmail.com>, 2013-2014
# Francesco Vollero <fvollero@redhat.com>, 2015
# Jonathan Dupart <jonathan+transifex@dupart.org>, 2014
# CHABERT Loic <chabert.loic.74@gmail.com>, 2013
# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
# Nick Barcet <nicolas@barcet.com>, 2013
# Nick Barcet <nicolas@barcet.com>, 2013
# Andrew Melim <nokostya.translation@gmail.com>, 2014
# Patrice LACHANCE <patlachance@gmail.com>, 2013
# Patrice LACHANCE <patlachance@gmail.com>, 2013
# Rémi Le Trocquer <remi.letrocquer@orange.com>, 2014
# EVEILLARD <stephane.eveillard@gmail.com>, 2013
# Corinne Verheyde <cverheyd@hotmail.com>, 2013
# Corinne Verheyde <cverheyd@hotmail.com>, 2013
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Angelique Pillal <pillal@fr.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-29 08:24+0000\n"
"Last-Translator: Angelique Pillal <pillal@fr.ibm.com>\n"
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: French\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s n'a pas été trouvé"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr ""
"Le transformateur arithmétique doit utiliser au moins un mètre dans "
"l'expression '%s'"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr ""
"Impossible de créer la table %(table_name)s car elle existe déjà. Erreur "
"ignorée"

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "Continue après l'erreur %(name)s: %(error)s "

#, python-format
msgid "Could not connect slave host: %s "
msgstr "Impossible de se connecter à l'hôte slave: %s "

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "Connexion impossible XenAPI: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "Ne peut pas recevoir l'utilisation CPU pour %(id)s: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr ""
"Impossible de récupérer l'utilisation de la mémoire pour %(id)s : %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "Impossible d'obtenir l'utilisation CPU de la VM %s"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "Impossible d'obtenir l'adresse IP de l'instance %s"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"Erreur de libvirt lors de la recherche de l'instance <name=%(name)s, id="
"%(id)s> : [Code d'erreur %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "Erreur lors de l'analyse syntaxique de la réponse: %s"

msgid "Error stopping pollster."
msgstr "Erreur lors de l'arrêt du sondeur."

msgid "Event"
msgstr "Événement"

msgid "Expression evaluated to a NaN value!"
msgstr "Expression évaluée avec une valeur not-a-number !"

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "Echec de l'importation de l'extension pour %(name)s: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
"is SHUTOFF."
msgstr ""
"Echec de l'inspection des données de l'instance <name=%(name)s, id=%(id)s>. "
"Le domaine est à l'état SHUTOFF (INTERRUPTION)."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"Echec de l'inspection de l'utilisation de la mémoire de %(instance_uuid)s. "
"Impossible d'obtenir des informations de libvirt : %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
"not get info from libvirt."
msgstr ""
"Echec de l'inspection de l'utilisation de la mémoire de l'instance <name="
"%(name)s, id=%(id)s>. Impossible d'obtenir des informations de libvirt."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "Échec du chargement de tous les gestionnaires de notification pour %s"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "Echec de la publication des points de données %d. Suppression en cours"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr ""
"Echec de la publication des points de données %d. Mettez-les en file "
"d'attente"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "Filtre de l'expression n'est pas valide: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "L'instance %(name)s est ignorée (%(instance_id)s) : %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "instance %(name)s: %(error)s ignoré"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "Loadbalancer %(loadbalancer_id)s ignoré"

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "Pool %(pool_id)s ignoré"

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : "
"%(line)s, colonne : %(column)s."

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "Période %(period)s non valide : %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s"

msgid "Limit must be positive"
msgstr "La limite doit être positive"

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr ""
"Plus d'un événement avec l'identificateur %s a été renvoyé à partir du "
"pilote de stockage"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "Plusieurs machines virtuelles %s trouvées dans XenServer"

msgid "Must specify connection_url, and connection_password to use"
msgstr "Il faut indiquer connection_url et connection_password pour utiliser"

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "Aucun plugin nommé %(plugin)s n'est disponible pour %(name)s"

msgid "Node Manager init failed"
msgstr "Echec de l'initialisation du gestionnaire de noeud"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "Non autorisé à accéder %(aspect)s %(id)s "

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "L'API OpenDaylight a renvoyé %(status)s %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "L'API Opencontrail a renvoyé %(status)s %(reason)s"

#, python-format
|
||||
msgid ""
|
||||
"Operator %(operator)s is not supported. Only equality operator is available "
|
||||
"for field %(field)s"
|
||||
msgstr ""
|
||||
"Opérateur %(operator)s non supporté. Seul l'opérateur égalité est disponible "
|
||||
"pour le champ %(field)s"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Operator %(operator)s is not supported. The supported operators are: "
|
||||
"%(supported)s"
|
||||
msgstr ""
|
||||
"L'opérateur %(operator)s n'est pas supporté. Les opérateurs supportés sont: "
|
||||
"%(supported)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Order-by expression not valid: %s"
|
||||
msgstr "L'expression de tri n'est pas valide : %s"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
|
||||
msgstr ""
|
||||
"Erreur d'analyse dans la spécification JSONPath '%(jsonpath)s' pour "
|
||||
"%(name)s : %(err)s"
|
||||
|
||||
msgid "Period must be positive."
|
||||
msgstr "La période doit être positive."
|
||||
|
||||
#, python-format
|
||||
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
|
||||
msgstr ""
|
||||
"Pipeline %(pipeline)s : statut %(status)s après erreur du diffuseur %(pub)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
|
||||
msgstr "Pipeline %(pipeline)s: Reprise après une erreur de l'éditeur %(pub)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
|
||||
msgstr "Pipeline %(pipeline)s: Erreur à la purge du transformateur %(trans)s"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
|
||||
"%(smp)s"
|
||||
msgstr ""
|
||||
"Pipeline %(pipeline)s: Sortie après erreur du transformateur %(trans)s pour "
|
||||
"%(smp)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Plugin specified, but no plugin name supplied for %s"
|
||||
msgstr "Plugin spécifié, mais aucun nom de plugin n'est fourni pour %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
|
||||
msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !"
|
||||
|
||||
#, python-format
|
||||
msgid "Polling %(name)s failed for %(cnt)s times!"
|
||||
msgstr "Sondage de %(name)s %(cnt)s fois en échec!"
|
||||
|
||||
#, python-format
|
||||
msgid "Pollster for %s is disabled!"
|
||||
msgstr "Le pollster pour %s est désactivé !"
|
||||
|
||||
#, python-format
|
||||
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
|
||||
msgstr "Empêcher le pollster %(name)s d'interroger la source %(source)s !"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
|
||||
msgstr ""
|
||||
"La longueur maximale de local_queue du diffuseur est dépassée, suppression "
|
||||
"des %d échantillons les plus anciens"
|
||||
|
||||
#, python-format
|
||||
msgid "Publishing policy is unknown (%s) force to default"
|
||||
msgstr "La politique de publication est inconnue (%s) forcé le défaut"
|
||||
|
||||
#, python-format
|
||||
msgid "RGW AdminOps API returned %(status)s %(reason)s"
|
||||
msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s"
|
||||
|
||||
msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
|
||||
msgstr ""
|
||||
"La demande n'a pas réussi à se connecter à OpenDaylight avec l'API REST "
|
||||
"NorthBound"
|
||||
|
||||
#, python-format
|
||||
msgid "Required field %s not specified"
|
||||
msgstr "Champ requis %s non spécifiée"
|
||||
|
||||
msgid "Resource"
|
||||
msgstr "Ressource"
|
||||
|
||||
msgid "Sample"
|
||||
msgstr "Echantillon"
|
||||
|
||||
msgid "Samples should be included in request body"
|
||||
msgstr "Des exemples doivent être inclus dans le corps de demande"
|
||||
|
||||
#, python-format
|
||||
msgid "Skip loading extension for %s"
|
||||
msgstr "Passer le chargement de l'extension pour %s"
|
||||
|
||||
#, python-format
|
||||
msgid "String %s is not a valid isotime"
|
||||
msgstr "La chaine de caractère %s n'est pas valide isotime"
|
||||
|
||||
msgid ""
|
||||
"The Yaml file that defines mapping between samples and gnocchi resources/"
|
||||
"metrics"
|
||||
msgstr ""
|
||||
"Fichier Yaml qui définit le mappage entre les exemples et les ressources "
|
||||
"gnocchi /les métriques"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"The data type %(type)s is not supported. The supported data type list is: "
|
||||
"%(supported)s"
|
||||
msgstr ""
|
||||
"Le type de données %(type)s n'est pas supporté. Les types de données "
|
||||
"supportés sont: %(supported)s"
|
||||
|
||||
#, python-format
|
||||
msgid "The field 'fields' is required for %s"
|
||||
msgstr "Le champ 'fields' est requis pour %s"
|
||||
|
||||
msgid "The path for the file publisher is required"
|
||||
msgstr "Le chemin du éditeur de fichier est obligatoire "
|
||||
|
||||
#, python-format
|
||||
msgid "UDP: Cannot decode data sent by %s"
|
||||
msgstr "UDP: Impossible de décoder les données envoyées par %s"
|
||||
|
||||
msgid "UDP: Unable to store meter"
|
||||
msgstr "UDP: Impossible de stocker les mesures"
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to connect to the database server: %(errmsg)s."
|
||||
msgstr "Impossible de se connecter au serveur de base de données : %(errmsg)s."
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unable to convert the value %(value)s to the expected data type %(type)s."
|
||||
msgstr ""
|
||||
"Impossible de convertir la valeur %(value)s vers le type de données attendu "
|
||||
"%(type)s."
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to discover resources: %s"
|
||||
msgstr "Impossible de découvrir les ressources: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
|
||||
msgstr "Impossible d'évaluer l'expression %(expr)s : %(exc)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to load publisher %s"
|
||||
msgstr "Impossible de charger l'éditeur %s "
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to load the hypervisor inspector: %s"
|
||||
msgstr "Impossible de télécharger l'inspecteur hypervisor: %s"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
|
||||
"up."
|
||||
msgstr ""
|
||||
"Impossible de se reconnecter au serveur mongodb principal après %(retries)d "
|
||||
"tentatives. Abandon."
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
|
||||
"%(retry_interval)d seconds."
|
||||
msgstr ""
|
||||
"Impossible de se reconnecter au serveur mongodb principal : %(errmsg)s. "
|
||||
"Nouvelle tentative dans %(retry_interval)d secondes."
|
||||
|
||||
msgid "Unable to send sample over UDP"
|
||||
msgstr "Impossible d'envoyer l'échantillon en UDP"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unexpected exception converting %(value)s to the expected data type %(type)s."
|
||||
msgstr ""
|
||||
"Exception inattendue lors de la conversion de %(value)s dans le type de "
|
||||
"donnée attendue %(type)s."
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown discovery extension: %s"
|
||||
msgstr "Découverte d'une extension inconnue: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown metadata type. Key (%s) will not be queryable."
|
||||
msgstr "Type de métadonnées inconnu, la clé (%s) n'est pas requêtable"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
|
||||
msgstr ""
|
||||
"Statut %(stat)s inconnu reçu sur le Load Balancer %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
|
||||
msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
|
||||
msgstr "Etat %(stat)s inconnu reçu sur le listener %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
|
||||
msgstr "Etat %(stat)s inconnu reçu sur le membre %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
|
||||
msgstr "Etat %(stat)s inconnu reçu sur le pool %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
|
||||
msgstr ""
|
||||
"Etat %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
|
||||
msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, échantillon ignoré"
|
||||
|
||||
#, python-format
|
||||
msgid "VM %s not found in VMware vSphere"
|
||||
msgstr "La machine virtuelle %s est introuvable dans VMware vSphere"
|
||||
|
||||
#, python-format
|
||||
msgid "VM %s not found in XenServer"
|
||||
msgstr "VM %s non trouvé dans XenServer"
|
||||
|
||||
msgid "Wrong sensor type"
|
||||
msgstr "Type de détecteur incorrect"
|
||||
|
||||
msgid "XenAPI not installed"
|
||||
msgstr "XenAPI n'est pas installé"
|
||||
|
||||
#, python-format
|
||||
msgid "YAML error reading Definitions file %(file)s"
|
||||
msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s"
|
||||
|
||||
msgid "alarms URLs is unavailable when Aodh is disabled or unavailable."
|
||||
msgstr ""
|
||||
"Les URL d'alarmes ne sont pas disponibles lorsque Aodh est désactivé ou non "
|
||||
"disponible."
|
||||
|
||||
#, python-format
|
||||
msgid "could not get CPU time for %(id)s: %(e)s"
|
||||
msgstr "impossible d'obtenir le temps UC pour %(id)s : %(e)s"
|
||||
|
||||
msgid "direct option cannot be true when Gnocchi is enabled."
|
||||
msgstr "L'option directe ne peut pas être à vrai si Gnocchi est activé."
|
||||
|
||||
#, python-format
|
||||
msgid "dropping out of time order sample: %s"
|
||||
msgstr "suppression de l'exemple de classement dans le temps : %s"
|
||||
|
||||
#, python-format
|
||||
msgid "dropping sample with no predecessor: %s"
|
||||
msgstr "abandon de l'échantillon sans prédécesseur: %s"
|
||||
|
||||
msgid "ipmitool output length mismatch"
|
||||
msgstr "Non-concordance de longueur de la sortie ipmitool"
|
||||
|
||||
msgid "max_bytes and backup_count should be numbers."
|
||||
msgstr "max_bytes et backup_count doivent etre des chiffres."
|
||||
|
||||
msgid "parse IPMI sensor data failed,No data retrieved from given input"
|
||||
msgstr ""
|
||||
"Echec de l'analyse des données du détecteur IPMI, aucune donnée extraite à "
|
||||
"partir de l'entrée fournie"
|
||||
|
||||
msgid "parse IPMI sensor data failed,unknown sensor type"
|
||||
msgstr ""
|
||||
"Echec de l'analyse des données du détecteur IPMI, type de détecteur inconnu"
|
||||
|
||||
msgid "running ipmitool failure"
|
||||
msgstr "Echec d'exécution d'ipmitool"
|
|
@@ -1,505 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Stefano Maffulli <smaffulli@gmail.com>, 2013
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 02:04+0000\n"
"Last-Translator: Alessandra <alessandra@translated.net>\n"
"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Italian\n"

[deleted Italian (it) message catalog: the same English msgid entries as the French catalog above, with Italian msgstr translations]
@@ -1,506 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Tomoyuki KATO <tomo@dream.daynight.jp>, 2013
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# 笹原 昌美 <ebb0de1@jp.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-16 11:33+0000\n"
"Last-Translator: 笹原 昌美 <ebb0de1@jp.ibm.com>\n"
"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Japanese\n"

[deleted Japanese (ja) message catalog: the same English msgid entries as the French and Italian catalogs above, with Japanese msgstr translations]
@@ -1,135 +0,0 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-07 03:38+0000\n"
"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko-KR\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=1; plural=0\n"

[deleted Korean (ko-KR) error-log message catalog: msgid/msgstr pairs from "Cannot load inspector %(name)s: %(err)s" through "inspector call failed for %(ident)s host %(host)s: %(err)s"]
@@ -1,128 +0,0 @@
# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev57\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-04-18 02:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-07 03:39+0000\n"
"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko-KR\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=1; plural=0\n"

[deleted Korean (ko-KR) info-log message catalog: msgid/msgstr pairs from "%d events are removed from database" through "serving on http://%(host)s:%(port)s"]

@@ -1,155 +0,0 @@
# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-07 03:34+0000\n"
"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko-KR\n"
"X-Generator: Zanata 3.7.3\n"
"Plural-Forms: nplurals=1; plural=0\n"

msgid "Can't connect to keystone, assuming aodh is disabled and retry later."
msgstr ""
"Keystone에 연결할 수 없습니다 . Aodh가 사용되지 않는다고 가정하여 나중에 다"
"시 시도합니다."

msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later"
msgstr ""
"Keystone에 연결할 수 없습니다. Gnocchi가 사용되지 않는다고 가정하여 나중에 다"
"시 시도합니다."

msgid ""
"Cannot extract tasks because agent failed to join group properly. Rejoining "
"group."
msgstr ""
"에이전트가 적절하게 그룹을 결합하지 못했으므로 작업을 추출할 수 없습니다. 그"
"룹을 다시 결합합니다."

#, python-format
msgid ""
"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: "
"%(exc)s"
msgstr ""
"%(instance_id)s의 %(pollster)s 데이터를 검사할 수 없습니다. 치명적이지 않은 "
"이유: %(exc)s"

#, python-format
msgid "Dropping out of time order sample: %s"
msgstr "시간 순서 샘플에서 삭제: %s"

#, python-format
msgid "Dropping sample with no predecessor: %s"
msgstr "선행 작업이 없는 샘플 삭제: %s"

#, python-format
msgid "Duplicated values: %s found in CLI options, auto de-duplicated"
msgstr "중복된 값: CLI 옵션에 %s이(가) 있습니다. 자동으로 중복이 해제됩니다."

#, python-format
msgid "Failed to load any dispatchers for %s"
msgstr "%s의 디스패처를 로드하는 데 실패"

#, python-format
msgid ""
"Failed to parse date from set fields, both fields %(start)s and %(end)s must "
"be datetime: %(err)s"
msgstr ""
"설정 필드에서 데이터를 구문 분석하는 데 실패, 두 필드 %(start)s 와 %(end)s은"
"(는) 모두 datetime임: %(err)s"

#, python-format
msgid "Ignore unrecognized field %s"
msgstr "인식되지 않는 필드 %s 무시"

#, python-format
msgid "Invalid status, skipping IP address %s"
msgstr "올바르지 않은 상태, IP 주소 %s 건너뛰기"

msgid "Negative delta detected, dropping value"
msgstr "음수의 델타가 발견되어 값을 삭제함"

#, python-format
msgid "No endpoints found for service %s"
msgstr "%s 서비스의 엔드포인트를 찾을 수 없음"

msgid ""
"Non-metric meters may be collected. It is highly advisable to disable these "
"meters using ceilometer.conf or the pipeline.yaml"
msgstr ""
"비측정 미터를 수집할 수 없습니다. celometer.conf 또는 pipeline.yaml을 사용하"
"여 이러한 미터를 사용하지 않게 설정하는 것이 좋습니다."

#, python-format
msgid ""
"Skipping %(name)s, %(service_type)s service is not registered in keystone"
msgstr " %(name)s, %(service_type)s 서비스 건너뛰기는 keystone에 등록되지 않음"

#, python-format
msgid "Skipping duplicate meter definition %s"
msgstr "중복 측정 정의 %s 건너뛰기"

msgid ""
"Timedelta plugin is required two timestamp fields to create timedelta value."
msgstr ""
"Timedelta 플러그인에서 timedelta 값을 생성하려면 두 개의 시간소인 필드가 필요"
"합니다."

msgid ""
"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to "
"aodh endpoint."
msgstr ""
"Aodh가 사용된 상태로 ceilometer-api가 시작되었습니다. 알람 URL이 aodh 엔드포"
"인트로 경로가 재지정됩니다."

msgid ""
"ceilometer-api started with gnocchi enabled. The resources/meters/samples "
"URLs are disabled."
msgstr ""
"Gnocchi를 사용한 상태로 ceilometer-api가 시작되었습니다. 자원/측정/샘플 URL"
"을 사용하지 않습니다."

#, python-format
msgid "event signature invalid, discarding event: %s"
msgstr "이벤트 서명이 올바르지 않아 이벤트를 삭제함: %s"

#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no "
"volume (volume: None), the sample will be dropped"
msgstr ""
"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 볼륨"
"(volume: None)이 없으므로 샘플이 삭제됩니다."

#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has "
"volume which is not a number (volume: %(counter_volume)s), the sample will "
"be dropped"
msgstr ""
"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 번호"
"(volume: %(counter_volume)s)가 아닌 볼륨이 있으므로, 샘플이 삭제됩니다."

msgid ""
"pecan_debug cannot be enabled, if workers is > 1, the value is overrided "
"with False"
msgstr ""
"pecan_debug를 사용하도록 설정할 수 없습니다. 작업자가 > 1이면 값이 False로 겹"
"쳐씁니다."

#, python-format
msgid ""
"split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, "
"%(max_split)d)`\" to your jsonpath instead"
msgstr ""
"분할 플러그인은 더 이상 사용되지 않음, 대신 \".`split(%(sep)s, %(segment)d, "
"%(max_split)d)`\"을(를) jsonpath에 추가"

#, python-format
msgid "unable to configure oslo_cache: %s"
msgstr "oslo_cache를 구성할 수 없음: %s"

@@ -1,484 +0,0 @@
# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Seong-ho Cho <darkcircle.0426@gmail.com>, 2014
# Seunghyo Chun <seunghyo.chun@gmail.com>, 2013
# Seunghyo Chun <seunghyo.chun@gmail.com>, 2013
# Sungjin Kang <potopro@gmail.com>, 2013
# Sungjin Kang <potopro@gmail.com>, 2013
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.1.dev170\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-06-07 17:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-07 03:44+0000\n"
"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
"Language: ko-KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Korean (South Korea)\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s을(를) 찾을 수 없음"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr "'%s' 표현식에서 산술 변환기는 하나 이상의 미터를 사용해야 함"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr "%(table_name)s 테이블을 작성할 수 없음, 이미 존재합니다. 오류 무시"

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "%(name)s에서 오류 후 계속: %(error)s"

#, python-format
msgid "Could not connect slave host: %s "
msgstr "슬레이브 호스트를 연결할 수 없음: %s "

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "XenAPI를 연결할 수 없음: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "%(id)s에 대해 CPU Util을 가져올 수 없음: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr "%(id)s에 대한 메모리 사용을 가져올 수 없음: %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "VM %s CPU 이용률을 가져올 수 없음"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "%s 인스턴스의 IP 주소를 얻을 수 없음"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "알림 %(type)s 삭제 중(uuid:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"인스턴스 <name=%(name)s, id=%(id)s> 검색 중 libvirt에서 오류 발생: [오류 코"
"드 %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "HTTP 응답 구문 분석 중 오류 발생: %s"

msgid "Error stopping pollster."
msgstr "의견조사자를 중지하는 중에 오류가 발생했습니다. "

msgid "Event"
msgstr "이벤트"

msgid "Expression evaluated to a NaN value!"
msgstr "표현식이 NaN 값으로 평가되었습니다!"

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "%(name)s 확장자를 가져오는 데 실패함: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
"is SHUTOFF."
msgstr ""
"인스턴스 <이름=%(name)s, id=%(id)s>의 데이터 검사 실패, 도메인 상태가 SHUTOFF"
"입니다."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"%(instance_uuid)s의 메모리 사용량 검사 실패, libvirt에서 정보를 가져올 수 없"
"음: %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
"not get info from libvirt."
msgstr ""
"인스턴스 <이름=%(name)s, id=%(id)s>의 메모리 사용량 검사 실패, libvirt에서 정"
"보를 가져올 수 없습니다."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "%s의 알림 핸들러 로드 실패"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "시간소인 값 %s 구문 분석 실패"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "%d 데이터포인트 공개 실패. 이를 삭제하는 중"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr "%d 데이터포인트 공개 실패. 이를 큐에 대기시킴"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "필터 표현식이 올바르지 않음: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "인스턴스 %(name)s (%(instance_id)s) 무시 중: %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "인스턴스 %(name)s 무시 중: %(error)s"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "로드 밸런서 %(loadbalancer_id)s 무시"

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "풀 %(pool_id)s 무시"

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"다음에서 정의 파일 %(file)s의 올바르지 않은 YAML 구문: 행: %(line)s, 열: "
"%(column)s"

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "올바르지 않은 기간 %(period)s: %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "특성 %(trait)s에 대한 올바르지 않은 특성 유형 '%(type)s'"

msgid "Limit must be positive"
msgstr "제한 값은 양수여야 합니다."

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr "ID가 %s인 둘 이상의 이벤트가 스토리지 드라이버에서 리턴됨"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "여러 VM %s을(를) XenServer에서 찾음 "

msgid "Must specify connection_url, and connection_password to use"
msgstr "사용할 connection_url 및 connection_password를 지정해야 함 "

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "%(name)s에 대해 %(plugin)s(이)라는 플러그인을 사용할 수 없음"

msgid "Node Manager init failed"
msgstr "노드 관리자 초기화 실패"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "%(aspect)s %(id)s에 대한 액세스 권한이 부여되지 않음"

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "OpenDaylitght API가 %(status)s 리턴: %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "Opencontrail API가 %(status)s 리턴: %(reason)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only equality operator is available "
"for field %(field)s"
msgstr ""
"연산자 %(operator)s이(가) 지원되지 않습니다. 필드 %(field)s에는 등호 연산자"
"만 사용할 수 있습니다."

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"연산자 %(operator)s이(가) 지원되지 않습니다. 지원되는 연산자는 %(supported)s"
"입니다. "

#, python-format
msgid "Order-by expression not valid: %s"
msgstr "Order-by 표현식이 올바르지 않음: %s"

#, python-format
msgid ""
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
msgstr ""
" %(name)s에 대한 JSONPath 스펙 '%(jsonpath)s'의 구문 분석 오류: %(err)s"

msgid "Period must be positive."
msgstr "기간은 양수여야 합니다. "

#, python-format
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 %(status)s"

#, python-format
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 계속"

#, python-format
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
msgstr "파이프라인 %(pipeline)s: 변환기 %(trans)s을(를) 비우는 중 오류 발생"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
"%(smp)s"
msgstr "파이프라인 %(pipeline)s: %(smp)s의 변환기 %(trans)s에서 오류 후 종료"

#, python-format
msgid "Plugin specified, but no plugin name supplied for %s"
msgstr "플러그인이 지정되지 않았지만, %s에 플러그인 이름이 제공되지 않음"

#, python-format
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
msgstr "폴링 %(mtr)s 센서가 %(cnt)s번 실패했습니다!"

#, python-format
msgid "Polling %(name)s failed for %(cnt)s times!"
msgstr "폴링 %(name)s이(가) %(cnt)s번 실패했습니다!"

#, python-format
msgid "Pollster for %s is disabled!"
msgstr "%s의 의견조사자가 사용 안함으로 설정되어 있습니다!"

#, python-format
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
msgstr ""
"의견조사자 %(name)s이(가) 소스 %(source)s를 더 이상 폴링하지 않도록 하십시오!"

#, python-format
msgid ""
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
msgstr "공개자 최대 local_queue 길이가 초과됨. %d 가장 오래된 샘플 삭제 중"

#, python-format
msgid "Publishing policy is unknown (%s) force to default"
msgstr "공개 정책을 알 수 없음(%s). 기본값으로 강제 설정함"

#, python-format
msgid "RGW AdminOps API returned %(status)s %(reason)s"
msgstr "RGW AdminOps API가 %(status)s %(reason)s을(를) 리턴함"

msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
msgstr "요청이 NorthBound REST API로 OpenDaylight에 연결하는 데 실패함"

#, python-format
msgid "Required field %s not specified"
msgstr "필수 필드 %s이(가) 지정되지 않음"

msgid "Resource"
msgstr "리소스"

msgid "Sample"
msgstr "샘플"

msgid "Samples should be included in request body"
msgstr "샘플이 요청 본문에 포함되어야 함"

#, python-format
msgid "Skip loading extension for %s"
msgstr "%s 확장자 로드 건너뛰기"

#, python-format
msgid "String %s is not a valid isotime"
msgstr "문자열 %s이(가) 올바른 등시간이 아님"

msgid ""
"The Yaml file that defines mapping between samples and gnocchi resources/"
"metrics"
msgstr "샘플과 gnocchi resources/ 메트릭 간 맵핑을 정의하는 Yaml 파일"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"데이터 유형 %(type)s이(가) 지원되지 않습니다. 지원되는 데이터 유형 목록은 "
"%(supported)s입니다."

#, python-format
msgid "The field 'fields' is required for %s"
msgstr "%s에 'fields' 필드 필요"

msgid "The path for the file publisher is required"
msgstr "파일 공개자의 경로가 필요함"

#, python-format
msgid "UDP: Cannot decode data sent by %s"
msgstr " UDP: %s이(가) 보낸 데이터를 해독할 수 없습니다"

msgid "UDP: Unable to store meter"
msgstr "UDP: 측정을 저장할 수 없습니다"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "데이터베이스 서버에 연결할 수 없음: %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr "%(value)s 값을 예상 데이터 유형 %(type)s(으)로 변환할 수 없습니다."

#, python-format
msgid "Unable to discover resources: %s"
msgstr "자원을 검색할 수 없음: %s"

#, python-format
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
msgstr "%(expr)s 표현식을 평가할 수 없음: %(exc)s"

#, python-format
msgid "Unable to load publisher %s"
msgstr "공개자 %s을(를) 로드할 수 없음"

#, python-format
msgid "Unable to load the hypervisor inspector: %s"
msgstr "하이퍼바이저 검사기를 로드할 수 없음: %s"

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
"up."
msgstr ""
"%(retries)d회 재시도한 이후에는 1차 mongodb에 다시 연결할 수 없습니다. 포기하"
"는 중입니다."

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
"%(retry_interval)d seconds."
msgstr ""
"1차 mongodb에 다시 연결할 수 없음: %(errmsg)s. %(retry_interval)d초 후에 다"
"시 시도합니다."

msgid "Unable to send sample over UDP"
msgstr "UDP를 통해 샘플을 전송할 수 없음"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"%(value)s을(를) 예상된 데이터 유형으로 변환하는 중에 예상치 않은 예외 발생 "
"%(type)s."

#, python-format
msgid "Unknown discovery extension: %s"
msgstr "알 수 없는 검색 확장자: %s"

#, python-format
msgid "Unknown metadata type. Key (%s) will not be queryable."
msgstr "알 수 없는 메타데이터 유형입니다. 키(%s)를 조회할 수 없습니다."

#, python-format
msgid ""
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
msgstr ""
"로드 밸런서 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기"

#, python-format
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
msgstr ""
"fw %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중"

#, python-format
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
msgstr "리스너 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기"

#, python-format
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
msgstr ""
"멤버 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중"

#, python-format
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
msgstr ""
"풀 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중"

#, python-format
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
msgstr ""
"vip %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중"

#, python-format
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
msgstr "vpn%(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기"

#, python-format
msgid "VM %s not found in VMware vSphere"
msgstr "VM %s을(를) VMware vSphere에서 찾을 수 없음"

#, python-format
msgid "VM %s not found in XenServer"
msgstr "VM %s을(를) XenServer에서 찾을 수 없음 "

msgid "Wrong sensor type"
msgstr "잘못된 센서 유형"

msgid "XenAPI not installed"
msgstr "XenAPI가 설치되지 않음"

#, python-format
msgid "YAML error reading Definitions file %(file)s"
msgstr "정의 파일 %(file)s을(를) 읽는 중에 YAML 오류 발생"

msgid "alarms URLs is unavailable when Aodh is disabled or unavailable."
msgstr ""
"Aodh를 사용하지 않게 설정하거나 사용할 수 없는 경우 경보 URL을 사용할 수 없습"
"니다."

#, python-format
msgid "could not get CPU time for %(id)s: %(e)s"
msgstr "%(id)s의 CPU 시간을 가져올 수 없음: %(e)s"

msgid "direct option cannot be true when Gnocchi is enabled."
msgstr "Gnocchi를 사용할 때 직접 옵션은 true일 수 없습니다."

#, python-format
msgid "dropping out of time order sample: %s"
msgstr "시간 순서 샘플에서 벗어남: %s"

#, python-format
msgid "dropping sample with no predecessor: %s"
msgstr "선행 작업이 없는 샘플 삭제: %s"

msgid "ipmitool output length mismatch"
msgstr "ipmitool 출력 길이 불일치"

msgid "max_bytes and backup_count should be numbers."
msgstr "max_bytes 및 backup_count는 숫자여야 합니다."

msgid "parse IPMI sensor data failed,No data retrieved from given input"
msgstr ""
"IPMI 센서 데이터 구문 분석에 실패했음, 제공된 입력에서 검색된 데이터가 없음"

msgid "parse IPMI sensor data failed,unknown sensor type"
msgstr "IPMI 센서 데이터 구문 분석에 실패했음, 알 수 없는 센서 유형"

msgid "running ipmitool failure"
msgstr "ipmitool 실행 실패"