Merge "do not translate debug logs"

commit 92fcb8ba74
Jenkins, 2015-08-13 11:49:23 +00:00, committed by Gerrit Code Review
35 changed files with 156 additions and 174 deletions
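
The same mechanical pattern is applied in every file below. OpenStack's logging guidelines exempt debug-level messages from translation, so the _() marker from ceilometer.i18n is dropped (and the import removed or trimmed where nothing else uses it), while markers for higher severities such as _LI, _LW and _LE are kept. At the same time, eager %-interpolation is replaced by passing the arguments to LOG.debug itself, so the string is only formatted when a handler actually emits the record. A minimal sketch of the before and after (the alarm_id value is illustrative, not taken from the ceilometer tree):

    import logging

    LOG = logging.getLogger(__name__)
    alarm_id = 'example-alarm-id'  # illustrative value

    # Before: translated and eagerly interpolated -- _() and the string
    # formatting both run even when DEBUG output is disabled.
    #     LOG.debug(_('skipping alarm %s: type unsupported') % alarm_id)

    # After: untranslated, with interpolation deferred to the logging
    # module, which formats the message only if the record is emitted.
    LOG.debug('skipping alarm %s: type unsupported', alarm_id)

    # Messages with several placeholders pass a single dict argument:
    LOG.debug('comparing value %(value)s against threshold %(limit)s',
              {'value': 42.0, 'limit': 75.0})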

View File

@@ -101,8 +101,8 @@ class CombinationEvaluator(evaluator.Evaluator):
 
     def evaluate(self, alarm):
         if not self.within_time_constraint(alarm):
-            LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
-                        'within its time constraint.') % alarm.alarm_id)
+            LOG.debug('Attempted to evaluate alarm %s, but it is not '
+                      'within its time constraint.', alarm.alarm_id)
             return
 
         states = zip(alarm.rule['alarm_ids'],

View File

@@ -103,7 +103,7 @@ class GnocchiThresholdEvaluator(evaluator.Evaluator):
                                alarm.rule['resource_type'],
                                alarm.rule['resource_id'], alarm.rule['metric'])
-        LOG.debug(_('stats query %s') % req['url'])
+        LOG.debug('stats query %s', req['url'])
         try:
             r = getattr(requests, method)(**req)
         except Exception:
@@ -125,8 +125,8 @@ class GnocchiThresholdEvaluator(evaluator.Evaluator):
         window = (alarm.rule['granularity'] *
                   (alarm.rule['evaluation_periods'] + cls.look_back))
         start = now - datetime.timedelta(seconds=window)
-        LOG.debug(_('query stats from %(start)s to '
-                    '%(now)s') % {'start': start, 'now': now})
+        LOG.debug('query stats from %(start)s to '
+                  '%(now)s', {'start': start, 'now': now})
         return start.isoformat(), now.isoformat()
 
     def _sufficient(self, alarm, statistics):
@@ -211,8 +211,8 @@ class GnocchiThresholdEvaluator(evaluator.Evaluator):
 
     def evaluate(self, alarm):
         if not self.within_time_constraint(alarm):
-            LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
-                        'within its time constraint.') % alarm.alarm_id)
+            LOG.debug('Attempted to evaluate alarm %s, but it is not '
+                      'within its time constraint.', alarm.alarm_id)
             return
 
         start, end = self._bound_duration(alarm)
@@ -223,9 +223,8 @@ class GnocchiThresholdEvaluator(evaluator.Evaluator):
         def _compare(value):
             op = COMPARATORS[alarm.rule['comparison_operator']]
             limit = alarm.rule['threshold']
-            LOG.debug(_('comparing value %(value)s against threshold'
-                        ' %(limit)s') %
-                      {'value': value, 'limit': limit})
+            LOG.debug('comparing value %(value)s against threshold'
+                      ' %(limit)s', {'value': value, 'limit': limit})
             return op(value, limit)
 
         self._transition(alarm,

View File

@@ -53,8 +53,8 @@ class ThresholdEvaluator(evaluator.Evaluator):
         window = (alarm.rule['period'] *
                   (alarm.rule['evaluation_periods'] + look_back))
         start = now - datetime.timedelta(seconds=window)
-        LOG.debug(_('query stats from %(start)s to '
-                    '%(now)s') % {'start': start, 'now': now})
+        LOG.debug('query stats from %(start)s to '
+                  '%(now)s', {'start': start, 'now': now})
         after = dict(field='timestamp', op='ge', value=start.isoformat())
         before = dict(field='timestamp', op='le', value=now.isoformat())
         constraints.extend([before, after])
@@ -63,7 +63,7 @@ class ThresholdEvaluator(evaluator.Evaluator):
     @staticmethod
     def _sanitize(alarm, statistics):
         """Sanitize statistics."""
-        LOG.debug(_('sanitize stats %s') % statistics)
+        LOG.debug('sanitize stats %s', statistics)
         if alarm.rule.get('exclude_outliers'):
             key = operator.attrgetter('count')
             mean = utils.mean(statistics, key)
@@ -72,7 +72,7 @@ class ThresholdEvaluator(evaluator.Evaluator):
             upper = mean + 2 * stddev
             inliers, outliers = utils.anomalies(statistics, key, lower, upper)
             if outliers:
-                LOG.debug(_('excluded weak datapoints with sample counts %s'),
+                LOG.debug('excluded weak datapoints with sample counts %s',
                           [s.count for s in outliers])
                 statistics = inliers
             else:
@@ -81,12 +81,12 @@ class ThresholdEvaluator(evaluator.Evaluator):
         # in practice statistics are always sorted by period start, not
         # strictly required by the API though
         statistics = statistics[-alarm.rule['evaluation_periods']:]
-        LOG.debug(_('pruned statistics to %d') % len(statistics))
+        LOG.debug('pruned statistics to %d', len(statistics))
         return statistics
 
     def _statistics(self, alarm, query):
         """Retrieve statistics over the current window."""
-        LOG.debug(_('stats query %s') % query)
+        LOG.debug('stats query %s', query)
         try:
             return self._client.statistics.list(
                 meter_name=alarm.rule['meter_name'], q=query,
@@ -175,8 +175,8 @@ class ThresholdEvaluator(evaluator.Evaluator):
 
     def evaluate(self, alarm):
         if not self.within_time_constraint(alarm):
-            LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
-                        'within its time constraint.') % alarm.alarm_id)
+            LOG.debug('Attempted to evaluate alarm %s, but it is not '
+                      'within its time constraint.', alarm.alarm_id)
             return
 
         query = self._bound_duration(
@@ -194,9 +194,8 @@ class ThresholdEvaluator(evaluator.Evaluator):
             op = COMPARATORS[alarm.rule['comparison_operator']]
             value = getattr(stat, alarm.rule['statistic'])
             limit = alarm.rule['threshold']
-            LOG.debug(_('comparing value %(value)s against threshold'
-                        ' %(limit)s') %
-                      {'value': value, 'limit': limit})
+            LOG.debug('comparing value %(value)s against threshold'
+                      ' %(limit)s', {'value': value, 'limit': limit})
             return op(value, limit)
 
         self._transition(alarm,

View File

@@ -21,7 +21,6 @@ from oslo_log import log
 import six
 
 from ceilometer.alarm.storage import models
-from ceilometer.i18n import _
 from ceilometer import messaging
 
 OPTS = [
@@ -46,9 +45,9 @@ class RPCAlarmNotifier(object):
     def notify(self, alarm, previous, reason, reason_data):
         actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state])
         if not actions:
-            LOG.debug(_('alarm %(alarm_id)s has no action configured '
-                        'for state transition from %(previous)s to '
-                        'state %(state)s, skipping the notification.') %
+            LOG.debug('alarm %(alarm_id)s has no action configured '
+                      'for state transition from %(previous)s to '
+                      'state %(state)s, skipping the notification.',
                       {'alarm_id': alarm.alarm_id,
                        'previous': previous,
                        'state': alarm.state})

View File

@@ -100,11 +100,10 @@ class AlarmService(object):
     def _evaluate_alarm(self, alarm):
         """Evaluate the alarms assigned to this evaluator."""
         if alarm.type not in self.supported_evaluators:
-            LOG.debug(_('skipping alarm %s: type unsupported') %
-                      alarm.alarm_id)
+            LOG.debug('skipping alarm %s: type unsupported', alarm.alarm_id)
             return
 
-        LOG.debug(_('evaluating alarm %s') % alarm.alarm_id)
+        LOG.debug('evaluating alarm %s', alarm.alarm_id)
         try:
             self.evaluators[alarm.type].obj.evaluate(alarm)
         except Exception:
@@ -191,8 +190,8 @@ class AlarmNotifierService(os_service.Service):
             return
 
         try:
-            LOG.debug(_("Notifying alarm %(id)s with action %(act)s") % (
-                {'id': alarm_id, 'act': action}))
+            LOG.debug("Notifying alarm %(id)s with action %(act)s",
+                      {'id': alarm_id, 'act': action})
             notifier.notify(action, alarm_id, alarm_name, severity,
                             previous, current, reason, reason_data)
         except Exception:

View File

@@ -19,7 +19,6 @@ from oslo_log import log
 import ceilometer
 from ceilometer.alarm.storage import base
 from ceilometer.alarm.storage import models
-from ceilometer.i18n import _
 from ceilometer.storage.hbase import base as hbase_base
 from ceilometer.storage.hbase import migration as hbase_migration
 from ceilometer.storage.hbase import utils as hbase_utils
@@ -84,18 +83,18 @@ class Connection(hbase_base.Connection, base.Connection):
             hbase_migration.migrate_tables(conn, tables)
 
     def clear(self):
-        LOG.debug(_('Dropping HBase schema...'))
+        LOG.debug('Dropping HBase schema...')
         with self.conn_pool.connection() as conn:
             for table in [self.ALARM_TABLE,
                           self.ALARM_HISTORY_TABLE]:
                 try:
                     conn.disable_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot disable table but ignoring error'))
+                    LOG.debug('Cannot disable table but ignoring error')
                 try:
                     conn.delete_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot delete table but ignoring error'))
+                    LOG.debug('Cannot delete table but ignoring error')
 
     def update_alarm(self, alarm):
         """Create an alarm.

View File

@@ -194,7 +194,7 @@ class TraitsController(rest.RestController):
         :param event_type: Event type to filter traits by
         :param trait_name: Trait to return values for
         """
-        LOG.debug(_("Getting traits for %s") % event_type)
+        LOG.debug("Getting traits for %s", event_type)
         return [Trait._convert_storage_trait(t)
                 for t in pecan.request.event_storage_conn
                 .get_traits(event_type, trait_name)]

View File

@@ -181,12 +181,12 @@ class Statistics(base.Base):
                 self.duration_start and
                 self.duration_start < start_timestamp):
             self.duration_start = start_timestamp
-            LOG.debug(_('clamping min timestamp to range'))
+            LOG.debug('clamping min timestamp to range')
         if (end_timestamp and
                 self.duration_end and
                 self.duration_end > end_timestamp):
             self.duration_end = end_timestamp
-            LOG.debug(_('clamping max timestamp to range'))
+            LOG.debug('clamping max timestamp to range')
 
         # If we got valid timestamps back, compute a duration in seconds.
         #

View File

@@ -18,7 +18,7 @@ import logging
 
 from oslo_config import cfg
 
-from ceilometer.i18n import _, _LI
+from ceilometer.i18n import _LI
 from ceilometer import service
 from ceilometer import storage
@@ -37,7 +37,7 @@ def expirer():
     service.prepare_service()
 
     if cfg.CONF.database.metering_time_to_live > 0:
-        LOG.debug(_("Clearing expired metering data"))
+        LOG.debug("Clearing expired metering data")
         storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
         storage_conn.clear_expired_metering_data(
             cfg.CONF.database.metering_time_to_live)
@@ -46,7 +46,7 @@ def expirer():
                  "is disabled"))
 
     if cfg.CONF.database.event_time_to_live > 0:
-        LOG.debug(_("Clearing expired event data"))
+        LOG.debug("Clearing expired event data")
         event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
         event_conn.clear_expired_event_data(
             cfg.CONF.database.event_time_to_live)

View File

@@ -134,7 +134,7 @@ class CollectorService(os_service.Service):
             LOG.warn(_("UDP: Cannot decode data sent by %s"), source)
         else:
             try:
-                LOG.debug(_("UDP: Storing %s"), sample)
+                LOG.debug("UDP: Storing %s", sample)
                 self.dispatcher_manager.map_method('record_metering_data',
                                                    sample)
             except Exception:

View File

@@ -30,10 +30,10 @@ class CPUPollster(pollsters.BaseComputePollster):
 
     def get_samples(self, manager, cache, resources):
         for instance in resources:
-            LOG.debug(_('checking instance %s'), instance.id)
+            LOG.debug('checking instance %s', instance.id)
             try:
                 cpu_info = self.inspector.inspect_cpus(instance)
-                LOG.debug(_("CPUTIME USAGE: %(instance)s %(time)d"),
+                LOG.debug("CPUTIME USAGE: %(instance)s %(time)d",
                           {'instance': instance.__dict__,
                            'time': cpu_info.time})
                 cpu_num = {'cpu_number': cpu_info.number}
@@ -47,11 +47,11 @@ class CPUPollster(pollsters.BaseComputePollster):
                 )
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('Obtaining CPU time is not implemented for %s'
-                            ), self.inspector.__class__.__name__)
+                LOG.debug('Obtaining CPU time is not implemented for %s',
+                          self.inspector.__class__.__name__)
             except Exception as err:
                 LOG.exception(_('could not get CPU time for %(id)s: %(e)s'),
                               {'id': instance.id, 'e': err})
@@ -62,13 +62,13 @@ class CPUUtilPollster(pollsters.BaseComputePollster):
     def get_samples(self, manager, cache, resources):
         self._inspection_duration = self._record_poll_time()
         for instance in resources:
-            LOG.debug(_('Checking CPU util for instance %s'), instance.id)
+            LOG.debug('Checking CPU util for instance %s', instance.id)
             try:
                 cpu_info = self.inspector.inspect_cpu_util(
                     instance, self._inspection_duration)
-                LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
-                          ({'instance': instance.__dict__,
-                            'util': cpu_info.util}))
+                LOG.debug("CPU UTIL: %(instance)s %(util)d",
+                          {'instance': instance.__dict__,
+                           'util': cpu_info.util})
                 yield util.make_sample_from_instance(
                     instance,
                     name='cpu_util',
@@ -78,10 +78,10 @@ class CPUUtilPollster(pollsters.BaseComputePollster):
                 )
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('Obtaining CPU Util is not implemented for %s'),
+                LOG.debug('Obtaining CPU Util is not implemented for %s',
                           self.inspector.__class__.__name__)
             except Exception as err:
                 LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'),

View File

@@ -128,7 +128,7 @@ class _Base(pollsters.BaseComputePollster):
                     yield s
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except virt_inspector.InstanceShutOffException as e:
                 LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                              'getting samples of %(pollster)s: %(exc)s'),
@@ -136,8 +136,8 @@ class _Base(pollsters.BaseComputePollster):
                           'pollster': self.__class__.__name__, 'exc': e})
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            ' %(pollster)s'),
+                LOG.debug('%(inspector)s does not provide data for '
+                          ' %(pollster)s',
                           {'inspector': self.inspector.__class__.__name__,
                            'pollster': self.__class__.__name__})
             except Exception as err:
@@ -338,11 +338,11 @@ class _DiskRatesPollsterBase(pollsters.BaseComputePollster):
                     yield disk_rate
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            ' %(pollster)s'),
+                LOG.debug('%(inspector)s does not provide data for '
+                          ' %(pollster)s',
                           {'inspector': self.inspector.__class__.__name__,
                            'pollster': self.__class__.__name__})
             except Exception as err:
@@ -520,11 +520,11 @@ class _DiskLatencyPollsterBase(pollsters.BaseComputePollster):
                     yield disk_latency
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            ' %(pollster)s'),
+                LOG.debug('%(inspector)s does not provide data for '
+                          ' %(pollster)s',
                           {'inspector': self.inspector.__class__.__name__,
                            'pollster': self.__class__.__name__})
             except Exception as err:
@@ -601,11 +601,11 @@ class _DiskIOPSPollsterBase(pollsters.BaseComputePollster):
                     yield disk_iops
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            '%(pollster)s'),
+                LOG.debug('%(inspector)s does not provide data for '
+                          '%(pollster)s',
                           {'inspector': self.inspector.__class__.__name__,
                            'pollster': self.__class__.__name__})
             except Exception as err:
@@ -696,7 +696,7 @@ class _DiskInfoPollsterBase(pollsters.BaseComputePollster):
                     yield disk_info
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except virt_inspector.InstanceShutOffException as e:
                 LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                              'getting samples of %(pollster)s: %(exc)s'),
@@ -704,10 +704,10 @@ class _DiskInfoPollsterBase(pollsters.BaseComputePollster):
                           'pollster': self.__class__.__name__, 'exc': e})
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            ' %(pollster)s'), (
+                LOG.debug('%(inspector)s does not provide data for '
+                          ' %(pollster)s',
                     {'inspector': self.inspector.__class__.__name__,
-                     'pollster': self.__class__.__name__}))
+                     'pollster': self.__class__.__name__})
             except Exception as err:
                 instance_name = util.instance_name(instance)
                 LOG.exception(_('Ignoring instance %(name)s '

View File

@@ -30,13 +30,13 @@ class MemoryUsagePollster(pollsters.BaseComputePollster):
     def get_samples(self, manager, cache, resources):
         self._inspection_duration = self._record_poll_time()
         for instance in resources:
-            LOG.debug(_('Checking memory usage for instance %s'), instance.id)
+            LOG.debug('Checking memory usage for instance %s', instance.id)
             try:
                 memory_info = self.inspector.inspect_memory_usage(
                     instance, self._inspection_duration)
-                LOG.debug(_("MEMORY USAGE: %(instance)s %(usage)f"),
-                          ({'instance': instance.__dict__,
-                            'usage': memory_info.usage}))
+                LOG.debug("MEMORY USAGE: %(instance)s %(usage)f",
+                          {'instance': instance.__dict__,
+                           'usage': memory_info.usage})
                 yield util.make_sample_from_instance(
                     instance,
                     name='memory.usage',
@@ -46,7 +46,7 @@ class MemoryUsagePollster(pollsters.BaseComputePollster):
                 )
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except virt_inspector.InstanceShutOffException as e:
                 LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                              'getting samples of %(pollster)s: %(exc)s'),
@@ -59,8 +59,8 @@ class MemoryUsagePollster(pollsters.BaseComputePollster):
                           'instance_id': instance.id, 'exc': e})
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('Obtaining Memory Usage is not implemented for %s'
-                            ), self.inspector.__class__.__name__)
+                LOG.debug('Obtaining Memory Usage is not implemented for %s',
+                          self.inspector.__class__.__name__)
             except Exception as err:
                 LOG.exception(_('Could not get Memory Usage for '
                                 '%(id)s: %(e)s'), {'id': instance.id,
@@ -72,14 +72,14 @@ class MemoryResidentPollster(pollsters.BaseComputePollster):
     def get_samples(self, manager, cache, resources):
         self._inspection_duration = self._record_poll_time()
         for instance in resources:
-            LOG.debug(_('Checking resident memory for instance %s'),
+            LOG.debug('Checking resident memory for instance %s',
                       instance.id)
             try:
                 memory_info = self.inspector.inspect_memory_resident(
                     instance, self._inspection_duration)
-                LOG.debug(_("RESIDENT MEMORY: %(instance)s %(resident)f"),
-                          ({'instance': instance.__dict__,
-                            'resident': memory_info.resident}))
+                LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f",
+                          {'instance': instance.__dict__,
+                           'resident': memory_info.resident})
                 yield util.make_sample_from_instance(
                     instance,
                     name='memory.resident',
@@ -89,7 +89,7 @@ class MemoryResidentPollster(pollsters.BaseComputePollster):
                 )
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except virt_inspector.InstanceShutOffException as e:
                 LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                              'getting samples of %(pollster)s: %(exc)s'),
@@ -102,8 +102,8 @@ class MemoryResidentPollster(pollsters.BaseComputePollster):
                           'instance_id': instance.id, 'exc': e})
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('Obtaining Resident Memory is not implemented'
-                            ' for %s'), self.inspector.__class__.__name__)
+                LOG.debug('Obtaining Resident Memory is not implemented'
+                          ' for %s', self.inspector.__class__.__name__)
             except Exception as err:
                 LOG.exception(_LE('Could not get Resident Memory Usage for '
                                   '%(id)s: %(e)s'), {'id': instance.id,

View File

@@ -89,7 +89,7 @@ class _Base(pollsters.BaseComputePollster):
         self._inspection_duration = self._record_poll_time()
         for instance in resources:
             instance_name = util.instance_name(instance)
-            LOG.debug(_('checking net info for instance %s'), instance.id)
+            LOG.debug('checking net info for instance %s', instance.id)
             try:
                 vnics = self._get_vnics_for_instance(
                     cache,
@@ -103,7 +103,7 @@ class _Base(pollsters.BaseComputePollster):
                     yield self._get_sample(instance, vnic, info)
             except virt_inspector.InstanceNotFoundException as err:
                 # Instance was deleted while getting samples. Ignore it.
-                LOG.debug(_('Exception while getting samples %s'), err)
+                LOG.debug('Exception while getting samples %s', err)
             except virt_inspector.InstanceShutOffException as e:
                 LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                              'getting samples of %(pollster)s: %(exc)s'),
@@ -111,8 +111,8 @@ class _Base(pollsters.BaseComputePollster):
                           'pollster': self.__class__.__name__, 'exc': e})
             except ceilometer.NotImplementedError:
                 # Selected inspector does not implement this pollster.
-                LOG.debug(_('%(inspector)s does not provide data for '
-                            ' %(pollster)s'),
+                LOG.debug('%(inspector)s does not provide data for '
+                          ' %(pollster)s',
                           {'inspector': self.inspector.__class__.__name__,
                            'pollster': self.__class__.__name__})
             except Exception as err:

View File

@@ -51,7 +51,7 @@ def retry_on_disconnect(function):
             if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
                 e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                          libvirt.VIR_FROM_RPC)):
-                LOG.debug(_('Connection to libvirt broken'))
+                LOG.debug('Connection to libvirt broken')
                 self.connection = None
                 return function(self, *args, **kwargs)
             else:
@@ -76,7 +76,7 @@ class LibvirtInspector(virt_inspector.Inspector):
             global libvirt
             if libvirt is None:
                 libvirt = __import__('libvirt')
-            LOG.debug(_('Connecting to libvirt: %s'), self.uri)
+            LOG.debug('Connecting to libvirt: %s', self.uri)
             self.connection = libvirt.openReadOnly(self.uri)
 
         return self.connection

View File

@@ -38,8 +38,7 @@ DISPATCHER_NAMESPACE = 'ceilometer.dispatcher'
 
 def load_dispatcher_manager():
-    LOG.debug(_('loading dispatchers from %s'),
-              DISPATCHER_NAMESPACE)
+    LOG.debug('loading dispatchers from %s', DISPATCHER_NAMESPACE)
     # set propagate_map_exceptions to True to enable stevedore
     # to propagate exceptions.
     dispatcher_manager = named.NamedExtensionManager(

View File

@@ -18,7 +18,7 @@ from oslo_utils import timeutils
 
 from ceilometer import dispatcher
 from ceilometer.event.storage import models
-from ceilometer.i18n import _, _LE, _LW
+from ceilometer.i18n import _LE, _LW
 from ceilometer.publisher import utils as publisher_utils
 from ceilometer import storage
@@ -73,13 +73,13 @@ class DatabaseDispatcher(dispatcher.Base):
             data = [data]
 
         for meter in data:
-            LOG.debug(_(
+            LOG.debug(
                 'metering data %(counter_name)s '
-                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
-                % ({'counter_name': meter['counter_name'],
-                    'resource_id': meter['resource_id'],
-                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
-                    'counter_volume': meter['counter_volume']}))
+                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
+                {'counter_name': meter['counter_name'],
+                 'resource_id': meter['resource_id'],
+                 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
+                 'counter_volume': meter['counter_volume']})
             if publisher_utils.verify_signature(
                     meter, self.conf.publisher.telemetry_secret):
                 try:

View File

@@ -77,8 +77,7 @@ class LegacyArchivePolicyDefinition(object):
     def __init__(self, definition_cfg):
         self.cfg = definition_cfg
         if self.cfg is None:
-            LOG.debug(_("No archive policy file found!"
-                        " Using default config."))
+            LOG.debug("No archive policy file found! Using default config.")
 
     def get(self, metric_name):
         if self.cfg is not None:
@@ -232,8 +231,7 @@ class GnocchiDispatcher(dispatcher.Base):
                 LOG.exception('fail to retreive user of Gnocchi service')
                 raise
             self._gnocchi_project_id = project.id
-            LOG.debug("gnocchi project found: %s" %
-                      self.gnocchi_project_id)
+            LOG.debug("gnocchi project found: %s", self.gnocchi_project_id)
         return self._gnocchi_project_id
 
     def _is_swift_account_sample(self, sample):

View File

@@ -109,9 +109,9 @@ class Client(object):
                               data=json.dumps(measure_attributes))
 
         if r.status_code == 404:
-            LOG.debug(_("The metric %(metric_name)s of "
-                        "resource %(resource_id)s doesn't exists: "
-                        "%(status_code)d"),
+            LOG.debug("The metric %(metric_name)s of "
+                      "resource %(resource_id)s doesn't exists: "
+                      "%(status_code)d",
                       {'metric_name': metric_name,
                        'resource_id': resource_id,
                        'status_code': r.status_code})

View File

@@ -86,13 +86,13 @@ class HttpDispatcher(dispatcher.Base):
             data = [data]
 
         for meter in data:
-            LOG.debug(_(
+            LOG.debug(
                 'metering data %(counter_name)s '
-                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
-                % ({'counter_name': meter['counter_name'],
-                    'resource_id': meter['resource_id'],
-                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
-                    'counter_volume': meter['counter_volume']}))
+                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
+                {'counter_name': meter['counter_name'],
+                 'resource_id': meter['resource_id'],
+                 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
+                 'counter_volume': meter['counter_volume']})
            if publisher_utils.verify_signature(
                     meter, self.conf.publisher.telemetry_secret):
                 try:
@@ -111,8 +111,8 @@ class HttpDispatcher(dispatcher.Base):
                                         data=json.dumps(data),
                                         headers=self.headers,
                                         timeout=self.timeout)
-                    LOG.debug(_('Message posting finished with status code '
-                                '%d.') % res.status_code)
+                    LOG.debug('Message posting finished with status code '
+                              '%d.', res.status_code)
                 except Exception as err:
                     LOG.exception(_('Failed to record metering data: %s'),
                                   err)

View File

@@ -21,7 +21,6 @@ import requests
 import six
 
 from ceilometer.agent import plugin_base
-from ceilometer.i18n import _
 from ceilometer import sample
@@ -85,7 +84,7 @@ class _Base(plugin_base.PollsterBase):
         try:
             client = self.get_kwapi_client(ksclient, endpoint)
         except exceptions.EndpointNotFound:
-            LOG.debug(_("Kwapi endpoint not found"))
+            LOG.debug("Kwapi endpoint not found")
             return []
         return list(client.iter_probes())

View File

@@ -369,7 +369,7 @@ def setup_events(trait_plugin_mgr):
     """Setup the event definitions from yaml config file."""
     config_file = get_config_file()
     if config_file is not None:
-        LOG.debug(_("Event Definitions configuration file: %s"), config_file)
+        LOG.debug("Event Definitions configuration file: %s", config_file)
 
         with open(config_file) as cf:
            config = cf.read()
@@ -392,8 +392,8 @@
             raise
 
     else:
-        LOG.debug(_("No Event Definitions configuration file found!"
-                    " Using default config."))
+        LOG.debug("No Event Definitions configuration file found!"
+                  " Using default config.")
        events_config = []
 
     LOG.info(_("Event Definitions: %s"), events_config)

View File

@@ -1,4 +1,3 @@
-#
 # Copyright 2012-2014 eNovance <licensing@enovance.com>
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,7 +20,6 @@ import oslo_messaging
 from stevedore import extension
 
 from ceilometer.event import converter as event_converter
-from ceilometer.i18n import _
 from ceilometer import messaging
 
 LOG = logging.getLogger(__name__)
@@ -30,7 +28,7 @@ LOG = logging.getLogger(__name__)
 class EventsNotificationEndpoint(object):
     def __init__(self, manager):
         super(EventsNotificationEndpoint, self).__init__()
-        LOG.debug(_('Loading event definitions'))
+        LOG.debug('Loading event definitions')
         self.ctxt = context.get_admin_context()
         self.event_converter = event_converter.setup_events(
             extension.ExtensionManager(

View File

@@ -17,7 +17,7 @@ from oslo_log import log
 
 from ceilometer.event.storage import base
 from ceilometer.event.storage import models
-from ceilometer.i18n import _, _LE
+from ceilometer.i18n import _LE
 from ceilometer.storage.hbase import base as hbase_base
 from ceilometer.storage.hbase import utils as hbase_utils
 from ceilometer import utils
@@ -77,17 +77,17 @@ class Connection(hbase_base.Connection, base.Connection):
             hbase_utils.create_tables(conn, tables, column_families)
 
     def clear(self):
-        LOG.debug(_('Dropping HBase schema...'))
+        LOG.debug('Dropping HBase schema...')
         with self.conn_pool.connection() as conn:
             for table in [self.EVENT_TABLE]:
                 try:
                     conn.disable_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot disable table but ignoring error'))
+                    LOG.debug('Cannot disable table but ignoring error')
                 try:
                     conn.delete_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot delete table but ignoring error'))
+                    LOG.debug('Cannot delete table but ignoring error')
 
     def record_events(self, event_models):
         """Write the events to Hbase.

View File

@@ -203,8 +203,8 @@ class NotificationService(service_base.BaseService):
             if (cfg.CONF.notification.disable_non_metric_meters and
                     isinstance(handler, base.NonMetricNotificationBase)):
                 continue
-            LOG.debug(_('Event types from %(name)s: %(type)s'
-                        ' (ack_on_error=%(error)s)') %
+            LOG.debug('Event types from %(name)s: %(type)s'
+                      ' (ack_on_error=%(error)s)',
                       {'name': ext.name,
                        'type': ', '.join(handler.event_types),
                        'error': ack_on_error})
@@ -239,7 +239,7 @@ class NotificationService(service_base.BaseService):
             pipelines = self.pipeline_manager.pipelines + ev_pipes
             transport = messaging.get_transport()
             for pipe in pipelines:
-                LOG.debug(_('Pipeline endpoint: %s'), pipe.name)
+                LOG.debug('Pipeline endpoint: %s', pipe.name)
                 pipe_endpoint = (pipeline.EventPipelineEndpoint
                                  if isinstance(pipe, pipeline.EventPipeline) else
                                  pipeline.SamplePipelineEndpoint)

View File

@@ -22,7 +22,6 @@ from oslo_utils import timeutils
 import six.moves.urllib.parse as urlparse
 
 from ceilometer.agent import plugin_base
-from ceilometer.i18n import _
 from ceilometer import sample
 
 LOG = log.getLogger(__name__)
@@ -76,7 +75,7 @@ class _Base(plugin_base.PollsterBase):
                     endpoint_type=conf.os_endpoint_type)
                 _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin')
             except exceptions.EndpointNotFound:
-                LOG.debug(_("Radosgw endpoint not found"))
+                LOG.debug("Radosgw endpoint not found")
         return _Base._ENDPOINT
 
     def _iter_accounts(self, ksclient, cache, tenants):

View File

@@ -25,7 +25,6 @@ import six.moves.urllib.parse as urlparse
 from swiftclient import client as swift
 
 from ceilometer.agent import plugin_base
-from ceilometer.i18n import _
 from ceilometer import sample
@@ -73,7 +72,7 @@ class _Base(plugin_base.PollsterBase):
                     service_type=cfg.CONF.service_types.swift,
                     endpoint_type=conf.os_endpoint_type)
             except exceptions.EndpointNotFound:
-                LOG.debug(_("Swift endpoint not found"))
+                LOG.debug("Swift endpoint not found")
         return _Base._ENDPOINT
 
     def _iter_accounts(self, ksclient, cache, tenants):

View File

@@ -438,10 +438,10 @@ class SampleSink(Sink):
             for transformer in self.transformers[start:]:
                 sample = transformer.handle_sample(ctxt, sample)
                 if not sample:
-                    LOG.debug(_(
+                    LOG.debug(
                         "Pipeline %(pipeline)s: Sample dropped by "
-                        "transformer %(trans)s") % ({'pipeline': self,
-                                                     'trans': transformer}))
+                        "transformer %(trans)s", {'pipeline': self,
+                                                  'trans': transformer})
                     return
             return sample
         except Exception as err:
@@ -469,11 +469,11 @@ class SampleSink(Sink):
             transformed_samples = samples
         else:
             for sample in samples:
-                LOG.debug(_(
+                LOG.debug(
                     "Pipeline %(pipeline)s: Transform sample "
-                    "%(smp)s from %(trans)s transformer") % ({'pipeline': self,
-                                                              'smp': sample,
-                                                              'trans': start}))
+                    "%(smp)s from %(trans)s transformer", {'pipeline': self,
+                                                           'smp': sample,
+                                                           'trans': start})
                 sample = self._transform_sample(start, ctxt, sample)
                 if sample:
                     transformed_samples.append(sample)
@@ -744,7 +744,7 @@ def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
     if not os.path.exists(cfg_file):
         cfg_file = cfg.CONF.find_file(cfg_file)
 
-    LOG.debug(_("Pipeline config file: %s"), cfg_file)
+    LOG.debug("Pipeline config file: %s", cfg_file)
 
     with open(cfg_file) as fap:
         data = fap.read()
@@ -763,7 +763,7 @@ def _setup_polling_manager(cfg_file):
     if not os.path.exists(cfg_file):
         cfg_file = cfg.CONF.find_file(cfg_file)
 
-    LOG.debug(_("Polling config file: %s"), cfg_file)
+    LOG.debug("Polling config file: %s", cfg_file)
 
     with open(cfg_file) as fap:
         data = fap.read()

View File

@@ -57,9 +57,9 @@ class UDPPublisher(publisher.PublisherBase):
                 sample, cfg.CONF.publisher.telemetry_secret)
             host = self.host
             port = self.port
-            LOG.debug(_("Publishing sample %(msg)s over UDP to "
-                        "%(host)s:%(port)d") % {'msg': msg, 'host': host,
-                                                'port': port})
+            LOG.debug("Publishing sample %(msg)s over UDP to "
+                      "%(host)s:%(port)d", {'msg': msg, 'host': host,
+                                            'port': port})
             try:
                 self.socket.sendto(msgpack.dumps(msg),
                                    (self.host, self.port))

View File

@@ -20,7 +20,7 @@ from oslo_log import log
 from oslo_service import service as os_service
 import six
 
-from ceilometer.i18n import _, _LE, _LI
+from ceilometer.i18n import _LE, _LI
 from ceilometer import pipeline
 
 LOG = log.getLogger(__name__)
@@ -60,12 +60,12 @@ class BaseService(os_service.Service):
                 # Polling in the polling agent.
                 elif hasattr(self, 'polling_manager'):
                     self.polling_manager = pipeline.setup_polling()
-                LOG.debug(_("Pipeline has been refreshed. "
-                            "old hash: %(old)s, new hash: %(new)s") %
-                          ({'old': self.pipeline_hash,
-                            'new': _hash}))
+                LOG.debug("Pipeline has been refreshed. "
+                          "old hash: %(old)s, new hash: %(new)s",
+                          {'old': self.pipeline_hash,
+                           'new': _hash})
             except Exception as err:
-                LOG.debug(_("Active pipeline config's hash is %s") %
+                LOG.debug("Active pipeline config's hash is %s",
                           self.pipeline_hash)
                 LOG.exception(_LE('Unable to load changed pipeline: %s')
                               % err)

View File

@@ -18,7 +18,6 @@ from oslo_log import log
 from oslo_utils import netutils
 from six.moves.urllib import parse as urlparse
 
-from ceilometer.i18n import _
 from ceilometer.storage.hbase import inmemory as hbase_inmemory
 
 LOG = log.getLogger(__name__)
@@ -42,8 +41,8 @@ class Connection(object):
         else:
             # This is a in-memory usage for unit tests
             if Connection._memory_instance is None:
-                LOG.debug(_('Creating a new in-memory HBase '
-                            'Connection object'))
+                LOG.debug('Creating a new in-memory HBase '
+                          'Connection object')
                 Connection._memory_instance = (hbase_inmemory.
                                                MConnectionPool())
             self.conn_pool = Connection._memory_instance
@@ -59,8 +58,8 @@ class Connection(object):
         The tests use a subclass to override this and return an
        in-memory connection pool.
        """
-        LOG.debug(_('connecting to HBase on %(host)s:%(port)s') % (
-            {'host': conf['host'], 'port': conf['port']}))
+        LOG.debug('connecting to HBase on %(host)s:%(port)s',
+                  {'host': conf['host'], 'port': conf['port']})
         return happybase.ConnectionPool(size=100, host=conf['host'],
                                         port=conf['port'],
                                         table_prefix=conf['table_prefix'])

View File

@@ -21,7 +21,6 @@ from oslo_log import log
 import six
 
 import ceilometer
-from ceilometer.i18n import _
 
 LOG = log.getLogger(__name__)
@@ -265,7 +264,7 @@ class MConnection(object):
 
     @staticmethod
     def open():
-        LOG.debug(_("Opening in-memory HBase connection"))
+        LOG.debug("Opening in-memory HBase connection")
 
     def create_table(self, n, families=None):
         families = families or {}

View File

@@ -19,7 +19,6 @@ from oslo_log import log
 from oslo_utils import timeutils
 
 import ceilometer
-from ceilometer.i18n import _
 from ceilometer.storage import base
 from ceilometer.storage.hbase import base as hbase_base
 from ceilometer.storage.hbase import migration as hbase_migration
@@ -128,18 +127,18 @@ class Connection(hbase_base.Connection, base.Connection):
             hbase_migration.migrate_tables(conn, tables)
 
     def clear(self):
-        LOG.debug(_('Dropping HBase schema...'))
+        LOG.debug('Dropping HBase schema...')
         with self.conn_pool.connection() as conn:
             for table in [self.RESOURCE_TABLE,
                           self.METER_TABLE]:
                 try:
                     conn.disable_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot disable table but ignoring error'))
+                    LOG.debug('Cannot disable table but ignoring error')
                 try:
                     conn.delete_table(table)
                 except Exception:
-                    LOG.debug(_('Cannot delete table but ignoring error'))
+                    LOG.debug('Cannot delete table but ignoring error')
 
     def record_metering_data(self, data):
         """Write the data to the backend storage system.
@@ -212,7 +211,7 @@ class Connection(hbase_base.Connection, base.Connection):
                                                  source, q)
         with self.conn_pool.connection() as conn:
             resource_table = conn.table(self.RESOURCE_TABLE)
-            LOG.debug(_("Query Resource table: %s") % q)
+            LOG.debug("Query Resource table: %s", q)
             for resource_id, data in resource_table.scan(filter=q,
                                                          limit=limit):
                 f_res, sources, meters, md = hbase_utils.deserialize_entry(
@@ -263,7 +262,7 @@ class Connection(hbase_base.Connection, base.Connection):
                                                project_id=project,
                                                resource_id=resource,
                                                source=source)
-            LOG.debug(_("Query Resource table: %s") % q)
+            LOG.debug("Query Resource table: %s", q)
 
             gen = resource_table.scan(filter=q)
             # We need result set to be sure that user doesn't receive several
@@ -305,7 +304,7 @@ class Connection(hbase_base.Connection, base.Connection):
             q, start, stop, columns = (hbase_utils.
                                        make_sample_query_from_filter
                                        (sample_filter, require_meter=False))
-            LOG.debug(_("Query Meter Table: %s") % q)
+            LOG.debug("Query Meter Table: %s", q)
             gen = meter_table.scan(filter=q, row_start=start, row_stop=stop,
                                    limit=limit, columns=columns)
             for ignored, meter in gen:

View File

@@ -33,7 +33,6 @@ import pymongo
 import six
 
 import ceilometer
-from ceilometer.i18n import _
 from ceilometer import storage
 from ceilometer.storage import base
 from ceilometer.storage import models
@@ -312,8 +311,8 @@ class Connection(pymongo_base.Connection):
 
         Clearing occurs with native MongoDB time-to-live feature.
         """
-        LOG.debug(_("Clearing expired metering data is based on native "
-                    "MongoDB time to live feature and going in background."))
+        LOG.debug("Clearing expired metering data is based on native "
+                  "MongoDB time to live feature and going in background.")
 
     @staticmethod
     def _get_marker(db_collection, marker_pairs):

View File

@@ -43,10 +43,9 @@ class ScalingTransformer(transformer.TransformerBase):
         self.source = source
         self.target = target
         self.scale = target.get('scale')
-        LOG.debug(_('scaling conversion transformer with source:'
-                    ' %(source)s target: %(target)s:')
-                  % {'source': source,
-                     'target': target})
+        LOG.debug('scaling conversion transformer with source:'
+                  ' %(source)s target: %(target)s:', {'source': source,
+                                                      'target': target})
         super(ScalingTransformer, self).__init__(**kwargs)
 
     def _scale(self, s):
@@ -89,10 +88,10 @@
     def handle_sample(self, context, s):
        """Handle a sample, converting if necessary."""
-        LOG.debug(_('handling sample %s'), (s,))
+        LOG.debug('handling sample %s', s)
         if self.source.get('unit', s.unit) == s.unit:
             s = self._convert(s)
-            LOG.debug(_('converted to: %s'), (s,))
+            LOG.debug('converted to: %s', s)
         return s
@@ -111,7 +110,7 @@ class RateOfChangeTransformer(ScalingTransformer):
     def handle_sample(self, context, s):
         """Handle a sample, converting if necessary."""
-        LOG.debug(_('handling sample %s'), (s,))
+        LOG.debug('handling sample %s', s)
         key = s.name + s.resource_id
         prev = self.cache.get(key)
         timestamp = timeutils.parse_isotime(s.timestamp)
@@ -139,7 +138,7 @@ class RateOfChangeTransformer(ScalingTransformer):
                       if time_delta else 0.0)
 
             s = self._convert(s, rate_of_change)
-            LOG.debug(_('converted to: %s'), (s,))
+            LOG.debug('converted to: %s', s)
         else:
             LOG.warn(_('dropping sample with no predecessor: %s'),
                      (s,))