Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I836a018f971fb71bfa35e76f00433a263440de18
Author: xuqiankun
Date: 2017-03-21 14:05:54 +08:00
Parent: 4c995d35cb
Commit: a584a85bdb
43 changed files with 221 additions and 259 deletions
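
For readers unfamiliar with the marker convention, the mechanical change applied in every file below follows one pattern. A minimal sketch, assuming oslo.log is set up as usual (the warn_missing helper is illustrative only, not code from the tree):

from oslo_log import log

LOG = log.getLogger(__name__)

# Before this commit, log messages were wrapped in level-specific
# translation markers imported from ceilometer.i18n:
#     from ceilometer.i18n import _LW
#     LOG.warning(_LW('No endpoints found for service %s'), service)

def warn_missing(service):
    # After: the marker is gone and the message string is passed as-is.
    # Interpolation arguments stay on the logging call, so the string is
    # only formatted if the record is actually emitted.
    LOG.warning('No endpoints found for service %s', service)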

View File

@@ -15,7 +15,6 @@
from oslo_log import log
from ceilometer.agent import plugin_base as plugin
-from ceilometer.i18n import _LW
from ceilometer import keystone_client
LOG = log.getLogger(__name__)
@@ -36,7 +35,7 @@ class EndpointDiscovery(plugin.DiscoveryBase):
interface=self.conf.service_credentials.interface,
region_name=self.conf.service_credentials.region_name)
if not endpoints:
-LOG.warning(_LW('No endpoints found for service %s'),
+LOG.warning('No endpoints found for service %s',
"<all services>" if param is None else param)
return []
return endpoints
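
One detail to keep in mind while reading the hunks: only the translation markers are removed; each call's formatting style is left alone. Calls that pass arguments to the logger keep deferred interpolation, and calls that already interpolated eagerly with % keep doing so. A short sketch of the difference, with an illustrative message rather than one taken verbatim from a hunk:

import logging

LOG = logging.getLogger(__name__)

service = 'image'
# Deferred: the logger interpolates only if WARNING is enabled, as in
# LOG.warning('No endpoints found for service %s', param) above.
LOG.warning('No endpoints found for service %s', service)
# Eager: the string is built before the call, matching hunks that keep
# an existing "... %s" % value style.
LOG.warning('No endpoints found for service %s' % service)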

View File

@@ -35,7 +35,6 @@ from stevedore import extension
from ceilometer.agent import plugin_base
from ceilometer import coordination
-from ceilometer.i18n import _LE, _LI, _LW
from ceilometer import keystone_client
from ceilometer import messaging
from ceilometer import pipeline
@@ -173,13 +172,13 @@ class PollingTask(object):
# If no resources, skip for this pollster
if not polling_resources:
p_context = 'new ' if history else ''
-LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s"
-"resources found this cycle"),
+LOG.info("Skip pollster %(name)s, no %(p_context)s"
+"resources found this cycle",
{'name': pollster.name, 'p_context': p_context})
continue
-LOG.info(_LI("Polling pollster %(poll)s in the context of "
-"%(src)s"),
+LOG.info("Polling pollster %(poll)s in the context of "
+"%(src)s",
dict(poll=pollster.name, src=source_name))
try:
polling_timestamp = timeutils.utcnow().isoformat()
@@ -206,15 +205,15 @@
self._send_notification(sample_batch)
except plugin_base.PollsterPermanentError as err:
-LOG.error(_LE(
+LOG.error(
'Prevent pollster %(name)s from '
-'polling %(res_list)s on source %(source)s anymore!')
+'polling %(res_list)s on source %(source)s anymore!'
% ({'name': pollster.name, 'source': source_name,
'res_list': err.fail_res_list}))
self.resources[key].blacklist.extend(err.fail_res_list)
except Exception as err:
-LOG.error(_LE(
-'Continue after error from %(name)s: %(error)s')
+LOG.error(
+'Continue after error from %(name)s: %(error)s'
% ({'name': pollster.name, 'error': err}),
exc_info=True)
@@ -304,13 +303,13 @@ class AgentManager(cotyledon.Service):
# Extension raising ExtensionLoadError can be ignored,
# and ignore anything we can't import as a safety measure.
if isinstance(exc, plugin_base.ExtensionLoadError):
-LOG.exception(_LE("Skip loading extension for %s"), ep.name)
+LOG.exception("Skip loading extension for %s", ep.name)
return
show_exception = (LOG.isEnabledFor(logging.DEBUG)
and isinstance(exc, ImportError))
-LOG.error(_LE("Failed to import extension for %(name)r: "
-"%(error)s"),
+LOG.error("Failed to import extension for %(name)r: "
+"%(error)s",
{'name': ep.name, 'error': exc},
exc_info=show_exception)
if isinstance(exc, ImportError):
@@ -486,9 +485,9 @@
if not keystone_client.get_service_catalog(
self.keystone).get_endpoints(
service_type=service_type):
-LOG.warning(_LW(
+LOG.warning(
'Skipping %(name)s, %(service_type)s service '
-'is not registered in keystone'),
+'is not registered in keystone',
{'name': name, 'service_type': service_type})
continue
@@ -505,12 +504,12 @@
if discovery_cache is not None:
discovery_cache[url] = partitioned
except ka_exceptions.ClientException as e:
-LOG.error(_LE('Skipping %(name)s, keystone issue: '
-'%(exc)s'), {'name': name, 'exc': e})
+LOG.error('Skipping %(name)s, keystone issue: '
+'%(exc)s', {'name': name, 'exc': e})
except Exception as err:
-LOG.exception(_LE('Unable to discover resources: %s'), err)
+LOG.exception('Unable to discover resources: %s', err)
else:
-LOG.warning(_LW('Unknown discovery extension: %s'), name)
+LOG.warning('Unknown discovery extension: %s', name)
return resources
def stop_pollsters_tasks(self):

View File

@@ -23,7 +23,6 @@ import oslo_messaging
import six
from stevedore import extension
-from ceilometer.i18n import _LE
from ceilometer import messaging
LOG = log.getLogger(__name__)
@@ -117,7 +116,7 @@ class NotificationBase(PluginBase):
priority, notification)
self.to_samples_and_publish(notification)
except Exception:
-LOG.error(_LE('Fail to process notification'), exc_info=True)
+LOG.error('Fail to process notification', exc_info=True)
def to_samples_and_publish(self, notification):
"""Return samples produced by *process_notification*.

View File

@@ -24,7 +24,6 @@ import pecan
from ceilometer.api import hooks
from ceilometer.api import middleware
-from ceilometer.i18n import _LI, _LW
LOG = log.getLogger(__name__)
@@ -100,9 +99,9 @@ def load_app(conf):
configkey = str(uuid.uuid4())
APPCONFIGS[configkey] = conf
-LOG.info(_LI("Full WSGI config used: %s"), cfg_file)
-LOG.warning(_LW("Note: Ceilometer API is deprecated; use APIs from Aodh"
-" (alarms), Gnocchi (metrics) and/or Panko (events)."))
+LOG.info("Full WSGI config used: %s", cfg_file)
+LOG.warning("Note: Ceilometer API is deprecated; use APIs from Aodh"
+" (alarms), Gnocchi (metrics) and/or Panko (events).")
return deploy.loadapp("config:" + cfg_file,
global_conf={'configkey': configkey})

View File

@@ -32,7 +32,7 @@ from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import samples
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
-from ceilometer.i18n import _, _LE
+from ceilometer.i18n import _
from ceilometer import storage
from ceilometer import utils
@@ -313,7 +313,7 @@ class ValidatedComplexQuery(object):
date_time = date_time.replace(tzinfo=None)
return date_time
except ValueError:
-LOG.exception(_LE("String %s is not a valid isotime") % isotime)
+LOG.exception("String %s is not a valid isotime" % isotime)
msg = _('Failed to parse the timestamp value %s') % isotime
raise base.ClientSideError(msg)

View File

@@ -29,7 +29,7 @@ from ceilometer.api.controllers.v2 import meters
from ceilometer.api.controllers.v2 import query
from ceilometer.api.controllers.v2 import resources
from ceilometer.api.controllers.v2 import samples
-from ceilometer.i18n import _, _LW
+from ceilometer.i18n import _
from ceilometer import keystone_client
@@ -122,13 +122,13 @@ class V2Controller(object):
except exceptions.EndpointNotFound:
self._gnocchi_is_enabled = False
except exceptions.ClientException:
-LOG.warning(_LW("Can't connect to keystone, assuming "
-"gnocchi is disabled and retry later"))
+LOG.warning("Can't connect to keystone, assuming "
+"gnocchi is disabled and retry later")
else:
self._gnocchi_is_enabled = True
-LOG.warning(_LW("ceilometer-api started with gnocchi "
-"enabled. The resources/meters/samples "
-"URLs are disabled."))
+LOG.warning("ceilometer-api started with gnocchi "
+"enabled. The resources/meters/samples "
+"URLs are disabled.")
return self._gnocchi_is_enabled
@property
@@ -148,12 +148,12 @@ class V2Controller(object):
except exceptions.EndpointNotFound:
self._aodh_url = ""
except exceptions.ClientException:
-LOG.warning(_LW("Can't connect to keystone, assuming aodh "
-"is disabled and retry later."))
+LOG.warning("Can't connect to keystone, assuming aodh "
+"is disabled and retry later.")
else:
-LOG.warning(_LW("ceilometer-api started with aodh "
-"enabled. Alarms URLs will be redirected "
-"to aodh endpoint."))
+LOG.warning("ceilometer-api started with aodh "
+"enabled. Alarms URLs will be redirected "
+"to aodh endpoint.")
return self._aodh_url
@property
@@ -174,12 +174,12 @@ class V2Controller(object):
self._panko_url = ""
except exceptions.ClientException:
LOG.warning(
-_LW("Can't connect to keystone, assuming Panko "
-"is disabled and retry later."))
+"Can't connect to keystone, assuming Panko "
+"is disabled and retry later.")
else:
-LOG.warning(_LW("ceilometer-api started with Panko "
-"enabled. Events URLs will be redirected "
-"to Panko endpoint."))
+LOG.warning("ceilometer-api started with Panko "
+"enabled. Events URLs will be redirected "
+"to Panko endpoint.")
return self._panko_url
@pecan.expose()

View File

@@ -31,7 +31,7 @@ import wsme
from ceilometer.api.controllers.v2 import base
from ceilometer.api import rbac
-from ceilometer.i18n import _, _LI
+from ceilometer.i18n import _
from ceilometer import utils
LOG = log.getLogger(__name__)
@@ -41,8 +41,8 @@ def enforce_limit(limit):
"""Ensure limit is defined and is valid. if not, set a default."""
if limit is None:
limit = pecan.request.cfg.api.default_api_return_limit
-LOG.info(_LI('No limit value provided, result set will be'
-' limited to %(limit)d.'), {'limit': limit})
+LOG.info('No limit value provided, result set will be'
+' limited to %(limit)d.', {'limit': limit})
if not limit or limit <= 0:
raise base.ClientSideError(_("Limit must be positive"))
return limit

View File

@@ -19,7 +19,6 @@ from oslo_policy import policy
from pecan import hooks
-from ceilometer.i18n import _LE
from ceilometer import messaging
from ceilometer import storage
@@ -59,7 +58,7 @@ class DBHook(hooks.PecanHook):
try:
return storage.get_connection_from_config(conf)
except Exception as err:
-LOG.exception(_LE("Failed to connect to db" "retry later: %s"),
+LOG.exception("Failed to connect to db" "retry later: %s",
err)

View File

@@ -27,7 +27,6 @@ import six
import webob
from ceilometer import i18n
-from ceilometer.i18n import _LE
LOG = log.getLogger(__name__)
@@ -99,7 +98,7 @@ class ParsableErrorMiddleware(object):
error_message,
b'</error_message>'))
except etree.XMLSyntaxError as err:
-LOG.error(_LE('Error parsing HTTP response: %s'), err)
+LOG.error('Error parsing HTTP response: %s', err)
error_message = state['status_code']
body = '<error_message>%s</error_message>' % error_message
if six.PY3:

View File

@@ -20,7 +20,6 @@ from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import manager
-from ceilometer.i18n import _LW
from ceilometer import service
LOG = log.getLogger(__name__)
@@ -51,8 +50,8 @@ class DeduplicatedCfgList(cfg.types.List):
result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs)
result_set = set(result)
if len(result) != len(result_set):
-LOG.warning(_LW("Duplicated values: %s found in CLI options, "
-"auto de-duplicated"), result)
+LOG.warning("Duplicated values: %s found in CLI options, "
+"auto de-duplicated", result)
result = list(result_set)
if self.choices and not (result_set <= set(self.choices)):
raise Exception('Valid values are %s, but found %s'

View File

@@ -20,7 +20,6 @@ from six import moves
import six.moves.urllib.parse as urlparse
import sqlalchemy as sa
-from ceilometer.i18n import _LE, _LI
from ceilometer import service
from ceilometer import storage
@@ -69,8 +68,8 @@ def expirer():
storage_conn.clear_expired_metering_data(
conf.database.metering_time_to_live)
else:
-LOG.info(_LI("Nothing to clean, database metering time to live "
-"is disabled"))
+LOG.info("Nothing to clean, database metering time to live "
+"is disabled")
def db_clean_legacy():
@@ -100,8 +99,8 @@ def db_clean_legacy():
masked_url = urlparse.urlunparse(masked_url)
else:
masked_url = url
-LOG.info(_LI('Starting to drop event, alarm and alarm history tables in '
-'backend: %s'), masked_url)
+LOG.info('Starting to drop event, alarm and alarm history tables in '
+'backend: %s', masked_url)
connection_scheme = parsed.scheme
conn = storage.get_connection_from_config(conf)
@@ -116,10 +115,10 @@
if engine.has_table(table_name):
table = sa.Table(table_name, meta, autoload=True)
table.drop()
-LOG.info(_LI("Legacy %s table of SQL backend has been "
-"dropped."), table_name)
+LOG.info("Legacy %s table of SQL backend has been "
+"dropped.", table_name)
else:
-LOG.info(_LI('%s table does not exist.'), table_name)
+LOG.info('%s table does not exist.', table_name)
elif connection_scheme == 'hbase':
with conn.conn_pool.connection() as h_conn:
@@ -132,22 +131,22 @@
if table_name in tables:
h_conn.disable_table(table_name)
h_conn.delete_table(table_name)
-LOG.info(_LI("Legacy %s table of Hbase backend "
-"has been dropped."),
+LOG.info("Legacy %s table of Hbase backend "
+"has been dropped.",
table_name_mapping[table_name])
else:
-LOG.info(_LI('%s table does not exist.'),
+LOG.info('%s table does not exist.',
table_name_mapping[table_name])
except Exception as e:
-LOG.error(_LE('Error occurred while dropping alarm '
-'tables of Hbase, %s'), e)
+LOG.error('Error occurred while dropping alarm '
+'tables of Hbase, %s', e)
elif connection_scheme == 'mongodb':
for table_name in ('alarm', 'alarm_history', 'event'):
if table_name in conn.db.conn.collection_names():
conn.db.conn.drop_collection(table_name)
-LOG.info(_LI("Legacy %s table of Mongodb backend has been "
-"dropped."), table_name)
+LOG.info("Legacy %s table of Mongodb backend has been "
+"dropped.", table_name)
else:
-LOG.info(_LI('%s table does not exist.'), table_name)
+LOG.info('%s table does not exist.', table_name)
LOG.info('Legacy alarm and event tables cleanup done.')

View File

@@ -26,7 +26,7 @@ from oslo_utils import netutils
from oslo_utils import units
from ceilometer import dispatcher
-from ceilometer.i18n import _, _LE, _LW
+from ceilometer.i18n import _
from ceilometer import messaging
from ceilometer.publisher import utils as publisher_utils
from ceilometer import utils
@@ -117,9 +117,9 @@ class CollectorService(cotyledon.Service):
# NOTE(zhengwei): linux kernel >= 3.9
udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
-LOG.warning(_LW("System does not support socket.SO_REUSEPORT "
-"option. Only one worker will be able to process "
-"incoming data."))
+LOG.warning("System does not support socket.SO_REUSEPORT "
+"option. Only one worker will be able to process "
+"incoming data.")
udp.bind((self.conf.collector.udp_address,
self.conf.collector.udp_port))
@@ -146,8 +146,8 @@
except Exception:
LOG.exception(_("UDP: Unable to store meter"))
else:
-LOG.warning(_LW('sample signature invalid, '
-'discarding: %s'), sample)
+LOG.warning('sample signature invalid, '
+'discarding: %s', sample)
def terminate(self):
if self.sample_listener:
@@ -176,13 +176,13 @@ class CollectorEndpoint(object):
if publisher_utils.verify_signature(sample, self.secret):
goods.append(sample)
else:
-LOG.warning(_LW('notification signature invalid, '
-'discarding: %s'), sample)
+LOG.warning('notification signature invalid, '
+'discarding: %s', sample)
try:
self.dispatcher_manager.map_method(self.method, goods)
except Exception:
-LOG.exception(_LE("Dispatcher failed to handle the notification, "
-"re-queuing it."))
+LOG.exception("Dispatcher failed to handle the notification, "
+"re-queuing it.")
return oslo_messaging.NotificationResult.REQUEUE

View File

@@ -21,7 +21,6 @@ import ceilometer
from ceilometer.agent import plugin_base
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
-from ceilometer.i18n import _LE, _LW
from ceilometer import sample
LOG = log.getLogger(__name__)
@@ -140,8 +139,8 @@ class GenericComputePollster(plugin_base.PollsterBase):
except NoVolumeException:
# FIXME(sileht): This should be a removed... but I will
# not change the test logic for now
-LOG.warning(_LW("%(name)s statistic in not available for "
-"instance %(instance_id)s") %
+LOG.warning("%(name)s statistic in not available for "
+"instance %(instance_id)s" %
{'name': self.sample_name,
'instance_id': instance.id})
except virt_inspector.InstanceNotFoundException as err:
@@ -153,8 +152,8 @@ class GenericComputePollster(plugin_base.PollsterBase):
{'instance_id': instance.id,
'name': self.sample_name, 'exc': e})
except virt_inspector.NoDataException as e:
-LOG.warning(_LW('Cannot inspect data of %(pollster)s for '
-'%(instance_id)s, non-fatal reason: %(exc)s'),
+LOG.warning('Cannot inspect data of %(pollster)s for '
+'%(instance_id)s, non-fatal reason: %(exc)s',
{'pollster': self.__class__.__name__,
'instance_id': instance.id, 'exc': e})
raise plugin_base.PollsterPermanentError(resources)
@@ -167,6 +166,6 @@
raise plugin_base.PollsterPermanentError(resources)
except Exception as err:
LOG.error(
-_LE('Could not get %(name)s events for %(id)s: %(e)s'), {
+'Could not get %(name)s events for %(id)s: %(e)s', {
'name': self.sample_name, 'id': instance.id, 'e': err},
exc_info=True)

View File

@@ -21,7 +21,6 @@ from oslo_log import log
from stevedore import driver
import ceilometer
-from ceilometer.i18n import _LE
OPTS = [
@@ -265,5 +264,5 @@ def get_hypervisor_inspector(conf):
invoke_args=(conf, ))
return mgr.driver
except ImportError as e:
-LOG.error(_LE("Unable to load the hypervisor inspector: %s") % e)
+LOG.error("Unable to load the hypervisor inspector: %s" % e)
return Inspector(conf)

View File

@@ -27,7 +27,7 @@ except ImportError:
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.libvirt import utils as libvirt_utils
-from ceilometer.i18n import _LW, _
+from ceilometer.i18n import _
LOG = logging.getLogger(__name__)
@@ -137,8 +137,8 @@ class LibvirtInspector(virt_inspector.Inspector):
if disk_type:
if disk_type == 'network':
LOG.warning(
-_LW('Inspection disk usage of network disk '
-'%(instance_uuid)s unsupported by libvirt') % {
+'Inspection disk usage of network disk '
+'%(instance_uuid)s unsupported by libvirt' % {
'instance_uuid': instance.id})
continue
# NOTE(lhx): "cdrom" device associated to the configdrive

View File

@@ -23,7 +23,7 @@ except ImportError:
libvirt = None
from ceilometer.compute.virt import inspector as virt_inspector
-from ceilometer.i18n import _LE
+from ceilometer.i18n import _
LOG = logging.getLogger(__name__)
@@ -116,9 +116,9 @@ def raise_nodata_if_unsupported(method):
except libvirt.libvirtError as e:
# NOTE(sileht): At this point libvirt connection error
# have been reraise as tenacity.RetryError()
-msg = _LE('Failed to inspect instance %(instance_uuid)s stats, '
-'can not get info from libvirt: %(error)s') % {
-"instance_uuid": instance.id,
-"error": e}
+msg = _('Failed to inspect instance %(instance_uuid)s stats, '
+'can not get info from libvirt: %(error)s') % {
+"instance_uuid": instance.id,
+"error": e}
raise virt_inspector.NoDataException(msg)
return inner

View File

@@ -21,7 +21,6 @@ import tenacity
import tooz.coordination
from tooz import hashring
-from ceilometer.i18n import _LE, _LI
LOG = log.getLogger(__name__)
@@ -67,15 +66,15 @@ class PartitionCoordinator(object):
def start(self):
try:
self._coordinator.start(start_heart=True)
-LOG.info(_LI('Coordination backend started successfully.'))
+LOG.info('Coordination backend started successfully.')
except tooz.coordination.ToozError:
-LOG.exception(_LE('Error connecting to coordination backend.'))
+LOG.exception('Error connecting to coordination backend.')
def stop(self):
try:
self._coordinator.stop()
except tooz.coordination.ToozError:
-LOG.exception(_LE('Error connecting to coordination backend.'))
+LOG.exception('Error connecting to coordination backend.')
finally:
del self._coordinator
@@ -99,10 +98,10 @@
except tooz.coordination.MemberAlreadyExist:
pass
except tooz.coordination.ToozError:
-LOG.exception(_LE('Error joining partitioning group %s,'
-' re-trying'), group_id)
+LOG.exception('Error joining partitioning group %s,'
+' re-trying', group_id)
raise tenacity.TryAgain
-LOG.info(_LI('Joined partitioning group %s'), group_id)
+LOG.info('Joined partitioning group %s', group_id)
return _inner()
@@ -132,8 +131,8 @@
[six.text_type(f) for f in filtered])
return filtered
except tooz.coordination.ToozError:
-LOG.exception(_LE('Error getting group membership info from '
-'coordination backend.'))
+LOG.exception('Error getting group membership info from '
+'coordination backend.')
return []
@staticmethod

View File

@@ -18,7 +18,7 @@ from oslo_log import log
import six
import yaml
-from ceilometer.i18n import _, _LI
+from ceilometer.i18n import _
LOG = log.getLogger(__name__)
@@ -183,5 +183,5 @@ def load_definitions(conf, defaults, config_file, fallback_file=None):
"Using default config.")
definition_cfg = defaults
-LOG.info(_LI("Definitions: %s"), definition_cfg)
+LOG.info("Definitions: %s", definition_cfg)
return definition_cfg

View File

@@ -20,8 +20,6 @@ from oslo_log import log
import six
from stevedore import named
-from ceilometer.i18n import _LW
LOG = log.getLogger(__name__)
@@ -65,7 +63,7 @@ def _load_dispatcher_manager(conf, dispatcher_type):
invoke_args=[conf],
propagate_map_exceptions=True)
if not list(dispatcher_manager):
-LOG.warning(_LW('Failed to load any dispatchers for %s'),
+LOG.warning('Failed to load any dispatchers for %s',
namespace)
return dispatcher_manager

View File

@@ -17,7 +17,6 @@ from oslo_log import log
from oslo_utils import timeutils
from ceilometer import dispatcher
-from ceilometer.i18n import _LE
from ceilometer import storage
LOG = log.getLogger(__name__)
@@ -67,6 +66,6 @@ class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase):
try:
self.conn.record_metering_data_batch(data)
except Exception as err:
-LOG.error(_LE('Failed to record %(len)s: %(err)s.'),
+LOG.error('Failed to record %(len)s: %(err)s.',
{'len': len(data), 'err': err})
raise

View File

@@ -31,8 +31,8 @@ from stevedore import extension
from ceilometer import declarative
from ceilometer import dispatcher
-from ceilometer.i18n import _LE, _LW
from ceilometer import gnocchi_client
+from ceilometer.i18n import _
from ceilometer import keystone_client
NAME_ENCODED = __name__.encode('utf-8')
@@ -95,10 +95,10 @@ class ResourcesDefinition(object):
for field, field_type in expected.items():
if field not in definition:
raise declarative.ResourceDefinitionException(
-_LE("Required field %s not specified") % field, definition)
+_("Required field %s not specified") % field, definition)
if not isinstance(definition[field], field_type):
raise declarative.ResourceDefinitionException(
-_LE("Required field %(field)s should be a %(type)s") %
+_("Required field %(field)s should be a %(type)s") %
{'field': field, 'type': field_type}, definition)
@staticmethod
@@ -214,7 +214,7 @@ class GnocchiDispatcher(dispatcher.MeterDispatcherBase,
except ImportError:
pass
except oslo_cache.exception.ConfigurationError as exc:
-LOG.warning(_LW('unable to configure oslo_cache: %s'), exc)
+LOG.warning('unable to configure oslo_cache: %s', exc)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
@@ -237,7 +237,7 @@
resource,
conf.dispatcher_gnocchi.archive_policy, plugin_manager))
except Exception as exc:
-LOG.error(_LE("Failed to load resource due to error %s") %
+LOG.error("Failed to load resource due to error %s" %
exc)
return resource_defs
@@ -251,14 +251,14 @@
project = self._ks_client.projects.find(
name=self.conf.dispatcher_gnocchi.filter_project)
except ka_exceptions.NotFound:
-LOG.warning(_LW('gnocchi project not found in keystone,'
-' ignoring the filter_service_activity '
-'option'))
+LOG.warning('gnocchi project not found in keystone,'
+' ignoring the filter_service_activity '
+'option')
self.filter_service_activity = False
return None
except Exception:
-LOG.exception(_LE('fail to retrieve user of Gnocchi '
-'service'))
+LOG.exception('fail to retrieve user of Gnocchi '
+'service')
raise
self._gnocchi_project_id = project.id
LOG.debug("gnocchi project found: %s", self.gnocchi_project_id)
@@ -319,7 +319,7 @@
samples = list(samples)
rd = self._get_resource_definition_from_metric(metric_name)
if rd is None:
-LOG.warning(_LW("metric %s is not handled by Gnocchi") %
+LOG.warning("metric %s is not handled by Gnocchi" %
metric_name)
continue
if rd.cfg.get("ignore"):
@@ -400,7 +400,7 @@
# NOTE(sileht): resource created in the meantime
pass
except gnocchi_exc.ClientException as e:
-LOG.error(_LE('Error creating resource %(id)s: %(err)s'),
+LOG.error('Error creating resource %(id)s: %(err)s',
{'id': resource['id'], 'err': six.text_type(e)})
# We cannot post measures for this resource
# and we can't patch it later
@@ -498,8 +498,8 @@
return self._gnocchi.resource.search(
resource_type, jsonutils.loads(query))
except Exception:
-LOG.error(_LE("Fail to search resource type %{resource_type}s "
-"with '%{query}s'"),
+LOG.error("Fail to search resource type %{resource_type}s "
+"with '%{query}s'",
{'resource_type': resource_type, 'query': query},
exc_info=True)
return []
@@ -512,6 +512,6 @@
LOG.debug("Delete event received on unexisting resource (%s), "
"ignore it.", resource['id'])
except Exception:
-LOG.error(_LE("Fail to update the resource %s"), resource,
+LOG.error("Fail to update the resource %s", resource,
exc_info=True)
LOG.debug('Resource %s ended at %s' % (resource["id"], ended_at))

View File

@@ -21,7 +21,6 @@ from oslo_utils import strutils
import requests
from ceilometer import dispatcher
-from ceilometer.i18n import _LE
LOG = log.getLogger(__name__)
@@ -106,9 +105,9 @@ class HttpDispatcher(dispatcher.MeterDispatcherBase,
def record_metering_data(self, data):
if self.target == '':
# if the target was not set, do not do anything
-LOG.error(_LE('Dispatcher target was not set, no meter will '
-'be posted. Set the target in the ceilometer.conf '
-'file.'))
+LOG.error('Dispatcher target was not set, no meter will '
+'be posted. Set the target in the ceilometer.conf '
+'file.')
return
# We may have receive only one counter on the wire
@@ -136,16 +135,16 @@
res.raise_for_status()
except requests.exceptions.HTTPError:
-LOG.exception(_LE('Status Code: %(code)s. '
-'Failed to dispatch meter: %(meter)s') %
+LOG.exception('Status Code: %(code)s. '
+'Failed to dispatch meter: %(meter)s' %
{'code': res.status_code, 'meter': meter_json})
def record_events(self, events):
if self.event_target == '':
# if the event target was not set, do not do anything
-LOG.error(_LE('Dispatcher event target was not set, no event will '
-'be posted. Set event_target in the ceilometer.conf '
-'file.'))
+LOG.error('Dispatcher event target was not set, no event will '
+'be posted. Set event_target in the ceilometer.conf '
+'file.')
return
if not isinstance(events, list):
@@ -171,6 +170,6 @@
self.event_target, res.status_code)
res.raise_for_status()
except requests.exceptions.HTTPError:
-LOG.exception(_LE('Status Code: %(code)s. '
-'Failed to dispatch event: %(event)s') %
+LOG.exception('Status Code: %(code)s. '
+'Failed to dispatch event: %(event)s' %
{'code': res.status_code, 'event': event_json})

View File

@@ -17,7 +17,6 @@ import oslo_messaging
from stevedore import extension
from ceilometer.event import converter as event_converter
-from ceilometer.i18n import _LE
from ceilometer import messaging
LOG = log.getLogger(__name__)
@@ -63,5 +62,5 @@ class EventsNotificationEndpoint(object):
except Exception:
if not self.manager.conf.notification.ack_on_event_error:
return oslo_messaging.NotificationResult.REQUEUE
-LOG.error(_LE('Fail to process a notification'), exc_info=True)
+LOG.error('Fail to process a notification', exc_info=True)
return oslo_messaging.NotificationResult.HANDLED

View File

@@ -20,8 +20,6 @@ from oslo_log import log
from oslo_utils import timeutils
import six
-from ceilometer.i18n import _LW
LOG = log.getLogger(__name__)
@@ -123,9 +121,9 @@ class SplitterTraitPlugin(TraitPluginBase):
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
"""
-LOG.warning(_LW('split plugin is deprecated, '
-'add ".`split(%(sep)s, %(segment)d, '
-'%(max_split)d)`" to your jsonpath instead') %
+LOG.warning('split plugin is deprecated, '
+'add ".`split(%(sep)s, %(segment)d, '
+'%(max_split)d)`" to your jsonpath instead' %
dict(sep=separator,
segment=segment,
max_split=(-1 if max_split is None
@@ -213,17 +211,17 @@ class TimedeltaPlugin(TraitPluginBase):
def trait_value(self, match_list):
if len(match_list) != 2:
-LOG.warning(_LW('Timedelta plugin is required two timestamp fields'
-' to create timedelta value.'))
+LOG.warning('Timedelta plugin is required two timestamp fields'
+' to create timedelta value.')
return
start, end = match_list
try:
start_time = timeutils.parse_isotime(start[1])
end_time = timeutils.parse_isotime(end[1])
except Exception as err:
-LOG.warning(_LW('Failed to parse date from set fields, both '
-'fields %(start)s and %(end)s must be datetime: '
-'%(err)s') %
+LOG.warning('Failed to parse date from set fields, both '
+'fields %(start)s and %(end)s must be datetime: '
+'%(err)s' %
dict(start=start[0], end=end[0], err=err)
)
return

View File

@@ -17,7 +17,6 @@ from oslo_log import log
from oslo_utils import timeutils
from ceilometer.agent import plugin_base
-from ceilometer.i18n import _LE
from ceilometer import nova_client
@@ -129,7 +128,7 @@ class NodesDiscoveryTripleO(plugin_base.DiscoveryBase):
resources.append(resource)
except KeyError:
-LOG.error(_LE("Couldn't obtain IP address of "
-"instance %s") % instance.id)
+LOG.error("Couldn't obtain IP address of "
+"instance %s" % instance.id)
return resources

View File

@@ -25,7 +25,7 @@ from ceilometer.agent import plugin_base
from ceilometer import declarative
from ceilometer.hardware import inspector as insloader
from ceilometer.hardware.pollsters import util
-from ceilometer.i18n import _LE, _LW
+from ceilometer.i18n import _
from ceilometer import sample
OPTS = [
@@ -49,14 +49,14 @@ class MeterDefinition(object):
fname.endswith('_inspector'))):
setattr(self, fname, fval)
else:
-LOG.warning(_LW("Ignore unrecognized field %s"), fname)
+LOG.warning("Ignore unrecognized field %s", fname)
for fname in self.required_fields:
if not getattr(self, fname, None):
raise declarative.MeterDefinitionException(
-_LE("Missing field %s") % fname, self.cfg)
+_("Missing field %s") % fname, self.cfg)
if self.type not in sample.TYPES:
raise declarative.MeterDefinitionException(
-_LE("Unrecognized type value %s") % self.type, self.cfg)
+_("Unrecognized type value %s") % self.type, self.cfg)
class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
@@ -91,8 +91,8 @@ class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
parsed_url, resource_id, metadata = (None, None, None)
if isinstance(res, dict):
if 'resource_url' not in res or 'resource_id' not in res:
-LOG.error(_LE('Passed resource dict must contain keys '
-'resource_id and resource_url.'))
+LOG.error('Passed resource dict must contain keys '
+'resource_id and resource_url.')
else:
metadata = res
parsed_url = netutils.urlsplit(res['resource_url'])
@@ -110,7 +110,7 @@ class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
driver = insloader.get_inspector(parsed_url)
self.inspectors[parsed_url.scheme] = driver
except Exception as err:
-LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"),
+LOG.exception("Cannot load inspector %(name)s: %(err)s",
dict(name=parsed_url.scheme,
err=err))
raise
@@ -133,7 +133,7 @@ class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
for resource in resources:
parsed_url, res, extra_metadata = self._parse_resource(resource)
if parsed_url is None:
-LOG.error(_LE("Skip invalid resource %s"), resource)
+LOG.error("Skip invalid resource %s", resource)
continue
ins = self._get_inspector(parsed_url)
try:
@@ -161,8 +161,8 @@ class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
parsed_url,
i_cache[identifier]))
except Exception as err:
-LOG.exception(_LE('inspector call failed for %(ident)s '
-'host %(host)s: %(err)s'),
+LOG.exception('inspector call failed for %(ident)s '
+'host %(host)s: %(err)s',
dict(ident=identifier,
host=parsed_url.hostname,
err=err))
@@ -211,6 +211,6 @@ def load_definition(config_def):
meter = MeterDefinition(meter_def)
mappings[meter.name] = meter
except declarative.DefinitionException as e:
-errmsg = _LE("Error loading meter definition: %s")
+errmsg = "Error loading meter definition: %s"
LOG.error(errmsg, e.brief_message)
return mappings

View File

@@ -27,16 +27,6 @@ _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
def translate(value, user_locale):
return oslo_i18n.translate(value, user_locale)
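
Note that the hunk above removes only the log-level translators from the i18n module; the primary _ translator stays, which is why several hunks in this commit replace _LE(...) with _(...) in exception messages instead of dropping the wrapper entirely. A minimal sketch of the distinction (require is a hypothetical helper, and ValueError stands in for the project's declarative exception classes):

from ceilometer.i18n import _

def require(cfg, field):
    if field not in cfg:
        # User-facing text (an exception message) is still translated.
        raise ValueError(_("Required field %s not specified") % field)
    # Log-only text, by contrast, is no longer translated anywhere.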

View File

@@ -21,8 +21,8 @@ from oslo_utils import fnmatch
from stevedore import extension
from ceilometer import declarative
+from ceilometer.i18n import _
from ceilometer import notification
-from ceilometer.i18n import _LE, _LW
from ceilometer import sample as sample_util
OPTS = [
@@ -50,7 +50,7 @@ class MeterDefinition(object):
if not self.cfg.get(field)]
if missing:
raise declarative.MeterDefinitionException(
-_LE("Required fields %s not specified") % missing, self.cfg)
+_("Required fields %s not specified") % missing, self.cfg)
self._event_type = self.cfg.get('event_type')
if isinstance(self._event_type, six.string_types):
@@ -59,7 +59,7 @@
if ('type' not in self.cfg.get('lookup', []) and
self.cfg['type'] not in sample_util.TYPES):
raise declarative.MeterDefinitionException(
-_LE("Invalid type %s specified") % self.cfg['type'], self.cfg)
+_("Invalid type %s specified") % self.cfg['type'], self.cfg)
self._fallback_user_id = declarative.Definition(
'user_id', "_context_user_id|_context_user", plugin_manager)
@@ -185,14 +185,14 @@ class ProcessMeterNotifications(notification.NotificationProcessBase):
for meter_cfg in reversed(meters_cfg['metric']):
if meter_cfg.get('name') in definitions:
# skip duplicate meters
-LOG.warning(_LW("Skipping duplicate meter definition %s")
+LOG.warning("Skipping duplicate meter definition %s"
% meter_cfg)
continue
try:
md = MeterDefinition(meter_cfg, self.manager.conf,
plugin_manager)
except declarative.DefinitionException as e:
-errmsg = _LE("Error loading meter definition: %s")
+errmsg = "Error loading meter definition: %s"
LOG.error(errmsg, six.text_type(e))
else:
definitions[meter_cfg['name']] = md

View File

@@ -18,7 +18,6 @@
from oslo_log import log
-from ceilometer.i18n import _LW
from ceilometer.network.services import base
from ceilometer import sample
@@ -43,7 +42,7 @@ class FloatingIPPollster(base.BaseServicesPollster):
for fip in resources or []:
if fip['status'] is None:
-LOG.warning(_LW("Invalid status, skipping IP address %s") %
+LOG.warning("Invalid status, skipping IP address %s" %
fip['floating_ip_address'])
continue
status = self.get_status_id(fip['status'])

View File

@@ -19,7 +19,7 @@ import collections
from oslo_log import log
import six
-from ceilometer.i18n import _, _LE
+from ceilometer.i18n import _
from ceilometer.network.services import base
from ceilometer import neutron_client
from ceilometer import sample
@@ -288,7 +288,7 @@ class _LBStatsPollster(base.BaseServicesPollster):
c_data = self._populate_stats_cache(pool['id'], cache)
yield self._get_sample(pool, c_data)
except Exception:
-LOG.exception(_LE('Ignoring pool %(pool_id)s'),
+LOG.exception('Ignoring pool %(pool_id)s',
{'pool_id': pool['id']})
elif self.lb_version == 'v2':
for loadbalancer in resources:
@@ -298,7 +298,7 @@
yield self._get_sample(loadbalancer, c_data)
except Exception:
LOG.exception(
-_LE('Ignoring loadbalancer %(loadbalancer_id)s'),
+'Ignoring loadbalancer %(loadbalancer_id)s',
{'loadbalancer_id': loadbalancer['id']})

View File

@@ -18,7 +18,6 @@ import six
from six import moves
from six.moves.urllib import parse as urlparse
-from ceilometer.i18n import _LE
from ceilometer.network.statistics import driver
from ceilometer.network.statistics.opendaylight import client
from ceilometer import utils
@@ -161,8 +160,8 @@ class OpenDayLightDriver(driver.Driver):
cs.host_tracker.get_inactive_hosts(container_name))
data[container_name] = container_data
except Exception:
-LOG.exception(_LE('Request failed to connect to OpenDaylight'
-' with NorthBound REST API'))
+LOG.exception('Request failed to connect to OpenDaylight'
+' with NorthBound REST API')
cache['network.statistics.opendaylight'] = data

View File

@@ -25,7 +25,6 @@ from swiftclient import client as swift
from swiftclient.exceptions import ClientException
from ceilometer.agent import plugin_base
-from ceilometer.i18n import _LI
from ceilometer import keystone_client
from ceilometer import sample
@@ -72,7 +71,7 @@ class _Base(plugin_base.PollsterBase):
interface=creds.interface,
region_name=creds.region_name)
except exceptions.EndpointNotFound as e:
-LOG.info(_LI("Swift endpoint not found: %s"), e)
+LOG.info("Swift endpoint not found: %s", e)
return _Base._ENDPOINT
def _iter_accounts(self, ksclient, cache, tenants):

View File

@@ -31,7 +31,6 @@ from stevedore import extension
import yaml
from ceilometer.event.storage import models
-from ceilometer.i18n import _LI, _LW, _LE
from ceilometer import publisher
from ceilometer.publisher import utils as publisher_utils
from ceilometer import sample as sample_util
@@ -416,7 +415,7 @@ class Sink(object):
try:
self.publishers.append(publisher_manager.get(p))
except Exception:
-LOG.error(_LE("Unable to load publisher %s"), p,
+LOG.error("Unable to load publisher %s", p,
exc_info=True)
self.multi_publish = True if len(self.publishers) > 1 else False
@@ -436,11 +435,11 @@
"No transformer named %s loaded" % transformer['name'],
cfg)
transformers.append(ext.plugin(**parameter))
-LOG.info(_LI(
+LOG.info(
"Pipeline %(pipeline)s: Setup transformer instance %(name)s "
-"with parameter %(param)s") % ({'pipeline': self,
-'name': transformer['name'],
-'param': parameter}))
+"with parameter %(param)s" % ({'pipeline': self,
+'name': transformer['name'],
+'param': parameter}))
return transformers
@@ -455,8 +454,8 @@ class EventSink(Sink):
try:
p.publish_events(events)
except Exception:
-LOG.error(_LE("Pipeline %(pipeline)s: %(status)s "
-"after error from publisher %(pub)s") %
+LOG.error("Pipeline %(pipeline)s: %(status)s "
+"after error from publisher %(pub)s" %
{'pipeline': self,
'status': 'Continue' if
self.multi_publish else 'Exit', 'pub': p},
@@ -485,11 +484,11 @@ class SampleSink(Sink):
return
return sample
except Exception:
-LOG.error(_LE("Pipeline %(pipeline)s: Exit after error "
-"from transformer %(trans)s "
-"for %(smp)s") % {'pipeline': self,
-'trans': transformer,
-'smp': sample},
+LOG.error("Pipeline %(pipeline)s: Exit after error "
+"from transformer %(trans)s "
+"for %(smp)s" % {'pipeline': self,
+'trans': transformer,
+'smp': sample},
exc_info=True)
def _publish_samples(self, start, samples):
@@ -521,8 +520,8 @@
try:
p.publish_samples(transformed_samples)
except Exception:
-LOG.error(_LE("Pipeline %(pipeline)s: Continue after "
-"error from publisher %(pub)s")
+LOG.error("Pipeline %(pipeline)s: Continue after "
+"error from publisher %(pub)s"
% {'pipeline': self, 'pub': p},
exc_info=True)
@@ -537,8 +536,8 @@
self._publish_samples(i + 1,
list(transformer.flush()))
except Exception:
-LOG.error(_LE("Pipeline %(pipeline)s: Error "
-"flushing transformer %(trans)s")
+LOG.error("Pipeline %(pipeline)s: Error "
+"flushing transformer %(trans)s"
% {'pipeline': self, 'trans': transformer},
exc_info=True)
@@ -597,10 +596,10 @@ class SamplePipeline(Pipeline):
def _validate_volume(self, s):
volume = s.volume
if volume is None:
-LOG.warning(_LW(
+LOG.warning(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has no volume (volume: None), the sample will'
-' be dropped')
+' be dropped'
% {'counter_name': s.name,
'resource_id': s.resource_id,
'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'}
@@ -610,10 +609,10 @@
try:
volume = float(volume)
except ValueError:
-LOG.warning(_LW(
+LOG.warning(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has volume which is not a number '
-'(volume: %(counter_volume)s), the sample will be dropped')
+'(volume: %(counter_volume)s), the sample will be dropped'
% {'counter_name': s.name,
'resource_id': s.resource_id,
'timestamp': (
@@ -689,11 +688,11 @@ class ConfigManagerBase(object):
"""Returns hash of changed cfg else False."""
mtime = self.get_cfg_mtime()
if mtime > self.cfg_mtime:
-LOG.info(_LI('Configuration file has been updated.'))
+LOG.info('Configuration file has been updated.')
self.cfg_mtime = mtime
_hash = self.get_cfg_hash()
if _hash != self.cfg_hash:
-LOG.info(_LI("Detected change in configuration."))
+LOG.info("Detected change in configuration.")
return _hash
return False
@@ -779,7 +778,7 @@ class PipelineManager(ConfigManagerBase):
if not ('sources' in cfg and 'sinks' in cfg):
raise PipelineException("Both sources & sinks are required",
cfg)
-LOG.info(_LI('detected decoupled pipeline config format'))
+LOG.info('detected decoupled pipeline config format')
publisher_manager = PublisherManager(self.conf, p_type['name'])
unique_names = set()
@@ -872,9 +871,9 @@ class PollingManager(ConfigManagerBase):
try:
cfg = self.load_config(cfg_file)
except (TypeError, IOError):
-LOG.warning(_LW('Using the pipeline configuration for polling '
-'is deprecated. %s should '
-'be used instead.'), cfg_file)
+LOG.warning('Using the pipeline configuration for polling '
+'is deprecated. %s should '
+'be used instead.', cfg_file)
cfg = self.load_config(conf.pipeline_cfg_file)
self.sources = []
if 'sources' not in cfg:

View File

@@ -17,7 +17,6 @@ import six.moves.urllib.parse as urlparse
from stevedore import driver
import stevedore.exception
-from ceilometer.i18n import _LE, _LW
from ceilometer import publisher
from ceilometer.publisher import utils
@@ -39,9 +38,9 @@ class DirectPublisher(publisher.ConfigPublisherBase):
super(DirectPublisher, self).__init__(conf, parsed_url)
default_dispatcher = parsed_url.scheme
if default_dispatcher == 'direct':
-LOG.warning(_LW('Direct publisher is deprecated for removal. Use '
-'an explicit publisher instead, e.g. "gnocchi", '
-'"database", "file", ...'))
+LOG.warning('Direct publisher is deprecated for removal. Use '
+'an explicit publisher instead, e.g. "gnocchi", '
+'"database", "file", ...')
default_dispatcher = 'database'
options = urlparse.parse_qs(parsed_url.query)
self.dispatcher_name = options.get('dispatcher',
@@ -76,8 +75,8 @@
def publish_samples(self, samples):
if not self.sample_driver:
-LOG.error(_LE("Can't publish samples to a non-existing dispatcher "
-"'%s'"), self.dispatcher_name)
+LOG.error("Can't publish samples to a non-existing dispatcher "
+"'%s'", self.dispatcher_name)
return
if not isinstance(samples, list):
@@ -89,8 +88,8 @@
def publish_events(self, events):
if not self.event_driver:
-LOG.error(_LE("Can't publish events to a non-existing dispatcher "
-"'%s'"), self.dispatcher_name)
+LOG.error("Can't publish events to a non-existing dispatcher "
+"'%s'", self.dispatcher_name)
return
if not isinstance(events, list):

View File

@@ -19,7 +19,6 @@ import logging.handlers
from oslo_log import log
from six.moves.urllib import parse as urlparse
-from ceilometer.i18n import _LE
from ceilometer import publisher
LOG = log.getLogger(__name__)
@@ -58,7 +57,7 @@ class FilePublisher(publisher.ConfigPublisherBase):
self.publisher_logger = None
path = parsed_url.path
if not path:
-LOG.error(_LE('The path for the file publisher is required'))
+LOG.error('The path for the file publisher is required')
return
rfh = None
@@ -72,8 +71,8 @@
max_bytes = int(params.get('max_bytes')[0])
backup_count = int(params.get('backup_count')[0])
except ValueError:
-LOG.error(_LE('max_bytes and backup_count should be '
-'numbers.'))
+LOG.error('max_bytes and backup_count should be '
+'numbers.')
return
# create rotating file handler
rfh = logging.handlers.RotatingFileHandler(

View File

@@ -20,7 +20,6 @@ import requests
from requests import adapters
from six.moves.urllib import parse as urlparse
-from ceilometer.i18n import _LE
from ceilometer import publisher
LOG = log.getLogger(__name__)
@@ -170,8 +169,8 @@ class HttpPublisher(publisher.ConfigPublisherBase):
LOG.debug('Message posting to %s: status code %d.',
self.target, res.status_code)
except requests.exceptions.HTTPError:
-LOG.exception(_LE('Status Code: %(code)s. '
-'Failed to dispatch message: %(data)s') %
+LOG.exception('Status Code: %(code)s. '
+'Failed to dispatch message: %(data)s' %
{'code': res.status_code, 'data': data})
def publish_samples(self, samples):

View File

@@ -19,7 +19,6 @@ from oslo_serialization import jsonutils
from oslo_utils import netutils
from six.moves.urllib import parse as urlparse
-from ceilometer.i18n import _LE
from ceilometer.publisher import messaging
LOG = log.getLogger(__name__)
@@ -80,11 +79,11 @@ class KafkaBrokerPublisher(messaging.MessagingPublisher):
self._producer = kafka.KafkaProducer(
bootstrap_servers=["%s:%s" % (self._host, self._port)])
except kafka.errors.KafkaError as e:
-LOG.exception(_LE("Failed to connect to Kafka service: %s"), e)
+LOG.exception("Failed to connect to Kafka service: %s", e)
raise messaging.DeliveryFailure('Kafka Client is not available, '
'please restart Kafka client')
except Exception as e:
-LOG.exception(_LE("Failed to connect to Kafka service: %s"), e)
+LOG.exception("Failed to connect to Kafka service: %s", e)
raise messaging.DeliveryFailure('Kafka Client is not available, '
'please restart Kafka client')

View File

@@ -27,7 +27,7 @@ from oslo_utils import excutils
import six
import six.moves.urllib.parse as urlparse
-from ceilometer.i18n import _, _LE, _LI
+from ceilometer.i18n import _
from ceilometer import messaging
from ceilometer import publisher
from ceilometer.publisher import utils
@@ -87,7 +87,7 @@ class MessagingPublisher(publisher.ConfigPublisherBase):
self.local_queue = []
if self.policy in ['default', 'queue', 'drop']:
-LOG.info(_LI('Publishing policy set to %s'), self.policy)
+LOG.info('Publishing policy set to %s', self.policy)
else:
LOG.warning(_('Publishing policy is unknown (%s) force to '
'default'), self.policy)
@@ -161,8 +161,8 @@
return []
current_retry += 1
if current_retry >= self.max_retry:
-LOG.exception(_LE("Failed to retry to send sample data "
-"with max_retry times"))
+LOG.exception("Failed to retry to send sample data "
+"with max_retry times")
raise
else:
queue.pop(0)

View File

@@ -22,7 +22,7 @@ from oslo_log import log
from oslo_utils import netutils
import ceilometer
-from ceilometer.i18n import _, _LW
+from ceilometer.i18n import _
from ceilometer import publisher
from ceilometer.publisher import utils
@@ -48,9 +48,9 @@ class UDPPublisher(publisher.ConfigPublisherBase):
if addrinfo:
addr_family = addrinfo[0]
else:
-LOG.warning(_LW(
-"Cannot resolve host %s, creating AF_INET socket..."),
-self.host)
+LOG.warning(
+"Cannot resolve host %s, creating AF_INET socket...",
+self.host)
addr_family = socket.AF_INET
self.socket = socket.socket(addr_family,
socket.SOCK_DGRAM)

View File

@@ -17,7 +17,6 @@
from oslo_log import log
-from ceilometer.i18n import _LI
from ceilometer.storage import base
LOG = log.getLogger(__name__)
@@ -38,8 +37,8 @@ class Connection(base.Connection):
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter.
"""
-LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: '
-'%(counter_volume)s')
+LOG.info('metering data %(counter_name)s for %(resource_id)s: '
+'%(counter_volume)s'
% ({'counter_name': data['counter_name'],
'resource_id': data['resource_id'],
'counter_volume': data['counter_volume']}))
@@ -50,7 +49,7 @@
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
-LOG.info(_LI("Dropping metering data with TTL %d"), ttl)
+LOG.info("Dropping metering data with TTL %d", ttl)
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,

View File

@@ -33,7 +33,7 @@ from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import cast
import ceilometer
-from ceilometer.i18n import _, _LI
+from ceilometer.i18n import _
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models as api_models
@@ -385,7 +385,7 @@ class Connection(base.Connection):
sample_q = (session.query(models.Sample)
.filter(models.Sample.timestamp < end))
rows = sample_q.delete()
-LOG.info(_LI("%d samples removed from database"), rows)
+LOG.info("%d samples removed from database", rows)
if not self.conf.database.sql_expire_samples_only:
with session.begin():
@@ -421,8 +421,8 @@
.filter(models.Resource.metadata_hash
.like('delete_%')))
resource_q.delete(synchronize_session=False)
-LOG.info(_LI("Expired residual resource and"
-" meter definition data"))
+LOG.info("Expired residual resource and"
+" meter definition data")
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,

View File

@@ -26,7 +26,7 @@ import pymongo.errors
import six
from six.moves.urllib import parse
-from ceilometer.i18n import _, _LI, _LE
+from ceilometer.i18n import _
ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86
@@ -201,7 +201,7 @@ class ConnectionPool(object):
splitted_url = netutils.urlsplit(url)
log_data = {'db': splitted_url.scheme,
'nodelist': connection_options['nodelist']}
-LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data)
+LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data)
client = self._mongo_connect(conf, url)
self._pool[pool_key] = weakref.ref(client)
return client
@@ -353,8 +353,8 @@ def safe_mongo_call(call):
return call(self, *args, **kwargs)
except pymongo.errors.AutoReconnect as err:
if 0 <= max_retries <= attempts:
-LOG.error(_LE('Unable to reconnect to the primary mongodb '
-'after %(retries)d retries. Giving up.') %
+LOG.error('Unable to reconnect to the primary mongodb '
+'after %(retries)d retries. Giving up.' %
{'retries': max_retries})
raise
LOG.warning(_('Unable to reconnect to the primary '
@@ -406,7 +406,7 @@ class MongoProxy(object):
self.conn.create_index(keys, name=name, *args, **kwargs)
except pymongo.errors.OperationFailure as e:
if e.code is ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
-LOG.info(_LI("Index %s will be recreate."), name)
+LOG.info("Index %s will be recreate.", name)
self._recreate_index(keys, name, *args, **kwargs)
@safe_mongo_call

View File

@@ -20,7 +20,7 @@ from oslo_log import log
from oslo_utils import timeutils
import six
-from ceilometer.i18n import _, _LW
+from ceilometer.i18n import _
from ceilometer import sample
from ceilometer import transformer
@@ -83,19 +83,19 @@ class DeltaTransformer(BaseConversionTransformer):
time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
# disallow violations of the arrow of time
if time_delta < 0:
-LOG.warning(_LW('Dropping out of time order sample: %s'), (s,))
+LOG.warning('Dropping out of time order sample: %s', (s,))
# Reset the cache to the newer sample.
self.cache[key] = prev
return None
volume_delta = s.volume - prev_volume
if self.growth_only and volume_delta < 0:
-LOG.warning(_LW('Negative delta detected, dropping value'))
+LOG.warning('Negative delta detected, dropping value')
s = None
else:
s = self._convert(s, volume_delta)
LOG.debug('Converted to: %s', s)
else:
-LOG.warning(_LW('Dropping sample with no predecessor: %s'), (s,))
+LOG.warning('Dropping sample with no predecessor: %s', (s,))
s = None
return s