Remove log translations.

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I51c0f921ab5153bba7fdce02fb16145ce8e27ef7
This commit is contained in:
liyi 2017-03-21 12:32:04 +08:00 committed by liyi
parent 85f4fda458
commit 669dc34d79
16 changed files with 85 additions and 106 deletions

View File

@@ -24,7 +24,6 @@ import pecan
from aodh.api import hooks
from aodh.api import middleware
from aodh.i18n import _LI
from aodh import service
from aodh import storage
@@ -69,7 +68,7 @@ def load_app(conf):
configkey = str(uuid.uuid4())
APPCONFIGS[configkey] = config
LOG.info(_LI("WSGI config used: %s"), cfg_path)
LOG.info("WSGI config used: %s", cfg_path)
return deploy.loadapp("config:" + cfg_path,
name="aodh+" + (
conf.api.auth_mode

View File

@@ -44,7 +44,7 @@ import aodh
from aodh.api.controllers.v2 import base
from aodh.api.controllers.v2 import utils as v2_utils
from aodh.api import rbac
from aodh.i18n import _, _LI, _LE
from aodh.i18n import _
from aodh import keystone_client
from aodh import messaging
from aodh import notifier
@@ -318,8 +318,8 @@ class Alarm(base.Base):
action_set = set(actions)
if len(actions) != len(action_set):
LOG.info(_LI('duplicate actions are found: %s, '
'remove duplicate ones'), actions)
LOG.info('duplicate actions are found: %s, '
'remove duplicate ones', actions)
actions = list(action_set)
setattr(alarm, actions_name, actions)
@@ -632,7 +632,7 @@ class AlarmController(rest.RestController):
try:
alarm_in = models.Alarm(**updated_alarm)
except Exception:
LOG.exception(_LE("Error while putting alarm: %s"), updated_alarm)
LOG.exception("Error while putting alarm: %s", updated_alarm)
raise base.ClientSideError(_("Alarm incorrect"))
alarm = pecan.request.storage.update_alarm(alarm_in)
@@ -795,7 +795,7 @@ class AlarmsController(rest.RestController):
try:
alarm_in = models.Alarm(**change)
except Exception:
LOG.exception(_LE("Error while posting alarm: %s"), change)
LOG.exception("Error while posting alarm: %s", change)
raise base.ClientSideError(_("Alarm incorrect"))
alarm = conn.create_alarm(alarm_in)

View File

@@ -31,7 +31,7 @@ import wsmeext.pecan as wsme_pecan
from aodh.api.controllers.v2 import alarms
from aodh.api.controllers.v2 import base
from aodh.api import rbac
from aodh.i18n import _, _LE
from aodh.i18n import _
from aodh.storage import models
LOG = log.getLogger(__name__)
@@ -330,7 +330,7 @@ class ValidatedComplexQuery(object):
date_time = date_time.replace(tzinfo=None)
return date_time
except ValueError:
LOG.exception(_LE("String %s is not a valid isotime"), isotime)
LOG.exception("String %s is not a valid isotime", isotime)
msg = _('Failed to parse the timestamp value %s') % isotime
raise base.ClientSideError(msg)

View File

@@ -27,7 +27,6 @@ import six
import webob
from aodh import i18n
from aodh.i18n import _LE
LOG = log.getLogger(__name__)
@@ -99,7 +98,7 @@ class ParsableErrorMiddleware(object):
error_message,
b'</error_message>'))
except etree.XMLSyntaxError as err:
LOG.error(_LE('Error parsing HTTP response: %s'), err)
LOG.error('Error parsing HTTP response: %s', err)
error_message = state['status_code']
body = '<error_message>%s</error_message>' % error_message
if six.PY3:

View File

@@ -16,7 +16,6 @@
from oslo_log import log
from aodh.i18n import _LI
from aodh import service
from aodh import storage
@@ -38,5 +37,5 @@ def expirer():
storage_conn.clear_expired_alarm_history_data(
conf.database.alarm_history_time_to_live)
else:
LOG.info(_LI("Nothing to clean, database alarm history time to live "
"is disabled"))
LOG.info("Nothing to clean, database alarm history time to live "
"is disabled")

View File

@@ -23,7 +23,6 @@ import six
import tenacity
import tooz.coordination
from aodh.i18n import _LE, _LI, _LW
LOG = log.getLogger(__name__)
@@ -55,13 +54,13 @@ OPTS = [
class ErrorJoiningPartitioningGroup(Exception):
def __init__(self):
super(ErrorJoiningPartitioningGroup, self).__init__(_LE(
super(ErrorJoiningPartitioningGroup, self).__init__((
'Error occurred when joining partitioning group'))
class MemberNotInGroupError(Exception):
def __init__(self, group_id, members, my_id):
super(MemberNotInGroupError, self).__init__(_LE(
super(MemberNotInGroupError, self).__init__((
'Group ID: %(group_id)s, Members: %(members)s, Me: %(me)s: '
'Current agent is not part of group and cannot take tasks') %
{'group_id': group_id, 'members': members, 'me': my_id})
@@ -124,9 +123,9 @@ class PartitionCoordinator(object):
self._coordinator = tooz.coordination.get_coordinator(
self.backend_url, self._my_id)
self._coordinator.start()
LOG.info(_LI('Coordination backend started successfully.'))
LOG.info('Coordination backend started successfully.')
except tooz.coordination.ToozError:
LOG.exception(_LE('Error connecting to coordination backend.'))
LOG.exception('Error connecting to coordination backend.')
def stop(self):
if not self._coordinator:
@@ -138,7 +137,7 @@ class PartitionCoordinator(object):
try:
self._coordinator.stop()
except tooz.coordination.ToozError:
LOG.exception(_LE('Error connecting to coordination backend.'))
LOG.exception('Error connecting to coordination backend.')
finally:
self._coordinator = None
@@ -153,8 +152,8 @@ class PartitionCoordinator(object):
try:
self._coordinator.heartbeat()
except tooz.coordination.ToozError:
LOG.exception(_LE('Error sending a heartbeat to coordination '
'backend.'))
LOG.exception('Error sending a heartbeat to coordination '
'backend.')
def join_group(self, group_id):
if (not self._coordinator or not self._coordinator.is_started
@@ -171,7 +170,7 @@ class PartitionCoordinator(object):
try:
join_req = self._coordinator.join_group(group_id)
join_req.get()
LOG.info(_LI('Joined partitioning group %s'), group_id)
LOG.info('Joined partitioning group %s', group_id)
except tooz.coordination.MemberAlreadyExist:
return
except tooz.coordination.GroupNotCreated:
@@ -182,8 +181,8 @@ class PartitionCoordinator(object):
pass
raise ErrorJoiningPartitioningGroup()
except tooz.coordination.ToozError:
LOG.exception(_LE('Error joining partitioning group %s,'
' re-trying'), group_id)
LOG.exception('Error joining partitioning group %s,'
' re-trying', group_id)
raise ErrorJoiningPartitioningGroup()
self._groups.add(group_id)
@@ -195,7 +194,7 @@ class PartitionCoordinator(object):
if self._coordinator:
self._coordinator.leave_group(group_id)
self._groups.remove(group_id)
LOG.info(_LI('Left partitioning group %s'), group_id)
LOG.info('Left partitioning group %s', group_id)
def _get_members(self, group_id):
if not self._coordinator:
@@ -228,8 +227,8 @@ class PartitionCoordinator(object):
members = self._get_members(group_id)
LOG.debug('Members of group: %s, Me: %s', members, self._my_id)
if self._my_id not in members:
LOG.warning(_LW('Cannot extract tasks because agent failed to '
'join group properly. Rejoining group.'))
LOG.warning('Cannot extract tasks because agent failed to '
'join group properly. Rejoining group.')
self.join_group(group_id)
members = self._get_members(group_id)
if self._my_id not in members:
@@ -242,6 +241,6 @@ class PartitionCoordinator(object):
LOG.debug('My subset: %s', my_subset)
return my_subset
except tooz.coordination.ToozError:
LOG.exception(_LE('Error getting group membership info from '
'coordination backend.'))
LOG.exception('Error getting group membership info from '
'coordination backend.')
return []

View File

@@ -33,7 +33,6 @@ from stevedore import extension
import aodh
from aodh import coordination
from aodh.i18n import _LI, _LE, _LW
from aodh import keystone_client
from aodh import messaging
from aodh import queue
@@ -118,15 +117,15 @@ class Evaluator(object):
previous = alarm.state
alarm.state = state
if previous != state or always_record:
LOG.info(_LI('alarm %(id)s transitioning to %(state)s because '
'%(reason)s'), {'id': alarm.alarm_id,
'state': state,
'reason': reason})
LOG.info('alarm %(id)s transitioning to %(state)s because '
'%(reason)s', {'id': alarm.alarm_id,
'state': state,
'reason': reason})
try:
self._storage_conn.update_alarm(alarm)
except storage.AlarmNotFound:
LOG.warning(_LW("Skip updating this alarm's state, the"
"alarm: %s has been deleted"),
LOG.warning("Skip updating this alarm's state, the"
"alarm: %s has been deleted",
alarm.alarm_id)
else:
self._record_change(alarm, reason)
@@ -136,7 +135,7 @@ class Evaluator(object):
except Exception:
# retry will occur naturally on the next evaluation
# cycle (unless alarm state reverts in the meantime)
LOG.exception(_LE('alarm state update failed'))
LOG.exception('alarm state update failed')
@classmethod
def within_time_constraint(cls, alarm):
@@ -245,12 +244,12 @@ class AlarmEvaluationService(cotyledon.Service):
def _evaluate_assigned_alarms(self):
try:
alarms = self._assigned_alarms()
LOG.info(_LI('initiating evaluation cycle on %d alarms'),
LOG.info('initiating evaluation cycle on %d alarms',
len(alarms))
for alarm in alarms:
self._evaluate_alarm(alarm)
except Exception:
LOG.exception(_LE('alarm evaluation cycle failed'))
LOG.exception('alarm evaluation cycle failed')
def _evaluate_alarm(self, alarm):
"""Evaluate the alarms assigned to this evaluator."""
@@ -262,7 +261,7 @@ class AlarmEvaluationService(cotyledon.Service):
try:
self.evaluators[alarm.type].obj.evaluate(alarm)
except Exception:
LOG.exception(_LE('Failed to evaluate alarm %s'), alarm.alarm_id)
LOG.exception('Failed to evaluate alarm %s', alarm.alarm_id)
def _assigned_alarms(self):
# NOTE(r-mibu): The 'event' type alarms will be evaluated by the

View File

@@ -23,7 +23,7 @@ from oslo_utils import timeutils
import six
from aodh import evaluator
from aodh.i18n import _, _LE, _LW
from aodh.i18n import _
LOG = log.getLogger(__name__)
@@ -76,16 +76,16 @@ class Event(object):
"""Validate received event has mandatory parameters."""
if not self.obj:
LOG.error(_LE('Received invalid event (empty or None)'))
LOG.error('Received invalid event (empty or None)')
raise InvalidEvent()
if not self.obj.get('event_type'):
LOG.error(_LE('Failed to extract event_type from event = %s'),
LOG.error('Failed to extract event_type from event = %s',
self.obj)
raise InvalidEvent()
if not self.obj.get('message_id'):
LOG.error(_LE('Failed to extract message_id from event = %s'),
LOG.error('Failed to extract message_id from event = %s',
self.obj)
raise InvalidEvent()
@@ -168,8 +168,8 @@ class EventAlarmEvaluator(evaluator.Evaluator):
try:
event = Event(e)
except InvalidEvent:
LOG.warning(_LW('Event <%s> is invalid, aborting evaluation '
'for it.'), e)
LOG.warning('Event <%s> is invalid, aborting evaluation '
'for it.', e)
continue
for id, alarm in six.iteritems(
@@ -177,8 +177,8 @@ class EventAlarmEvaluator(evaluator.Evaluator):
try:
self._evaluate_alarm(alarm, event)
except Exception:
LOG.exception(_LE('Failed to evaluate alarm (id=%(a)s) '
'triggered by event = %(e)s.'),
LOG.exception('Failed to evaluate alarm (id=%(a)s) '
'triggered by event = %(e)s.',
{'a': id, 'e': e})
LOG.debug('Finished event alarm evaluation.')

View File

@@ -19,7 +19,6 @@ from oslo_log import log
from oslo_serialization import jsonutils
from aodh.evaluator import threshold
from aodh.i18n import _LW
from aodh import keystone_client
LOG = log.getLogger(__name__)
@@ -67,7 +66,7 @@ class GnocchiResourceThresholdEvaluator(GnocchiBase):
rule['metric'], rule['resource_id'])
return []
except Exception as e:
LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
LOG.warning('alarm stats retrieval failed: %s', e)
return []
@@ -91,7 +90,7 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
LOG.debug('metrics %s does not exists', rule['metrics'])
return []
except Exception as e:
LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
LOG.warning('alarm stats retrieval failed: %s', e)
return []
@@ -118,5 +117,5 @@ class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase):
LOG.debug('metric %s does not exists', rule['metric'])
return []
except Exception as e:
LOG.warning(_LW('alarm stats retrieval failed: %s'), e)
LOG.warning('alarm stats retrieval failed: %s', e)
return []

View File

@@ -26,7 +26,7 @@ from oslo_utils import timeutils
from aodh import evaluator
from aodh.evaluator import utils
from aodh.i18n import _, _LW
from aodh.i18n import _
from aodh import keystone_client
LOG = log.getLogger(__name__)
@@ -207,10 +207,10 @@ class ThresholdEvaluator(evaluator.Evaluator):
return
if state == evaluator.UNKNOWN and not unknown:
LOG.warning(_LW('Expecting %(expected)d datapoints but only get '
'%(actual)d') % {
'expected': alarm.rule['evaluation_periods'],
'actual': len(statistics)})
LOG.warning('Expecting %(expected)d datapoints but only get '
'%(actual)d'
% {'expected': alarm.rule['evaluation_periods'],
'actual': len(statistics)})
# Reason is not same as log message because we want to keep
# consistent since thirdparty software may depend on old format.
reason = _('%d datapoints are unknown') % alarm.rule[

View File

@@ -27,16 +27,6 @@ _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def translate(value, user_locale):
return oslo_i18n.translate(value, user_locale)

View File

@@ -23,7 +23,6 @@ from oslo_utils import netutils
import six
from stevedore import extension
from aodh.i18n import _LE
from aodh import messaging
@@ -119,8 +118,8 @@ class AlarmEndpoint(object):
action = netutils.urlsplit(action)
except Exception:
LOG.error(
_LE("Unable to parse action %(action)s for alarm "
"%(alarm_id)s"), {'action': action, 'alarm_id': alarm_id})
("Unable to parse action %(action)s for alarm "
"%(alarm_id)s"), {'action': action, 'alarm_id': alarm_id})
return
try:
@@ -128,8 +127,8 @@ class AlarmEndpoint(object):
except KeyError:
scheme = action.scheme
LOG.error(
_LE("Action %(scheme)s for alarm %(alarm_id)s is unknown, "
"cannot notify"),
("Action %(scheme)s for alarm %(alarm_id)s is unknown, "
"cannot notify"),
{'scheme': scheme, 'alarm_id': alarm_id})
return
@@ -139,7 +138,7 @@ class AlarmEndpoint(object):
notifier.notify(action, alarm_id, alarm_name, severity,
previous, current, reason, reason_data)
except Exception:
LOG.exception(_LE("Unable to notify alarm %s"), alarm_id)
LOG.exception("Unable to notify alarm %s", alarm_id)
@staticmethod
def _process_alarm(notifiers, data):
@@ -151,7 +150,7 @@ class AlarmEndpoint(object):
actions = data.get('actions')
if not actions:
LOG.error(_LE("Unable to notify for an alarm with no action"))
LOG.error("Unable to notify for an alarm with no action")
return
for action in actions:

View File

@@ -21,7 +21,6 @@ from oslo_utils import uuidutils
import requests
import six.moves.urllib.parse as urlparse
from aodh.i18n import _LI
from aodh import notifier
LOG = log.getLogger(__name__)
@@ -65,10 +64,10 @@ class RestAlarmNotifier(notifier.AlarmNotifier):
headers['x-openstack-request-id'] = b'req-' + \
uuidutils.generate_uuid().encode('ascii')
LOG.info(_LI(
LOG.info(
"Notifying alarm %(alarm_name)s %(alarm_id)s with severity"
" %(severity)s from %(previous)s to %(current)s with action "
"%(action)s because %(reason)s. request-id: %(request_id)s ") %
"%(action)s because %(reason)s. request-id: %(request_id)s " %
({'alarm_name': alarm_name, 'alarm_id': alarm_id,
'severity': severity, 'previous': previous,
'current': current, 'action': action, 'reason': reason,
@@ -104,7 +103,7 @@ class RestAlarmNotifier(notifier.AlarmNotifier):
session.mount(action.geturl(),
requests.adapters.HTTPAdapter(max_retries=max_retries))
resp = session.post(action.geturl(), **kwargs)
LOG.info(_LI('Notifying alarm <%(id)s> gets response: %(status_code)s '
'%(reason)s.'), {'id': alarm_id,
'status_code': resp.status_code,
'reason': resp.reason})
LOG.info('Notifying alarm <%(id)s> gets response: %(status_code)s '
'%(reason)s.', {'id': alarm_id,
'status_code': resp.status_code,
'reason': resp.reason})

View File

@@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log
import six.moves.urllib.parse as urlparse
from aodh.i18n import _LE, _LI
from aodh import keystone_client
from aodh import notifier
from aodh.notifier import trust
@@ -75,9 +74,9 @@ class ZaqarAlarmNotifier(notifier.AlarmNotifier):
interface=endpoint_type)
self._zendpoint = z_endpoint.url
except Exception:
LOG.error(_LE("Aodh was configured to use zaqar:// action,"
" but Zaqar endpoint could not be found in"
" Keystone service catalog."))
LOG.error("Aodh was configured to use zaqar:// action,"
" but Zaqar endpoint could not be found in"
" Keystone service catalog.")
return self._zendpoint
def _get_client_conf(self):
@@ -101,7 +100,7 @@ class ZaqarAlarmNotifier(notifier.AlarmNotifier):
return zaqar_client.Client(self._get_endpoint(),
version=2, conf=conf)
except Exception:
LOG.error(_LE("Failed to connect to Zaqar service "),
LOG.error("Failed to connect to Zaqar service ",
exc_info=True)
def _get_presigned_client_conf(self, queue_info):
@@ -130,16 +129,16 @@ class ZaqarAlarmNotifier(notifier.AlarmNotifier):
def notify(self, action, alarm_id, alarm_name, severity, previous,
current, reason, reason_data, headers=None):
LOG.info(_LI(
LOG.info(
"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s "
"priority from %(previous)s to %(current)s with action %(action)s"
" because %(reason)s.") % ({'alarm_name': alarm_name,
'alarm_id': alarm_id,
'severity': severity,
'previous': previous,
'current': current,
'action': action,
'reason': reason}))
" because %(reason)s." % ({'alarm_name': alarm_name,
'alarm_id': alarm_id,
'severity': severity,
'previous': previous,
'current': current,
'action': action,
'reason': reason}))
body = {'alarm_name': alarm_name, 'alarm_id': alarm_id,
'severity': severity, 'previous': previous,
'current': current, 'reason': reason,
@@ -182,11 +181,11 @@ class ZaqarAlarmNotifier(notifier.AlarmNotifier):
# post the message to the queue
queue.post(message)
except IndexError:
LOG.error(_LE("Required query option missing in action %s"),
LOG.error("Required query option missing in action %s",
action)
except Exception:
LOG.error(_LE("Unknown error occurred; Failed to post message to"
" Zaqar queue"),
LOG.error("Unknown error occurred; Failed to post message to"
" Zaqar queue",
exc_info=True)
@@ -212,8 +211,8 @@ class TrustZaqarAlarmNotifier(trust.TrustAlarmNotifierMixin,
try:
queue_name = queue_info.get('queue_name')[-1]
except IndexError:
LOG.error(_LE("Required 'queue_name' query option missing in"
" action %s"),
LOG.error("Required 'queue_name' query option missing in"
" action %s",
action)
return
@@ -223,6 +222,6 @@ class TrustZaqarAlarmNotifier(trust.TrustAlarmNotifierMixin,
queue = client.queue(queue_name)
queue.post(message)
except Exception:
LOG.error(_LE("Unknown error occurred; Failed to post message to"
" Zaqar queue"),
LOG.error("Unknown error occurred; Failed to post message to"
" Zaqar queue",
exc_info=True)

View File

@@ -17,7 +17,6 @@
from oslo_log import log
from aodh.i18n import _LI
from aodh.storage import base
LOG = log.getLogger(__name__)
@@ -65,5 +64,5 @@ class Connection(base.Connection):
:param alarm_history_ttl: Number of seconds to keep alarm history
records for.
"""
LOG.info(_LI('Dropping alarm history data with TTL %d'),
LOG.info('Dropping alarm history data with TTL %d',
alarm_history_ttl)

View File

@@ -31,7 +31,6 @@ from sqlalchemy import func
from sqlalchemy.orm import exc
import aodh
from aodh.i18n import _LI
from aodh import storage
from aodh.storage import base
from aodh.storage import models as alarm_api_models
@@ -392,5 +391,5 @@ class Connection(base.Connection):
deleted_rows = (session.query(models.AlarmChange)
.filter(models.AlarmChange.timestamp < valid_start)
.delete())
LOG.info(_LI("%d alarm histories are removed from database"),
LOG.info("%d alarm histories are removed from database",
deleted_rows)