Replace `_` with `_LE` in all LOG.exception calls
oslo.i18n uses different marker functions to separate the translatable messages into different catalogs, which the translation teams can prioritize translating. For details, please refer to: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#guidelines-for-use-in-openstack Marker functions were missing in some places in the network directory. This commit makes the following changes: * Add missing marker functions * Use ',' instead of '%' while adding variables to log messages Also added a hacking rule that validates the translation marker used with LOG.exception. Change-Id: If80ea6f177bb65afcdffce71550bb38fedcc54eb
This commit is contained in:
parent
5ec332c197
commit
b7535793af
|
@ -41,6 +41,7 @@ Nova Specific Commandments
|
|||
- [N326] Translated messages cannot be concatenated. String should be included in translated message.
|
||||
- [N327] assert_called_once() is not a valid method
|
||||
- [N328] Validate that LOG.info messages use _LI.
|
||||
- [N329] Validate that LOG.exception messages use _LE.
|
||||
|
||||
Creating Unit Tests
|
||||
-------------------
|
||||
|
|
|
@ -87,7 +87,7 @@ class FaultWrapper(wsgi.Middleware):
|
|||
try:
|
||||
return req.get_response(self.application)
|
||||
except Exception as ex:
|
||||
LOG.exception(_("FaultWrapper: %s"), ex)
|
||||
LOG.exception(_LE("FaultWrapper: %s"), ex)
|
||||
return faults.Fault(webob.exc.HTTPInternalServerError())
|
||||
|
||||
|
||||
|
|
|
@ -144,7 +144,7 @@ class MetadataRequestHandler(wsgi.Application):
|
|||
try:
|
||||
meta_data = self.get_metadata_by_remote_address(remote_address)
|
||||
except Exception:
|
||||
LOG.exception(_('Failed to get metadata for ip: %s'),
|
||||
LOG.exception(_LE('Failed to get metadata for ip: %s'),
|
||||
remote_address)
|
||||
msg = _('An unknown error has occurred. '
|
||||
'Please try your request again.')
|
||||
|
@ -202,7 +202,7 @@ class MetadataRequestHandler(wsgi.Application):
|
|||
meta_data = self.get_metadata_by_instance_id(instance_id,
|
||||
remote_address)
|
||||
except Exception:
|
||||
LOG.exception(_('Failed to get metadata for instance id: %s'),
|
||||
LOG.exception(_LE('Failed to get metadata for instance id: %s'),
|
||||
instance_id)
|
||||
msg = _('An unknown error has occurred. '
|
||||
'Please try your request again.')
|
||||
|
|
|
@ -29,6 +29,7 @@ from nova.api.openstack import wsgi
|
|||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LC
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova.i18n import _LW
|
||||
from nova.i18n import translate
|
||||
|
@ -89,7 +90,7 @@ class FaultWrapper(base_wsgi.Middleware):
|
|||
status, webob.exc.HTTPInternalServerError)()
|
||||
|
||||
def _error(self, inner, req):
|
||||
LOG.exception(_("Caught error: %s"), unicode(inner))
|
||||
LOG.exception(_LE("Caught error: %s"), unicode(inner))
|
||||
|
||||
safe = getattr(inner, 'safe', False)
|
||||
headers = getattr(inner, 'headers', None)
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -48,7 +48,7 @@ class LoadedExtensionInfo(object):
|
|||
' '.join(extension.__doc__.strip().split()))
|
||||
LOG.debug('Ext version: %i', extension.version)
|
||||
except AttributeError as ex:
|
||||
LOG.exception(_("Exception loading extension: %s"), ex)
|
||||
LOG.exception(_LE("Exception loading extension: %s"), ex)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
|
|
@ -28,6 +28,7 @@ from nova.api.openstack import wsgi
|
|||
from nova.api.openstack import xmlutil
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
import nova.policy
|
||||
|
@ -239,7 +240,7 @@ class ExtensionManager(object):
|
|||
LOG.debug('Ext namespace: %s', extension.namespace)
|
||||
LOG.debug('Ext updated: %s', extension.updated)
|
||||
except AttributeError as ex:
|
||||
LOG.exception(_("Exception loading extension: %s"), ex)
|
||||
LOG.exception(_LE("Exception loading extension: %s"), ex)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
@ -498,7 +499,7 @@ def expected_errors(errors):
|
|||
# expected error.
|
||||
raise
|
||||
|
||||
LOG.exception(_("Unexpected exception in API method"))
|
||||
LOG.exception(_LE("Unexpected exception in API method"))
|
||||
msg = _('Unexpected API Error. Please report this at '
|
||||
'http://bugs.launchpad.net/nova/ and attach the Nova '
|
||||
'API log if possible.\n%s') % type(exc)
|
||||
|
|
|
@ -202,7 +202,7 @@ class _BaseMessage(object):
|
|||
except Exception as exc:
|
||||
resp_value = sys.exc_info()
|
||||
failure = True
|
||||
LOG.exception(_("Error processing message locally: %(exc)s"),
|
||||
LOG.exception(_LE("Error processing message locally: %(exc)s"),
|
||||
{'exc': exc})
|
||||
return Response(self.routing_path, resp_value, failure)
|
||||
|
||||
|
@ -407,7 +407,7 @@ class _TargetedMessage(_BaseMessage):
|
|||
next_hop = self._get_next_hop()
|
||||
except Exception as exc:
|
||||
exc_info = sys.exc_info()
|
||||
LOG.exception(_("Error locating next hop for message: %(exc)s"),
|
||||
LOG.exception(_LE("Error locating next hop for message: %(exc)s"),
|
||||
{'exc': exc})
|
||||
return self._send_response_from_exception(exc_info)
|
||||
|
||||
|
@ -513,7 +513,7 @@ class _BroadcastMessage(_BaseMessage):
|
|||
next_hops = self._get_next_hops()
|
||||
except Exception as exc:
|
||||
exc_info = sys.exc_info()
|
||||
LOG.exception(_("Error locating next hops for message: %(exc)s"),
|
||||
LOG.exception(_LE("Error locating next hops for message: %(exc)s"),
|
||||
{'exc': exc})
|
||||
return self._send_response_from_exception(exc_info)
|
||||
|
||||
|
@ -533,7 +533,7 @@ class _BroadcastMessage(_BaseMessage):
|
|||
# Error just trying to send to cells. Send a single response
|
||||
# with the failure.
|
||||
exc_info = sys.exc_info()
|
||||
LOG.exception(_("Error sending message to next hops: %(exc)s"),
|
||||
LOG.exception(_LE("Error sending message to next hops: %(exc)s"),
|
||||
{'exc': exc})
|
||||
self._cleanup_response_queue()
|
||||
return self._send_response_from_exception(exc_info)
|
||||
|
|
|
@ -28,7 +28,7 @@ from oslo import messaging
|
|||
from oslo.serialization import jsonutils
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.objects import base as objects_base
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import rpc
|
||||
|
@ -385,7 +385,7 @@ class CellsAPI(object):
|
|||
cctxt.cast(ctxt, 'bdm_update_or_create_at_top',
|
||||
bdm=bdm, create=create)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of BDM update/create."))
|
||||
LOG.exception(_LE("Failed to notify cells of BDM update/create."))
|
||||
|
||||
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
|
||||
volume_id=None):
|
||||
|
@ -401,7 +401,7 @@ class CellsAPI(object):
|
|||
device_name=device_name,
|
||||
volume_id=volume_id)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of BDM destroy."))
|
||||
LOG.exception(_LE("Failed to notify cells of BDM destroy."))
|
||||
|
||||
def get_migrations(self, ctxt, filters):
|
||||
"""Get all migrations applying the filters."""
|
||||
|
|
|
@ -30,7 +30,7 @@ from nova.compute import vm_states
|
|||
from nova import conductor
|
||||
from nova.db import base
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _LE, _LI
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -195,8 +195,8 @@ class CellsScheduler(base.Base):
|
|||
build_inst_kwargs)
|
||||
return
|
||||
except Exception:
|
||||
LOG.exception(_("Couldn't communicate with cell '%s'") %
|
||||
target_cell.name)
|
||||
LOG.exception(_LE("Couldn't communicate with cell '%s'"),
|
||||
target_cell.name)
|
||||
# FIXME(comstud): Would be nice to kick this back up so that
|
||||
# the parent cell could retry, if we had a parent.
|
||||
LOG.error(_LE("Couldn't communicate with any cells"))
|
||||
|
@ -242,7 +242,7 @@ class CellsScheduler(base.Base):
|
|||
time.sleep(sleep_time)
|
||||
continue
|
||||
except Exception:
|
||||
LOG.exception(_("Error scheduling instances %(instance_uuids)s"),
|
||||
LOG.exception(_LE("Error scheduling instances %(instance_uuids)s"),
|
||||
{'instance_uuids': instance_uuids})
|
||||
ctxt = message.ctxt
|
||||
for instance_uuid in instance_uuids:
|
||||
|
|
|
@ -31,7 +31,7 @@ from nova.cells import rpc_driver
|
|||
from nova import context
|
||||
from nova.db import base
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _LE
|
||||
from nova.openstack.common import fileutils
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import rpc
|
||||
|
@ -176,7 +176,7 @@ class CellStateManager(base.Base):
|
|||
attempts += 1
|
||||
if attempts > 120:
|
||||
raise
|
||||
LOG.exception(_('DB error: %s') % e)
|
||||
LOG.exception(_LE('DB error: %s'), e)
|
||||
time.sleep(30)
|
||||
|
||||
my_cell_capabs = {}
|
||||
|
|
|
@ -29,7 +29,7 @@ import sys
|
|||
from oslo.config import cfg
|
||||
|
||||
from nova import config
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.objectstore import s3server
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -60,13 +60,13 @@ def main():
|
|||
server = service.WSGIService(api, use_ssl=should_use_ssl)
|
||||
launcher.launch_service(server, workers=server.workers or 1)
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_('Failed to load %s') % '%s-api' % api)
|
||||
LOG.exception(_LE('Failed to load %s-api'), api)
|
||||
|
||||
for mod in [s3server, xvp_proxy]:
|
||||
try:
|
||||
launcher.launch_service(mod.get_wsgi_server())
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_('Failed to load %s') % mod.__name__)
|
||||
LOG.exception(_LE('Failed to load %s'), mod.__name__)
|
||||
|
||||
for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
|
||||
'nova-cert', 'nova-conductor']:
|
||||
|
@ -89,5 +89,5 @@ def main():
|
|||
topic=topic,
|
||||
manager=manager))
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_('Failed to load %s'), binary)
|
||||
LOG.exception(_LE('Failed to load %s'), binary)
|
||||
launcher.wait()
|
||||
|
|
|
@ -23,7 +23,7 @@ from oslo.utils import timeutils
|
|||
from nova.compute import monitors
|
||||
from nova.compute.monitors import cpu_monitor as monitor
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
@ -109,8 +109,8 @@ class ComputeDriverCPUMonitor(monitor._CPUMonitorBase):
|
|||
self._data["cpu.iowait.time"] = stats["iowait"]
|
||||
self._data["cpu.frequency"] = stats["frequency"]
|
||||
except (NotImplementedError, TypeError, KeyError) as ex:
|
||||
LOG.exception(_("Not all properties needed are implemented "
|
||||
"in the compute driver: %s"), ex)
|
||||
LOG.exception(_LE("Not all properties needed are implemented "
|
||||
"in the compute driver: %s"), ex)
|
||||
raise exception.ResourceMonitorError(
|
||||
monitor=self.__class__.__name__)
|
||||
|
||||
|
|
|
@ -121,7 +121,7 @@ class XVPConsoleProxy(object):
|
|||
cfile.write(config)
|
||||
except IOError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Failed to write configuration file"))
|
||||
LOG.exception(_LE("Failed to write configuration file"))
|
||||
|
||||
def _xvp_stop(self):
|
||||
LOG.debug('Stopping xvp')
|
||||
|
|
|
@ -38,7 +38,7 @@ from pyasn1.type import univ
|
|||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.openstack.common import fileutils
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import paths
|
||||
|
@ -399,7 +399,7 @@ def _sign_csr(csr_text, ca_folder):
|
|||
csrfile.write(csr_text)
|
||||
except IOError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_('Failed to write inbound.csr'))
|
||||
LOG.exception(_LE('Failed to write inbound.csr'))
|
||||
|
||||
LOG.debug('Flags path: %s', ca_folder)
|
||||
start = os.getcwd()
|
||||
|
|
|
@ -31,7 +31,7 @@ from oslo.config import cfg
|
|||
from oslo.db import concurrency
|
||||
|
||||
from nova.cells import rpcapi as cells_rpcapi
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
||||
|
@ -625,7 +625,7 @@ def instance_destroy(context, instance_uuid, constraint=None,
|
|||
try:
|
||||
cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of instance destroy"))
|
||||
LOG.exception(_LE("Failed to notify cells of instance destroy"))
|
||||
return rv
|
||||
|
||||
|
||||
|
@ -737,7 +737,7 @@ def instance_update(context, instance_uuid, values, update_cells=True):
|
|||
try:
|
||||
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of instance update"))
|
||||
LOG.exception(_LE("Failed to notify cells of instance update"))
|
||||
return rv
|
||||
|
||||
|
||||
|
@ -765,7 +765,7 @@ def instance_update_and_get_original(context, instance_uuid, values,
|
|||
try:
|
||||
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of instance update"))
|
||||
LOG.exception(_LE("Failed to notify cells of instance update"))
|
||||
return rv
|
||||
|
||||
|
||||
|
@ -1688,7 +1688,7 @@ def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
|
|||
uuid, mac, start_period, bw_in, bw_out,
|
||||
last_ctr_in, last_ctr_out, last_refreshed)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to notify cells of bw_usage update"))
|
||||
LOG.exception(_LE("Failed to notify cells of bw_usage update"))
|
||||
return rv
|
||||
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ from nova.compute import vm_states
|
|||
import nova.context
|
||||
from nova.db.sqlalchemy import models
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LI
|
||||
from nova.i18n import _, _LI, _LE
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import uuidutils
|
||||
from nova import quota
|
||||
|
@ -844,8 +844,8 @@ def floating_ip_bulk_destroy(context, ips):
|
|||
project_id=project_id)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Failed to update usages bulk "
|
||||
"deallocating floating IP"))
|
||||
LOG.exception(_LE("Failed to update usages bulk "
|
||||
"deallocating floating IP"))
|
||||
|
||||
|
||||
@require_context
|
||||
|
|
|
@ -20,7 +20,7 @@ from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
|
|||
from sqlalchemy import Text
|
||||
from sqlalchemy.types import NullType
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -81,7 +81,7 @@ def _create_shadow_tables(migrate_engine):
|
|||
shadow_table.create()
|
||||
except Exception:
|
||||
LOG.info(repr(shadow_table))
|
||||
LOG.exception(_('Exception while creating table.'))
|
||||
LOG.exception(_LE('Exception while creating table.'))
|
||||
raise
|
||||
|
||||
|
||||
|
@ -108,7 +108,7 @@ def _populate_instance_types(instance_types_table):
|
|||
'is_public': True})
|
||||
except Exception:
|
||||
LOG.info(repr(instance_types_table))
|
||||
LOG.exception(_('Exception while seeding instance_types table'))
|
||||
LOG.exception(_LE('Exception while seeding instance_types table'))
|
||||
raise
|
||||
|
||||
|
||||
|
@ -1100,7 +1100,7 @@ def upgrade(migrate_engine):
|
|||
table.create()
|
||||
except Exception:
|
||||
LOG.info(repr(table))
|
||||
LOG.exception(_('Exception while creating table.'))
|
||||
LOG.exception(_LE('Exception while creating table.'))
|
||||
raise
|
||||
|
||||
# task log unique constraint
|
||||
|
|
|
@ -24,7 +24,7 @@ from sqlalchemy.types import NullType
|
|||
|
||||
from nova.db.sqlalchemy import api as db
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
||||
|
@ -129,8 +129,8 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
|
|||
# which raises unwrapped OperationalError, so we should catch it until
|
||||
# oslo.db would wraps all such exceptions
|
||||
LOG.info(repr(shadow_table))
|
||||
LOG.exception(_('Exception while creating table.'))
|
||||
LOG.exception(_LE('Exception while creating table.'))
|
||||
raise exception.ShadowTableExists(name=shadow_table_name)
|
||||
except Exception:
|
||||
LOG.info(repr(shadow_table))
|
||||
LOG.exception(_('Exception while creating table.'))
|
||||
LOG.exception(_LE('Exception while creating table.'))
|
||||
|
|
|
@ -29,7 +29,7 @@ from oslo.config import cfg
|
|||
from oslo.utils import excutils
|
||||
import webob.exc
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import safe_utils
|
||||
|
||||
|
@ -121,7 +121,7 @@ class NovaException(Exception):
|
|||
exc_info = sys.exc_info()
|
||||
# kwargs doesn't match a variable in the message
|
||||
# log the issue and the kwargs
|
||||
LOG.exception(_('Exception in string format operation'))
|
||||
LOG.exception(_LE('Exception in string format operation'))
|
||||
for name, value in kwargs.iteritems():
|
||||
LOG.error("%s: %s" % (name, value)) # noqa
|
||||
|
||||
|
|
|
@ -56,9 +56,11 @@ asse_equal_start_with_none_re = re.compile(
|
|||
r"assertEqual\(None,")
|
||||
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
|
||||
log_translation = re.compile(
|
||||
r"(.)*LOG\.(audit|error|warn|warning|critical|exception)\(\s*('|\")")
|
||||
r"(.)*LOG\.(audit|error|warn|warning|critical)\(\s*('|\")")
|
||||
log_translation_info = re.compile(
|
||||
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
|
||||
log_translation_exception = re.compile(
|
||||
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
|
||||
translated_log = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
|
||||
"\(\s*_\(\s*('|\")")
|
||||
|
@ -303,6 +305,9 @@ def validate_log_translations(logical_line, physical_line, filename):
|
|||
msg = "N328: LOG.info messages require translations `_LI()`!"
|
||||
if log_translation_info.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N329: LOG.exception messages require translations `_LE()`!"
|
||||
if log_translation_exception.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N321: Log messages require translations!"
|
||||
if log_translation.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
|
|
@ -21,7 +21,7 @@ from oslo.utils import excutils
|
|||
|
||||
from nova.db import base
|
||||
from nova import hooks
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.network import model as network_model
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -47,7 +47,7 @@ def update_instance_cache_with_nw_info(impl, context, instance,
|
|||
ic.save(update_cells=update_cells)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_('Failed storing info cache'), instance=instance)
|
||||
LOG.exception(_LE('Failed storing info cache'), instance=instance)
|
||||
|
||||
|
||||
def refresh_cache(f):
|
||||
|
|
|
@ -275,8 +275,8 @@ class FloatingIP(object):
|
|||
reservations = None
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update usages deallocating "
|
||||
"floating IP"))
|
||||
LOG.exception(_LE("Failed to update usages deallocating "
|
||||
"floating IP"))
|
||||
|
||||
rows_updated = objects.FloatingIP.deallocate(context, address)
|
||||
# number of updated rows will be 0 if concurrently another
|
||||
|
|
|
@ -822,7 +822,7 @@ def clean_conntrack(fixed_ip):
|
|||
_execute('conntrack', '-D', '-r', fixed_ip, run_as_root=True,
|
||||
check_exit_code=[0, 1])
|
||||
except processutils.ProcessExecutionError:
|
||||
LOG.exception(_('Error deleting conntrack entries for %s'), fixed_ip)
|
||||
LOG.exception(_LE('Error deleting conntrack entries for %s'), fixed_ip)
|
||||
|
||||
|
||||
def _enable_ipv4_forwarding():
|
||||
|
|
|
@ -977,8 +977,8 @@ class NetworkManager(manager.Manager):
|
|||
quotas.reserve(context, fixed_ips=-1, project_id=quota_project,
|
||||
user_id=quota_user)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to update usages deallocating "
|
||||
"fixed IP"))
|
||||
LOG.exception(_LE("Failed to update usages deallocating "
|
||||
"fixed IP"))
|
||||
|
||||
try:
|
||||
self._do_trigger_security_group_members_refresh_for_instance(
|
||||
|
|
|
@ -54,7 +54,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
body).get('security_group')
|
||||
except n_exc.NeutronClientException as e:
|
||||
exc_info = sys.exc_info()
|
||||
LOG.exception(_("Neutron Error creating security group %s"),
|
||||
LOG.exception(_LE("Neutron Error creating security group %s"),
|
||||
name)
|
||||
if e.status_code == 401:
|
||||
# TODO(arosen) Cannot raise generic response from neutron here
|
||||
|
@ -75,7 +75,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
security_group['id'], body).get('security_group')
|
||||
except n_exc.NeutronClientException as e:
|
||||
exc_info = sys.exc_info()
|
||||
LOG.exception(_("Neutron Error updating security group %s"),
|
||||
LOG.exception(_LE("Neutron Error updating security group %s"),
|
||||
name)
|
||||
if e.status_code == 401:
|
||||
# TODO(arosen) Cannot raise generic response from neutron here
|
||||
|
@ -159,7 +159,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
'security_groups')
|
||||
except n_exc.NeutronClientException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error getting security groups"))
|
||||
LOG.exception(_LE("Neutron Error getting security groups"))
|
||||
converted_rules = []
|
||||
for security_group in security_groups:
|
||||
converted_rules.append(
|
||||
|
@ -205,15 +205,15 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
except n_exc.NeutronClientException as e:
|
||||
exc_info = sys.exc_info()
|
||||
if e.status_code == 404:
|
||||
LOG.exception(_("Neutron Error getting security group %s"),
|
||||
LOG.exception(_LE("Neutron Error getting security group %s"),
|
||||
name)
|
||||
self.raise_not_found(six.text_type(e))
|
||||
elif e.status_code == 409:
|
||||
LOG.exception(_("Neutron Error adding rules to security "
|
||||
"group %s"), name)
|
||||
LOG.exception(_LE("Neutron Error adding rules to security "
|
||||
"group %s"), name)
|
||||
self.raise_over_quota(six.text_type(e))
|
||||
else:
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
converted_rules = []
|
||||
for rule in rules:
|
||||
|
@ -267,7 +267,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
neutron.delete_security_group_rule(rule_ids.pop())
|
||||
except n_exc.NeutronClientException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error unable to delete %s"), rule_ids)
|
||||
LOG.exception(_LE("Neutron Error unable to delete %s"),
|
||||
rule_ids)
|
||||
|
||||
def get_rule(self, context, id):
|
||||
neutron = neutronv2.get_client(context)
|
||||
|
@ -411,14 +412,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
'project': context.project_id})
|
||||
self.raise_not_found(msg)
|
||||
else:
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
params = {'device_id': instance['uuid']}
|
||||
try:
|
||||
ports = neutron.list_ports(**params).get('ports')
|
||||
except n_exc.NeutronClientException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
|
||||
if not ports:
|
||||
msg = (_("instance_id %s could not be found as device id on"
|
||||
|
@ -444,7 +445,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
neutron.update_port(port['id'], {'port': updated_port})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
|
||||
@compute_api.wrap_check_security_groups_policy
|
||||
def remove_from_instance(self, context, instance, security_group_name):
|
||||
|
@ -464,14 +465,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
'project': context.project_id})
|
||||
self.raise_not_found(msg)
|
||||
else:
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
params = {'device_id': instance['uuid']}
|
||||
try:
|
||||
ports = neutron.list_ports(**params).get('ports')
|
||||
except n_exc.NeutronClientException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
|
||||
if not ports:
|
||||
msg = (_("instance_id %s could not be found as device id on"
|
||||
|
@ -500,7 +501,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
found_security_group = True
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error:"))
|
||||
LOG.exception(_LE("Neutron Error:"))
|
||||
if not found_security_group:
|
||||
msg = (_("Security group %(security_group_name)s not associated "
|
||||
"with the instance %(instance)s") %
|
||||
|
|
|
@ -28,7 +28,7 @@ import six
|
|||
from nova.compute import flavors
|
||||
import nova.context
|
||||
from nova import db
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.image import glance
|
||||
from nova import network
|
||||
from nova.network import model as network_model
|
||||
|
@ -146,7 +146,7 @@ def send_update(context, old_instance, new_instance, service=None, host=None):
|
|||
service=service, host=host,
|
||||
old_display_name=old_display_name)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to send state update notification"),
|
||||
LOG.exception(_LE("Failed to send state update notification"),
|
||||
instance=new_instance)
|
||||
|
||||
|
||||
|
@ -185,7 +185,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
|
|||
new_vm_state=new_vm_state, new_task_state=new_task_state,
|
||||
service=service, host=host)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to send state update notification"),
|
||||
LOG.exception(_LE("Failed to send state update notification"),
|
||||
instance=instance)
|
||||
|
||||
|
||||
|
@ -289,7 +289,7 @@ def bandwidth_usage(instance_ref, audit_start,
|
|||
except Exception:
|
||||
try:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_('Failed to get nw_info'),
|
||||
LOG.exception(_LE('Failed to get nw_info'),
|
||||
instance=instance_ref)
|
||||
except Exception:
|
||||
if ignore_missing_network_data:
|
||||
|
|
|
@ -25,7 +25,7 @@ import six
|
|||
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -1327,7 +1327,8 @@ class QuotaEngine(object):
|
|||
# usage resynchronization and the reservation expiration
|
||||
# mechanisms will resolve the issue. The exception is
|
||||
# logged, however, because this is less than optimal.
|
||||
LOG.exception(_("Failed to commit reservations %s"), reservations)
|
||||
LOG.exception(_LE("Failed to commit reservations %s"),
|
||||
reservations)
|
||||
return
|
||||
LOG.debug("Committed reservations %s", reservations)
|
||||
|
||||
|
@ -1350,7 +1351,7 @@ class QuotaEngine(object):
|
|||
# usage resynchronization and the reservation expiration
|
||||
# mechanisms will resolve the issue. The exception is
|
||||
# logged, however, because this is less than optimal.
|
||||
LOG.exception(_("Failed to roll back reservations %s"),
|
||||
LOG.exception(_LE("Failed to roll back reservations %s"),
|
||||
reservations)
|
||||
return
|
||||
LOG.debug("Rolled back reservations %s", reservations)
|
||||
|
|
|
@ -29,7 +29,7 @@ from nova.compute import utils as compute_utils
|
|||
from nova.compute import vm_states
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova import notifications
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import rpc
|
||||
|
@ -57,7 +57,7 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
|
|||
ex.format_message().strip(),
|
||||
instance_uuid=instance_uuid)
|
||||
else:
|
||||
LOG.exception(_("Exception during scheduler.run_instance"))
|
||||
LOG.exception(_LE("Exception during scheduler.run_instance"))
|
||||
state = vm_states.ERROR.upper()
|
||||
LOG.warning(_LW('Setting instance to %s state.'), state,
|
||||
instance_uuid=instance_uuid)
|
||||
|
|
|
@ -28,7 +28,7 @@ from oslo.serialization import jsonutils
|
|||
from oslo.utils import excutils
|
||||
from oslo.utils import timeutils
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
||||
|
@ -66,8 +66,8 @@ class SchedulerOptions(object):
|
|||
return os.path.getmtime(filename)
|
||||
except os.error as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Could not stat scheduler options file "
|
||||
"%(filename)s: '%(e)s'"),
|
||||
LOG.exception(_LE("Could not stat scheduler options file "
|
||||
"%(filename)s: '%(e)s'"),
|
||||
{'filename': filename, 'e': e})
|
||||
|
||||
def _load_file(self, handle):
|
||||
|
@ -75,7 +75,7 @@ class SchedulerOptions(object):
|
|||
try:
|
||||
return jsonutils.load(handle)
|
||||
except ValueError as e:
|
||||
LOG.exception(_("Could not decode scheduler options: '%s'"), e)
|
||||
LOG.exception(_LE("Could not decode scheduler options: '%s'"), e)
|
||||
return {}
|
||||
|
||||
def _get_time_now(self):
|
||||
|
|
|
@ -295,7 +295,7 @@ class Service(service.Service):
|
|||
try:
|
||||
self.manager.cleanup_host()
|
||||
except Exception:
|
||||
LOG.exception(_('Service error occurred during cleanup_host'))
|
||||
LOG.exception(_LE('Service error occurred during cleanup_host'))
|
||||
pass
|
||||
|
||||
super(Service, self).stop()
|
||||
|
|
|
@ -20,6 +20,7 @@ from oslo.utils import excutils
|
|||
|
||||
from nova import block_device
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova.i18n import _LI
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
|
@ -253,8 +254,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
|
|||
device_type=self['device_type'], encryption=encryption)
|
||||
except Exception: # pylint: disable=W0702
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Driver failed to attach volume "
|
||||
"%(volume_id)s at %(mountpoint)s"),
|
||||
LOG.exception(_LE("Driver failed to attach volume "
|
||||
"%(volume_id)s at %(mountpoint)s"),
|
||||
{'volume_id': volume_id,
|
||||
'mountpoint': self['mount_device']},
|
||||
context=context, instance=instance)
|
||||
|
|
|
@ -415,7 +415,7 @@ def teardown_container(container_dir, container_root_device=None):
|
|||
utils.execute('qemu-nbd', '-d', container_root_device,
|
||||
run_as_root=True)
|
||||
except Exception as exn:
|
||||
LOG.exception(_('Failed to teardown container filesystem: %s'), exn)
|
||||
LOG.exception(_LE('Failed to teardown container filesystem: %s'), exn)
|
||||
|
||||
|
||||
def clean_lxc_namespace(container_dir):
|
||||
|
@ -428,7 +428,7 @@ def clean_lxc_namespace(container_dir):
|
|||
img = _DiskImage(image=None, mount_dir=container_dir)
|
||||
img.umount()
|
||||
except Exception as exn:
|
||||
LOG.exception(_('Failed to umount container filesystem: %s'), exn)
|
||||
LOG.exception(_LE('Failed to umount container filesystem: %s'), exn)
|
||||
|
||||
|
||||
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
|
||||
|
|
|
@ -1381,7 +1381,7 @@ def load_compute_driver(virtapi, compute_driver=None):
|
|||
virtapi)
|
||||
return utils.check_isinstance(driver, ComputeDriver)
|
||||
except ImportError:
|
||||
LOG.exception(_("Unable to load the virtualization driver"))
|
||||
LOG.exception(_LE("Unable to load the virtualization driver"))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
|
|
@ -408,7 +408,7 @@ class VMOps(object):
|
|||
self._delete_disk_files(instance_name)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_('Failed to destroy instance: %s'),
|
||||
LOG.exception(_LE('Failed to destroy instance: %s'),
|
||||
instance_name)
|
||||
|
||||
def reboot(self, instance, network_info, reboot_type):
|
||||
|
|
|
@ -1418,7 +1418,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
|
||||
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
|
||||
except Exception as ex:
|
||||
LOG.exception(_('Failed to attach volume at mountpoint: %s'),
|
||||
LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
|
||||
mountpoint, instance=instance)
|
||||
if isinstance(ex, libvirt.libvirtError):
|
||||
errcode = ex.get_error_code()
|
||||
|
|
|
@ -30,7 +30,7 @@ from oslo.serialization import jsonutils
|
|||
from oslo.utils import units
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LE
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt import driver
|
||||
|
@ -138,7 +138,7 @@ class XenAPIDriver(driver.ComputeDriver):
|
|||
try:
|
||||
vm_utils.cleanup_attached_vdis(self._session)
|
||||
except Exception:
|
||||
LOG.exception(_('Failure while cleaning up attached VDIs'))
|
||||
LOG.exception(_LE('Failure while cleaning up attached VDIs'))
|
||||
|
||||
def instance_exists(self, instance):
|
||||
"""Checks existence of an instance on the host.
|
||||
|
|
|
@ -105,8 +105,8 @@ class Host(object):
|
|||
|
||||
break
|
||||
except self._session.XenAPI.Failure:
|
||||
LOG.exception(_('Unable to migrate VM %(vm_ref)s '
|
||||
'from %(host)s'),
|
||||
LOG.exception(_LE('Unable to migrate VM %(vm_ref)s '
|
||||
'from %(host)s'),
|
||||
{'vm_ref': vm_ref, 'host': host})
|
||||
instance.host = host
|
||||
instance.vm_state = vm_states.ACTIVE
|
||||
|
@ -309,7 +309,7 @@ def call_xenhost(session, method, arg_dict):
|
|||
return ''
|
||||
return jsonutils.loads(result)
|
||||
except ValueError:
|
||||
LOG.exception(_("Unable to get updated status"))
|
||||
LOG.exception(_LE("Unable to get updated status"))
|
||||
return None
|
||||
except session.XenAPI.Failure as e:
|
||||
LOG.error(_LE("The call to %(method)s returned "
|
||||
|
|
|
@ -62,8 +62,8 @@ class ResourcePool(object):
|
|||
aggregate.update_metadata(metadata)
|
||||
op(host)
|
||||
except Exception:
|
||||
LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
|
||||
'during operation on %(host)s'),
|
||||
LOG.exception(_LE('Aggregate %(aggregate_id)s: unrecoverable '
|
||||
'state during operation on %(host)s'),
|
||||
{'aggregate_id': aggregate['id'], 'host': host})
|
||||
|
||||
def add_to_aggregate(self, context, aggregate, host, slave_info=None):
|
||||
|
|
|
@ -1432,11 +1432,11 @@ def _fetch_vhd_image(context, session, instance, image_id):
|
|||
if type(handler) == type(default_handler):
|
||||
raise
|
||||
|
||||
LOG.exception(_("Download handler '%(handler)s' raised an"
|
||||
" exception, falling back to default handler"
|
||||
" '%(default_handler)s'") %
|
||||
{'handler': handler,
|
||||
'default_handler': default_handler})
|
||||
LOG.exception(_LE("Download handler '%(handler)s' raised an"
|
||||
" exception, falling back to default handler"
|
||||
" '%(default_handler)s'"),
|
||||
{'handler': handler,
|
||||
'default_handler': default_handler})
|
||||
|
||||
vdis = default_handler.download_image(
|
||||
context, session, instance, image_id)
|
||||
|
@ -1581,7 +1581,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
|
|||
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
|
||||
except (session.XenAPI.Failure, IOError, OSError) as e:
|
||||
# We look for XenAPI and OS failures.
|
||||
LOG.exception(_("Failed to fetch glance image"),
|
||||
LOG.exception(_LE("Failed to fetch glance image"),
|
||||
instance=instance)
|
||||
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
|
||||
uuid=vdi_uuid,
|
||||
|
@ -1813,7 +1813,7 @@ def compile_diagnostics(vm_rec):
|
|||
|
||||
return diags
|
||||
except expat.ExpatError as e:
|
||||
LOG.exception(_('Unable to parse rrd of %s'), e)
|
||||
LOG.exception(_LE('Unable to parse rrd of %s'), e)
|
||||
return {"Unable to retrieve diagnostics": e}
|
||||
|
||||
|
||||
|
@ -1963,8 +1963,8 @@ def _get_rrd(server, vm_uuid):
|
|||
vm_uuid))
|
||||
return xml.read()
|
||||
except IOError:
|
||||
LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
|
||||
'server details: %(server)s.'),
|
||||
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with '
|
||||
'server details: %(server)s.'),
|
||||
{'vm_uuid': vm_uuid, 'server': server})
|
||||
return None
|
||||
|
||||
|
|
|
@ -921,8 +921,8 @@ class VMOps(object):
|
|||
undo_mgr, old_vdi_ref)
|
||||
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
|
||||
except Exception as error:
|
||||
LOG.exception(_("_migrate_disk_resizing_down failed. "
|
||||
"Restoring orig vm due_to: %s."), error,
|
||||
LOG.exception(_LE("_migrate_disk_resizing_down failed. "
|
||||
"Restoring orig vm due_to: %s."), error,
|
||||
instance=instance)
|
||||
undo_mgr._rollback()
|
||||
raise exception.InstanceFaultRollback(error)
|
||||
|
@ -1075,8 +1075,8 @@ class VMOps(object):
|
|||
transfer_ephemeral_disks_then_all_leaf_vdis()
|
||||
|
||||
except Exception as error:
|
||||
LOG.exception(_("_migrate_disk_resizing_up failed. "
|
||||
"Restoring orig vm due_to: %s."), error,
|
||||
LOG.exception(_LE("_migrate_disk_resizing_up failed. "
|
||||
"Restoring orig vm due_to: %s."), error,
|
||||
instance=instance)
|
||||
try:
|
||||
self._restore_orig_vm_and_cleanup_orphan(instance)
|
||||
|
|
Loading…
Reference in New Issue