Merge "Implementing the use of _L’x’/i18n markers"

This commit is contained in:
Jenkins 2014-11-04 10:38:10 +00:00 committed by Gerrit Code Review
commit 0e2d1d6acc
11 changed files with 137 additions and 130 deletions
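For readers unfamiliar with the markers this change introduces: `_LE`, `_LI` and `_LW` are the log-level translation functions that oslo.i18n's `TranslatorFactory` exposes alongside the primary `_()`. Below is a minimal sketch of what `cinder/i18n.py` provides, following the documented oslo.i18n convention; the exact file contents are an assumption here, not quoted from this commit.

```python
# Sketch only -- approximates cinder/i18n.py as of this change; the exact
# contents are an assumption based on the documented oslo.i18n convention.
from oslo import i18n

_translators = i18n.TranslatorFactory(domain='cinder')

# Primary translation function: user-facing strings (API errors, etc.).
_ = _translators.primary

# Log-level-specific markers. Each uses its own gettext domain
# (e.g. 'cinder-log-error'), so operators can translate log output
# independently of user-facing messages.
_LI = _translators.log_info      # pair with LOG.info()
_LW = _translators.log_warning   # pair with LOG.warn()
_LE = _translators.log_error     # pair with LOG.error()/LOG.exception()
_LC = _translators.log_critical  # pair with LOG.critical()


def enable_lazy():
    """Defer translation until the message is actually rendered."""
    return i18n.enable_lazy()
```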

View File

@@ -49,7 +49,7 @@ i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder import service
from cinder import utils
@@ -72,11 +72,11 @@ if __name__ == '__main__':
server = service.WSGIService('osapi_volume')
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
-LOG.exception(_('Failed to load osapi_volume'))
+LOG.exception(_LE('Failed to load osapi_volume'))
for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
try:
launcher.launch_service(service.Service.create(binary=binary))
except (Exception, SystemExit):
-LOG.exception(_('Failed to load %s'), binary)
+LOG.exception(_LE('Failed to load %s'), binary)
launcher.wait()
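
The same mechanical substitution repeats across every file below. As a hedged illustration of the convention being applied (identifiers borrowed from the hunks in this commit, not new API):

```python
# Illustrative sketch of the convention this change applies: match the
# marker to the log level, and keep plain _() for strings that are
# returned to the user rather than logged.
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def _demo(binary, backup_id):
    LOG.info(_LI('Resuming delete on backup: %s.'), backup_id)      # info
    LOG.warn(_LW('swift error while listing objects, continuing'
                 ' with delete'))                                   # warning
    LOG.error(_LE('Failed to load %s'), binary)                     # error
    LOG.exception(_LE('Failed to load osapi_volume'))               # traceback
    return _('Volume usage audit completed')                        # user-facing
```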

View File

@@ -56,7 +56,7 @@ from cinder import i18n
i18n.enable_lazy()
from cinder import context
from cinder import db
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
@@ -129,7 +129,8 @@ if __name__ == '__main__':
volume_ref,
'exists', extra_usage_info=extra_info)
except Exception as e:
-LOG.error(_("Failed to send exists notification for volume %s.") %
+LOG.error(_LE("Failed to send exists notification"
+" for volume %s.") %
volume_ref.id)
print(traceback.format_exc(e))
@@ -156,8 +157,8 @@ if __name__ == '__main__':
volume_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
-LOG.error(_("Failed to send create notification for "
-"volume %s.") % volume_ref.id)
+LOG.error(_LE("Failed to send create notification for "
+"volume %s.") % volume_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and volume_ref.deleted_at and
@@ -183,8 +184,8 @@ if __name__ == '__main__':
volume_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
-LOG.error(_("Failed to send delete notification for volume "
-"%s.") % volume_ref.id)
+LOG.error(_LE("Failed to send delete notification for volume "
+"%s.") % volume_ref.id)
print(traceback.format_exc(e))
snapshots = db.snapshot_get_active_by_window(admin_context,
@@ -203,7 +204,8 @@ if __name__ == '__main__':
'exists',
extra_info)
except Exception as e:
-LOG.error(_("Failed to send exists notification for snapshot %s.")
+LOG.error(_LE("Failed to send exists notification "
+"for snapshot %s.")
% snapshot_ref.id)
print(traceback.format_exc(e))
@@ -230,8 +232,8 @@ if __name__ == '__main__':
snapshot_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
-LOG.error(_("Failed to send create notification for snapshot "
-"%s.") % snapshot_ref.id)
+LOG.error(_LE("Failed to send create notification for snapshot "
+"%s.") % snapshot_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and snapshot_ref.deleted_at and
@ -257,8 +259,8 @@ if __name__ == '__main__':
snapshot_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
LOG.error(_("Failed to send delete notification for snapshot "
"%s.") % snapshot_ref.id)
LOG.error(_LE("Failed to send delete notification for snapshot"
"%s.") % snapshot_ref.id)
print(traceback.format_exc(e))
print(_("Volume usage audit completed"))

View File

@@ -25,7 +25,7 @@ from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder.db import base
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
import cinder.policy
@@ -139,9 +139,9 @@ class API(base.Base):
for over in overs:
if 'gigabytes' in over:
-msg = _("Quota exceeded for %(s_pid)s, tried to create "
-"%(s_size)sG backup (%(d_consumed)dG of "
-"%(d_quota)dG already consumed)")
+msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+"%(s_size)sG backup (%(d_consumed)dG of "
+"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
@@ -151,9 +151,9 @@ class API(base.Base):
consumed=_consumed('backup_gigabytes'),
quota=quotas['backup_gigabytes'])
elif 'backups' in over:
-msg = _("Quota exceeded for %(s_pid)s, tried to create "
-"backups (%(d_consumed)d backups "
-"already consumed)")
+msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+"backups (%(d_consumed)d backups "
+"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
@@ -209,8 +209,8 @@ class API(base.Base):
name = 'restore_backup_%s' % backup_id
description = 'auto-created_from_restore_from_backup'
-LOG.info(_("Creating volume of %(size)s GB for restore of "
-"backup %(backup_id)s"),
+LOG.info(_LI("Creating volume of %(size)s GB for restore of "
+"backup %(backup_id)s"),
{'size': size, 'backup_id': backup_id},
context=context)
volume = self.volume_api.create(context, size, name, description)
@@ -236,8 +236,8 @@ class API(base.Base):
{'volume_size': volume['size'], 'size': size})
raise exception.InvalidVolume(reason=msg)
-LOG.info(_("Overwriting volume %(volume_id)s with restore of "
-"backup %(backup_id)s"),
+LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
+"backup %(backup_id)s"),
{'volume_id': volume_id, 'backup_id': backup_id},
context=context)

View File

@@ -22,7 +22,7 @@ import six
from cinder.db import base
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
@@ -55,7 +55,7 @@ class BackupMetadataAPI(base.Base):
try:
jsonutils.dumps(value)
except TypeError:
-LOG.info(_("Value with type=%s is not serializable") %
+LOG.info(_LI("Value with type=%s is not serializable") %
type(value))
return False
@@ -75,8 +75,8 @@ class BackupMetadataAPI(base.Base):
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
-LOG.info(_("Unable to serialize field '%s' - excluding "
-"from backup") % (key))
+LOG.info(_LI("Unable to serialize field '%s' - excluding "
+"from backup") % (key))
continue
container[type_tag][key] = value
@@ -98,8 +98,8 @@ class BackupMetadataAPI(base.Base):
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
-LOG.info(_("Unable to serialize field '%s' - excluding "
-"from backup") % (entry))
+LOG.info(_LI("Unable to serialize field '%s' - excluding "
+"from backup") % (entry))
continue
container[type_tag][entry] = meta[entry]
@@ -122,8 +122,8 @@ class BackupMetadataAPI(base.Base):
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
-LOG.info(_("Unable to serialize field '%s' - "
-"excluding from backup") % (entry))
+LOG.info(_LI("Unable to serialize field '%s' - "
+"excluding from backup") % (entry))
continue
container[type_tag][entry.key] = entry.value

View File

@@ -53,7 +53,7 @@ from oslo.config import cfg
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
@@ -177,8 +177,8 @@ class CephBackupDriver(BackupDriver):
self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
self.rbd_stripe_count = CONF.backup_ceph_stripe_count
else:
-LOG.info(_("RBD striping not supported - ignoring configuration "
-"settings for rbd striping"))
+LOG.info(_LI("RBD striping not supported - ignoring configuration "
+"settings for rbd striping"))
self.rbd_stripe_count = 0
self.rbd_stripe_unit = 0
@@ -432,15 +432,15 @@ class CephBackupDriver(BackupDriver):
snap, rem = self._delete_backup_snapshot(client, base_name,
backup_id)
if rem:
-msg = (_("Backup base image of volume %(volume)s still "
-"has %(snapshots)s snapshots so skipping base "
-"image delete.") %
+msg = (_LI("Backup base image of volume %(volume)s still "
+"has %(snapshots)s snapshots so skipping base "
+"image delete.") %
{'snapshots': rem, 'volume': volume_id})
LOG.info(msg)
return
-LOG.info(_("Deleting backup base image='%(basename)s' of "
-"volume %(volume)s.") %
+LOG.info(_LI("Deleting backup base image='%(basename)s' of "
+"volume %(volume)s.") %
{'basename': base_name, 'volume': volume_id})
# Delete base if no more snapshots
try:
@@ -448,16 +448,17 @@ class CephBackupDriver(BackupDriver):
except self.rbd.ImageBusy as exc:
# Allow a retry if the image is busy
if retries > 0:
-LOG.info(_("Backup image of volume %(volume)s is "
-"busy, retrying %(retries)s more time(s) "
-"in %(delay)ss.") %
+LOG.info(_LI("Backup image of volume %(volume)s is "
+"busy, retrying %(retries)s more time(s) "
+"in %(delay)ss.") %
{'retries': retries,
'delay': delay,
'volume': volume_id})
eventlet.sleep(delay)
else:
-LOG.error(_("Max retries reached deleting backup "
-"%(basename)s image of volume %(volume)s.")
+LOG.error(_LE("Max retries reached deleting backup "
+"%(basename)s image of volume "
+"%(volume)s.")
% {'volume': volume_id,
'basename': base_name})
raise exc
@@ -491,7 +492,7 @@ class CephBackupDriver(BackupDriver):
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
-LOG.error(_("Pipe1 failed - %s ") % unicode(e))
+LOG.error(_LE("Pipe1 failed - %s ") % unicode(e))
raise
# NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -505,7 +506,7 @@ class CephBackupDriver(BackupDriver):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
-LOG.error(_("Pipe2 failed - %s ") % unicode(e))
+LOG.error(_LE("Pipe2 failed - %s ") % unicode(e))
raise
p1.stdout.close()
@@ -969,8 +970,8 @@ class CephBackupDriver(BackupDriver):
dest_user=rbd_user, dest_conf=rbd_conf,
src_snap=restore_point)
except exception.BackupRBDOperationFailed:
-LOG.exception(_("Differential restore failed, trying full "
-"restore"))
+LOG.exception(_LE("Differential restore failed, trying full "
+"restore"))
raise
# If the volume we are restoring to is larger than the backup volume,
@@ -1082,8 +1083,9 @@ class CephBackupDriver(BackupDriver):
return True, restore_point
else:
-LOG.info(_("No restore point found for backup='%(backup)s' of "
-"volume %(volume)s - forcing full copy.") %
+LOG.info(_LI("No restore point found for "
+"backup='%(backup)s' of "
+"volume %(volume)s - forcing full copy.") %
{'backup': backup['id'],
'volume': backup['volume_id']})
@@ -1170,8 +1172,8 @@ class CephBackupDriver(BackupDriver):
LOG.debug('Restore to volume %s finished successfully.' %
volume_id)
except exception.BackupOperationError as e:
-LOG.error(_('Restore to volume %(volume)s finished with error - '
-'%(error)s.') % {'error': e, 'volume': volume_id})
+LOG.error(_LE('Restore to volume %(volume)s finished with error - '
+'%(error)s.') % {'error': e, 'volume': volume_id})
raise
def delete(self, backup):
@@ -1182,8 +1184,8 @@ class CephBackupDriver(BackupDriver):
try:
self._try_delete_base_image(backup['id'], backup['volume_id'])
except self.rbd.ImageNotFound:
-msg = (_("RBD image for backup %(backup)s of volume %(volume)s "
-"not found. Deleting backup metadata.")
+msg = (_LW("RBD image for backup %(backup)s of volume %(volume)s "
+"not found. Deleting backup metadata.")
% {'backup': backup['id'], 'volume': backup['volume_id']})
LOG.warning(msg)
delete_failed = True
@@ -1192,8 +1194,9 @@ class CephBackupDriver(BackupDriver):
VolumeMetadataBackup(client, backup['id']).remove_if_exists()
if delete_failed:
-LOG.info(_("Delete of backup '%(backup)s' for volume '%(volume)s' "
-"finished with warning.") %
+LOG.info(_LI("Delete of backup '%(backup)s' "
+"for volume '%(volume)s' "
+"finished with warning.") %
{'backup': backup['id'], 'volume': backup['volume_id']})
else:
LOG.debug("Delete of backup '%(backup)s' for volume "

View File

@@ -43,13 +43,12 @@ from swiftclient import client as swift
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
LOG = logging.getLogger(__name__)
swiftbackup_service_opts = [
@@ -157,8 +156,8 @@ class SwiftBackupDriver(BackupDriver):
CONF.backup_swift_auth))
if CONF.backup_swift_auth == 'single_user':
if CONF.backup_swift_user is None:
-LOG.error(_("single_user auth mode enabled, "
-"but %(param)s not set")
+LOG.error(_LE("single_user auth mode enabled, "
+"but %(param)s not set")
% {'param': 'backup_swift_user'})
raise exception.ParameterNotFound(param='backup_swift_user')
self.conn = swift.Connection(
@@ -392,7 +391,7 @@ class SwiftBackupDriver(BackupDriver):
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.exception(
-_("Backup volume metadata to swift failed: %s") %
+_LE("Backup volume metadata to swift failed: %s") %
six.text_type(err))
self.delete(backup)
@@ -448,8 +447,9 @@ class SwiftBackupDriver(BackupDriver):
try:
fileno = volume_file.fileno()
except IOError:
-LOG.info("volume_file does not support fileno() so skipping "
-"fsync()")
+LOG.info(_LI("volume_file does not support "
+"fileno() so skipping "
+"fsync()"))
else:
os.fsync(fileno)
@@ -514,8 +514,8 @@ class SwiftBackupDriver(BackupDriver):
try:
swift_object_names = self._generate_object_names(backup)
except Exception:
-LOG.warn(_('swift error while listing objects, continuing'
-' with delete'))
+LOG.warn(_LW('swift error while listing objects, continuing'
+' with delete'))
for swift_object_name in swift_object_names:
try:
@@ -523,8 +523,9 @@ class SwiftBackupDriver(BackupDriver):
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
except Exception:
-LOG.warn(_('swift error while deleting object %s, '
-'continuing with delete') % swift_object_name)
+LOG.warn(_LW('swift error while deleting object %s, '
+'continuing with delete')
+% swift_object_name)
else:
LOG.debug('deleted swift object: %(swift_object_name)s'
' in container: %(container)s' %

View File

@@ -33,7 +33,7 @@ from oslo.config import cfg
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE, _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import utils
@@ -249,9 +249,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
-err = (_('backup: %(vol_id)s failed to remove backup hardlink'
-' from %(vpath)s to %(bpath)s.\n'
-'stdout: %(out)s\n stderr: %(err)s.')
+err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
+' from %(vpath)s to %(bpath)s.\n'
+'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
@@ -528,8 +528,8 @@ class TSMBackupDriver(BackupDriver):
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
-err = (_('delete: %(vol_id)s failed with '
-'stdout: %(out)s\n stderr: %(err)s')
+err = (_LE('delete: %(vol_id)s failed with '
+'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': out,
'err': err})

View File

@@ -40,7 +40,7 @@ from cinder.backup import driver
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
@@ -115,7 +115,7 @@ class BackupManager(manager.SchedulerDependentManager):
LOG.debug("Got backend '%s'." % (backend))
return backend
-LOG.info(_("Backend not found in hostname (%s) so using default.") %
+LOG.info(_LI("Backend not found in hostname (%s) so using default.") %
(host))
if 'default' not in self.volume_managers:
@@ -166,7 +166,7 @@ class BackupManager(manager.SchedulerDependentManager):
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
-LOG.info(_("Starting volume driver %(driver_name)s (%(version)s).") %
+LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s).") %
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
@@ -192,19 +192,19 @@ class BackupManager(manager.SchedulerDependentManager):
for mgr in self.volume_managers.itervalues():
self._init_volume_driver(ctxt, mgr.driver)
-LOG.info(_("Cleaning up incomplete backup operations."))
+LOG.info(_LI("Cleaning up incomplete backup operations."))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
if volume['status'] == 'backing-up':
-LOG.info(_('Resetting volume %s to available '
-'(was backing-up).') % volume['id'])
+LOG.info(_LI('Resetting volume %s to available '
+'(was backing-up).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
if volume['status'] == 'restoring-backup':
-LOG.info(_('Resetting volume %s to error_restoring '
-'(was restoring-backup).') % volume['id'])
+LOG.info(_LI('Resetting volume %s to error_restoring '
+'(was restoring-backup).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
self.db.volume_update(ctxt, volume['id'],
@@ -215,18 +215,19 @@ class BackupManager(manager.SchedulerDependentManager):
backups = self.db.backup_get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
-LOG.info(_('Resetting backup %s to error (was creating).')
+LOG.info(_LI('Resetting backup %s to error (was creating).')
% backup['id'])
err = 'incomplete backup reset on manager restart'
self.db.backup_update(ctxt, backup['id'], {'status': 'error',
'fail_reason': err})
if backup['status'] == 'restoring':
-LOG.info(_('Resetting backup %s to available (was restoring).')
+LOG.info(_LI('Resetting backup %s to '
+'available (was restoring).')
% backup['id'])
self.db.backup_update(ctxt, backup['id'],
{'status': 'available'})
if backup['status'] == 'deleting':
-LOG.info(_('Resuming delete on backup: %s.') % backup['id'])
+LOG.info(_LI('Resuming delete on backup: %s.') % backup['id'])
self.delete_backup(ctxt, backup['id'])
def create_backup(self, context, backup_id):
@@ -234,8 +235,8 @@ class BackupManager(manager.SchedulerDependentManager):
backup = self.db.backup_get(context, backup_id)
volume_id = backup['volume_id']
volume = self.db.volume_get(context, volume_id)
-LOG.info(_('Create backup started, backup: %(backup_id)s '
-'volume: %(volume_id)s.') %
+LOG.info(_LI('Create backup started, backup: %(backup_id)s '
+'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
@@ -292,12 +293,12 @@ class BackupManager(manager.SchedulerDependentManager):
'size': volume['size'],
'availability_zone':
self.az})
-LOG.info(_('Create backup finished. backup: %s.'), backup_id)
+LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
-LOG.info(_('Restore backup started, backup: %(backup_id)s '
-'volume: %(volume_id)s.') %
+LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
+'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
@@ -330,9 +331,9 @@ class BackupManager(manager.SchedulerDependentManager):
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
-LOG.info(_('Volume: %(vol_id)s, size: %(vol_size)d is '
-'larger than backup: %(backup_id)s, '
-'size: %(backup_size)d, continuing with restore.'),
+LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
+'larger than backup: %(backup_id)s, '
+'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
@@ -372,8 +373,8 @@ class BackupManager(manager.SchedulerDependentManager):
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'available'})
-LOG.info(_('Restore backup finished, backup %(backup_id)s restored'
-' to volume %(volume_id)s.') %
+LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
+' to volume %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
def delete_backup(self, context, backup_id):
@@ -391,7 +392,7 @@ class BackupManager(manager.SchedulerDependentManager):
'fail_reason':
unicode(err)})
-LOG.info(_('Delete backup started, backup: %s.'), backup_id)
+LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
self.db.backup_update(context, backup_id, {'host': self.host})
@@ -441,7 +442,7 @@ class BackupManager(manager.SchedulerDependentManager):
**reserve_opts)
except Exception:
reservations = None
-LOG.exception(_("Failed to update usages deleting backup"))
+LOG.exception(_LE("Failed to update usages deleting backup"))
context = context.elevated()
self.db.backup_destroy(context, backup_id)
@@ -451,7 +452,7 @@ class BackupManager(manager.SchedulerDependentManager):
QUOTAS.commit(context, reservations,
project_id=backup['project_id'])
-LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
+LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
def export_record(self, context, backup_id):
"""Export all volume backup metadata details to allow clean import.
@@ -466,7 +467,7 @@ class BackupManager(manager.SchedulerDependentManager):
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
-LOG.info(_('Export record started, backup: %s.'), backup_id)
+LOG.info(_LI('Export record started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
@@ -502,7 +503,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = unicode(err)
raise exception.InvalidBackup(reason=msg)
-LOG.info(_('Export record finished, backup %s exported.'), backup_id)
+LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
return backup_record
def import_record(self,
@@ -521,7 +522,7 @@ class BackupManager(manager.SchedulerDependentManager):
:raises: InvalidBackup
:raises: ServiceNotFound
"""
-LOG.info(_('Import record started, backup_url: %s.'), backup_url)
+LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
@@ -588,11 +589,11 @@ class BackupManager(manager.SchedulerDependentManager):
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
else:
-LOG.warn(_('Backup service %(service)s does not support '
-'verify. Backup id %(id)s is not verified. '
-'Skipping verify.') % {'service':
-self.driver_name,
-'id': backup_id})
+LOG.warn(_LW('Backup service %(service)s does not support '
+'verify. Backup id %(id)s is not verified. '
+'Skipping verify.') % {'service':
+self.driver_name,
+'id': backup_id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
@@ -600,8 +601,8 @@ class BackupManager(manager.SchedulerDependentManager):
'fail_reason':
unicode(err)})
-LOG.info(_('Import record id %s metadata from driver '
-'finished.') % backup_id)
+LOG.info(_LI('Import record id %s metadata from driver '
+'finished.') % backup_id)
def reset_status(self, context, backup_id, status):
"""Reset volume backup status.
@@ -613,8 +614,8 @@ class BackupManager(manager.SchedulerDependentManager):
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
-LOG.info(_('Reset backup status started, backup_id: '
-'%(backup_id)s, status: %(status)s.'),
+LOG.info(_LI('Reset backup status started, backup_id: '
+'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup_id,
'status': status})
try:
@@ -625,11 +626,11 @@ class BackupManager(manager.SchedulerDependentManager):
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
-LOG.exception(_("Backup driver has not been initialized"))
+LOG.exception(_LE("Backup driver has not been initialized"))
backup = self.db.backup_get(context, backup_id)
backup_service = self._map_service_to_driver(backup['service'])
-LOG.info(_('Backup service: %s.'), backup_service)
+LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
@@ -695,4 +696,4 @@ class BackupManager(manager.SchedulerDependentManager):
notifier_info = {'id': backup_id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups" + '.reset_status.end',
-notifier_info)
+notifier_info)
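
One stylistic note on the hunks above: many keep `%`-interpolation inside the logging call, while others already pass the arguments to the call itself (e.g. `LOG.info(_LI('...'), backup_id)`). A minimal sketch of the difference, under the assumption of standard Python logging semantics:

```python
from cinder.i18n import _LI
from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def _log_delete_finished(backup_id):
    # Preferred: the logging layer interpolates only if the record is
    # actually emitted, so a suppressed INFO message costs nothing.
    LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)

    # Also valid, and what many hunks in this change do, but the string
    # is formatted eagerly even when INFO logging is disabled:
    # LOG.info(_LI('Delete backup finished, backup %s deleted.') % backup_id)
```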

View File

@@ -16,7 +16,7 @@ from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ def upgrade(migrate_engine):
try:
quota_classes.create()
except Exception:
-LOG.error(_("Table |%s| not created!"), repr(quota_classes))
+LOG.error(_LE("Table |%s| not created!"), repr(quota_classes))
raise
quota_usages = Table('quota_usages', meta,
@@ -72,7 +72,7 @@ def upgrade(migrate_engine):
try:
quota_usages.create()
except Exception:
-LOG.error(_("Table |%s| not created!"), repr(quota_usages))
+LOG.error(_LE("Table |%s| not created!"), repr(quota_usages))
raise
reservations = Table('reservations', meta,
@@ -103,7 +103,7 @@ def upgrade(migrate_engine):
try:
reservations.create()
except Exception:
-LOG.error(_("Table |%s| not created!"), repr(reservations))
+LOG.error(_LE("Table |%s| not created!"), repr(reservations))
raise
@@ -125,25 +125,25 @@ def downgrade(migrate_engine):
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
-LOG.error(_("Dropping foreign key reservations_ibfk_1 failed."))
+LOG.error(_LE("Dropping foreign key reservations_ibfk_1 failed."))
quota_classes = Table('quota_classes', meta, autoload=True)
try:
quota_classes.drop()
except Exception:
-LOG.error(_("quota_classes table not dropped"))
+LOG.error(_LE("quota_classes table not dropped"))
raise
quota_usages = Table('quota_usages', meta, autoload=True)
try:
quota_usages.drop()
except Exception:
-LOG.error(_("quota_usages table not dropped"))
+LOG.error(_LE("quota_usages table not dropped"))
raise
reservations = Table('reservations', meta, autoload=True)
try:
reservations.drop()
except Exception:
-LOG.error(_("reservations table not dropped"))
+LOG.error(_LE("reservations table not dropped"))
raise

View File

@@ -13,7 +13,7 @@
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ def upgrade(migrate_engine):
try:
table.drop()
except Exception:
-LOG.error(_("migrations table not dropped"))
+LOG.error(_LE("migrations table not dropped"))
raise
@@ -59,5 +59,5 @@ def downgrade(migrate_engine):
try:
table.create()
except Exception:
-LOG.error(_("Table |%s| not created"), repr(table))
+LOG.error(_LE("Table |%s| not created"), repr(table))
raise

View File

@@ -17,7 +17,7 @@ import datetime
from oslo.config import cfg
from sqlalchemy import MetaData, Table
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
# Get default values via config. The defaults will either
@@ -47,8 +47,8 @@ def upgrade(migrate_engine):
# Do not add entries if there are already 'default' entries. We don't
# want to write over something the user added.
if rows:
-LOG.info(_("Found existing 'default' entries in the quota_classes "
-"table. Skipping insertion of default values."))
+LOG.info(_LI("Found existing 'default' entries in the quota_classes "
+"table. Skipping insertion of default values."))
return
try:
@@ -71,9 +71,9 @@ def upgrade(migrate_engine):
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
-LOG.info(_("Added default quota class data into the DB."))
+LOG.info(_LI("Added default quota class data into the DB."))
except Exception:
-LOG.error(_("Default quota class data not inserted into the DB."))
+LOG.error(_LE("Default quota class data not inserted into the DB."))
raise