Only raw string literals should be used with _()

Fix a number of places where formatted strings were used with _() (causing
gettext to fail to match the string) or where variables were used with _()
(causing xgettext to fail to extract the string)

Also, there's no value in internationalizing an empty string

Change-Id: Iac7dbe46eeaa8ddf03c2a357ecd52f69aa8678aa
Johannes Erdfelt 2012-03-04 19:06:31 +00:00
parent 8813ab185d
commit 534a894ad1
32 changed files with 114 additions and 113 deletions
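
To make the rule concrete, the snippet below is a minimal, self-contained sketch (not taken from this change) of the wrong and right patterns. It assumes only the stdlib gettext module, installed with a fallback catalog under a hypothetical 'nova' domain: xgettext can only extract string literals that appear inside _() at scan time, and gettext can only look up that exact literal in the catalog at run time, so any % formatting must be applied to the value _() returns.

    import gettext

    # Install _() into builtins; with no compiled catalog this falls back to
    # NullTranslations, so _() simply returns its argument (done here only so
    # the sketch runs standalone).
    gettext.install('nova')

    interface = 'eth0'

    # Wrong: the string is formatted before _() sees it, so xgettext finds no
    # literal to extract and gettext cannot match the runtime result.
    bad = _('Interface %s not found' % interface)

    # Right: _() receives the raw literal; formatting is applied to whatever
    # translation gettext returns.
    good = _('Interface %s not found') % interface

    # Also wrong: passing a variable gives xgettext nothing to extract.
    err = 'Not enough parameters, need group_name or group_id'
    also_bad = _(err)

    # And _('') is pointless; with a real catalog the empty msgid maps to the
    # catalog metadata header rather than a useful translation.

    print(bad)
    print(good)
    print(also_bad)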


@ -611,8 +611,8 @@ class CloudController(object):
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.EC2APIError(_(err))
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
self.compute_api.ensure_default_security_group(context)
notfound = exception.SecurityGroupNotFound
if group_name:
@ -626,8 +626,8 @@ class CloudController(object):
if not security_group:
raise notfound(security_group_id=group_id)
msg = "Revoke security group ingress %s"
LOG.audit(_(msg), security_group['name'], context=context)
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
prevalues = []
try:
prevalues = kwargs['ip_permissions']
@ -638,8 +638,8 @@ class CloudController(object):
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
if not rulesvalues:
err = "%s Not enough parameters to build a valid rule"
raise exception.EC2APIError(_(err % rulesvalues))
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group.id
@ -665,8 +665,8 @@ class CloudController(object):
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.EC2APIError(_(err))
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
self.compute_api.ensure_default_security_group(context)
notfound = exception.SecurityGroupNotFound
if group_name:
@ -680,8 +680,8 @@ class CloudController(object):
if not security_group:
raise notfound(security_group_id=group_id)
msg = "Authorize security group ingress %s"
LOG.audit(_(msg), security_group['name'], context=context)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
prevalues = []
try:
prevalues = kwargs['ip_permissions']
@ -691,14 +691,14 @@ class CloudController(object):
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
if not rulesvalues:
err = "%s Not enough parameters to build a valid rule"
raise exception.EC2APIError(_(err % rulesvalues))
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group,
values_for_rule):
err = '%s - This rule already exists in group'
raise exception.EC2APIError(_(err) % values_for_rule)
err = _('%s - This rule already exists in group')
raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)
rule_ids = []
@ -772,8 +772,8 @@ class CloudController(object):
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.EC2APIError(_(err))
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,


@ -41,7 +41,7 @@ def disk_config_from_api(value):
elif value == 'MANUAL':
return False
else:
msg = _("%s must be either 'MANUAL' or 'AUTO'." % API_DISK_CONFIG)
msg = _("%s must be either 'MANUAL' or 'AUTO'.") % API_DISK_CONFIG
raise exc.HTTPBadRequest(explanation=msg)


@ -67,7 +67,7 @@ class NetworkController(object):
def _disassociate(self, request, network_id, body):
context = request.environ['nova.context']
authorize(context)
LOG.debug(_("Disassociating network with id %s" % network_id))
LOG.debug(_("Disassociating network with id %s") % network_id)
try:
self.network_api.disassociate(context, network_id)
except exception.NetworkNotFound:


@ -137,9 +137,9 @@ class Limit(object):
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
self.error_message = _("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s." %
self.__dict__)
msg = _("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s.")
self.error_message = msg % self.__dict__
def __call__(self, verb, url):
"""


@ -886,7 +886,7 @@ class Resource(wsgi.Application):
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
except AttributeError, e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)


@ -137,8 +137,10 @@ def validate(args, validator):
assert callable(f)
if not f(args[key]):
msg = "%s with value %s failed validator %s" % (
key, args[key], f.__name__)
LOG.debug(_(msg))
value = args[key]
validator = f.__name__
msg = _("%(key)s with value %(value)s failed validator"
" %(validator)s")
LOG.debug(msg % locals())
return False
return True


@ -989,17 +989,17 @@ class ComputeManager(manager.SchedulerDependentManager):
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
% locals()))
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
% locals())
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups" % excess))
LOG.debug(_("Rotating out %d backups") % excess)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s" % image_id))
LOG.debug(_("Deleting image %s") % image_id)
image_service.delete(context, image_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())


@ -562,8 +562,8 @@ def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
with_lockmode('update').\
first()
if compute_node is None:
raise exception.NotFound(_("No ComputeNode for %(host)s" %
locals()))
raise exception.NotFound(_("No ComputeNode for %(host)s") %
locals())
# This table thingy is how we get atomic UPDATE x = x + 1
# semantics.
@ -597,8 +597,8 @@ def compute_node_utilization_set(context, host, free_ram_mb=None,
with_lockmode('update').\
first()
if compute_node is None:
raise exception.NotFound(_("No ComputeNode for %(host)s" %
locals()))
raise exception.NotFound(_("No ComputeNode for %(host)s") %
locals())
if free_ram_mb != None:
compute_node.free_ram_mb = free_ram_mb


@ -270,7 +270,7 @@ class FloatingIP(object):
self.l3driver.add_floating_ip(floating_ip['address'],
fixed_address, floating_ip['interface'])
except exception.ProcessExecutionError:
msg = _('Interface %(interface)s not found' % locals())
msg = _('Interface %(interface)s not found') % locals()
LOG.debug(msg)
raise exception.NoFloatingIpInterface(interface=interface)
@ -468,7 +468,7 @@ class FloatingIP(object):
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
if "Cannot find device" in str(e):
msg = _('Interface %(interface)s not found' % locals())
msg = _('Interface %(interface)s not found') % locals()
LOG.error(msg)
raise exception.NoFloatingIpInterface(interface=interface)
@ -1384,7 +1384,7 @@ class NetworkManager(manager.SchedulerDependentManager):
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
' before delete' % network.project_id))
' before delete') % network.project_id)
db.network_delete_safe(context, network.id)
@property


@ -177,8 +177,8 @@ class Client(object):
if self.logger:
self.logger.debug(
_("Quantum Client Request: %(method)s %(action)s" %
locals()))
_("Quantum Client Request: %(method)s %(action)s") %
locals())
if body:
self.logger.debug(body)
@ -193,7 +193,7 @@ class Client(object):
if status_code in NOT_FOUND_CODES:
raise QuantumNotFoundException(
_("Quantum entity not found: %s" % data))
_("Quantum entity not found: %s") % data)
if status_code in (httplib.OK,
httplib.CREATED,
@ -203,12 +203,12 @@ class Client(object):
return self.deserialize(data, status_code)
else:
raise QuantumServerException(
_("Server %(status_code)s error: %(data)s"
% locals()))
_("Server %(status_code)s error: %(data)s")
% locals())
except (socket.error, IOError), e:
raise QuantumIOException(_("Unable to connect to "
"server. Got error: %s" % e))
"server. Got error: %s") % e)
def get_status_code(self, response):
"""Returns the integer status code from the response, which
@ -225,8 +225,8 @@ class Client(object):
elif isinstance(data, dict):
return JSONSerializer().serialize(data, self.content_type())
else:
raise Exception(_("unable to deserialize object of type = '%s'" %
type(data)))
raise Exception(_("unable to deserialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
return JSONSerializer().deserialize(data, self.content_type())


@ -203,8 +203,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
if not self.q_conn.network_exists(q_tenant_id, quantum_net_id):
raise Exception(_("Unable to find existing quantum "
"network for tenant '%(q_tenant_id)s' "
"with net-id '%(quantum_net_id)s'" %
locals()))
"with net-id '%(quantum_net_id)s'") %
locals())
else:
nova_id = self._get_nova_id()
quantum_net_id = self.q_conn.create_network(q_tenant_id, label,


@ -92,7 +92,7 @@ class MelangeConnection(object):
response_str = response.read()
if response.status < 400:
return response_str
raise Exception(_("Server returned error: %s" % response_str))
raise Exception(_("Server returned error: %s") % response_str)
except (socket.error, IOError), e:
LOG.exception(_('Connection error contacting melange'
' service, retrying'))


@ -87,7 +87,7 @@ class QuantumNovaIPAMLib(object):
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, net_id)
if not network:
raise Exception(_("No network with net_id = %s" % net_id))
raise Exception(_("No network with net_id = %s") % net_id)
manager.FlatManager.delete_network(self.net_manager,
admin_context, None,
network['uuid'],
@ -218,8 +218,8 @@ class QuantumNovaIPAMLib(object):
{'allocated': False,
'virtual_interface_id': None})
if len(fixed_ips) == 0:
LOG.error(_('No fixed IPs to deallocate for vif %s' %
vif_ref['id']))
LOG.error(_('No fixed IPs to deallocate for vif %s') %
vif_ref['id'])
def get_allocated_ips(self, context, subnet_id, project_id):
"""Returns a list of (ip, vif_id) pairs"""


@ -97,7 +97,7 @@ class QuantumClientConnection(object):
vNIC with the specified interface-id.
"""
LOG.debug(_("Connecting interface %(interface_id)s to "
"net %(net_id)s for %(tenant_id)s" % locals()))
"net %(net_id)s for %(tenant_id)s") % locals())
port_data = {'port': {'state': 'ACTIVE'}}
for kw in kwargs:
port_data['port'][kw] = kwargs[kw]
@ -111,7 +111,7 @@ class QuantumClientConnection(object):
def detach_and_delete_port(self, tenant_id, net_id, port_id):
"""Detach and delete the specified Quantum port."""
LOG.debug(_("Deleting port %(port_id)s on net %(net_id)s"
" for %(tenant_id)s" % locals()))
" for %(tenant_id)s") % locals())
self.client.detach_resource(net_id, port_id, tenant=tenant_id)
self.client.delete_port(net_id, port_id, tenant=tenant_id)


@ -112,7 +112,7 @@ def notify(publisher_id, event_type, priority, payload):
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities' % priority))
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = utils.to_primitive(payload, convert_instances=True)
@ -128,5 +128,5 @@ def notify(publisher_id, event_type, priority, payload):
driver.notify(msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. Payload=%(payload)s" %
locals()))
"send to notification system. Payload=%(payload)s") %
locals())


@ -223,7 +223,7 @@ class Consumer(messaging.Consumer):
# persistent failure occurs.
except Exception, e: # pylint: disable=W0703
if not self.failed_connection:
LOG.exception(_('Failed to fetch message from queue: %s' % e))
LOG.exception(_('Failed to fetch message from queue: %s') % e)
self.failed_connection = True


@ -425,7 +425,7 @@ class Connection(object):
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on '
'%(hostname)s:%(port)d' % self.params))
'%(hostname)s:%(port)d') % self.params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.


@ -337,12 +337,12 @@ class Connection(object):
try:
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
LOG.error(_('Unable to connect to AMQP server: %s ' % str(e)))
LOG.error(_('Unable to connect to AMQP server: %s ') % e)
time.sleep(FLAGS.qpid_reconnect_interval or 1)
else:
break
LOG.info(_('Connected to AMQP server on %s' % self.broker))
LOG.info(_('Connected to AMQP server on %s') % self.broker)
self.session = self.connection.session()


@ -48,7 +48,7 @@ class DistributedScheduler(driver.Scheduler):
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
msg = _("No host selection for %s defined." % topic)
msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
@ -72,7 +72,7 @@ class DistributedScheduler(driver.Scheduler):
*args, **kwargs)
if not weighted_hosts:
raise exception.NoValidHost(reason=_(""))
raise exception.NoValidHost(reason="")
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@ -106,7 +106,7 @@ class DistributedScheduler(driver.Scheduler):
hosts = self._schedule(context, 'compute', request_spec,
*args, **kwargs)
if not hosts:
raise exception.NoValidHost(reason=_(""))
raise exception.NoValidHost(reason="")
host = hosts.pop(0)
# NOTE(comstud): Make sure we do not pass this through. It


@ -64,7 +64,7 @@ class SchedulerOptions(object):
return os.path.getmtime(filename)
except os.error, e:
LOG.exception(_("Could not stat scheduler options file "
"%(filename)s: '%(e)s'", locals()))
"%(filename)s: '%(e)s'"), locals())
raise
def _load_file(self, handle):


@ -181,7 +181,7 @@ class VsaScheduler(simple.SimpleScheduler):
selected_hosts,
unique)
if host is None:
raise exception.NoValidHost(reason=_(""))
raise exception.NoValidHost(reason="")
return (host, qos_cap)


@ -93,7 +93,7 @@ def fake_execute(*cmd_parts, **kwargs):
run_as_root=run_as_root,
check_exit_code=check_exit_code)
except exception.ProcessExecutionError as e:
LOG.debug(_('Faked command raised an exception %s' % str(e)))
LOG.debug(_('Faked command raised an exception %s') % e)
raise
stdout = reply[0]


@ -633,8 +633,8 @@ class XenAPIVMTestCase(test.TestCase):
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent' %
self._tmpdir))
LOG.debug(_('Creating files in %s to simulate guest agent') %
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
@ -644,8 +644,8 @@ class XenAPIVMTestCase(test.TestCase):
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s' %
self._tmpdir))
LOG.debug(_('Removing simulated guest agent files in %s') %
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))


@ -830,23 +830,23 @@ def synchronized(name, external=False):
_semaphores[name] = semaphore.Semaphore()
sem = _semaphores[name]
LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
'"%(method)s"...' % {'lock': name,
'method': f.__name__}))
'"%(method)s"...') % {'lock': name,
'method': f.__name__})
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...' % {'lock': name,
'method': f.__name__}))
'"%(method)s"...') % {'lock': name,
'method': f.__name__})
if external and not FLAGS.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...' %
{'lock': name, 'method': f.__name__}))
'method "%(method)s"...') %
{'lock': name, 'method': f.__name__})
lock_file_path = os.path.join(FLAGS.lock_path,
'nova-%s' % name)
lock = lockfile.FileLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" for '
'method "%(method)s"...' %
{'lock': name, 'method': f.__name__}))
'method "%(method)s"...') %
{'lock': name, 'method': f.__name__})
retval = f(*args, **kwargs)
else:
retval = f(*args, **kwargs)
@ -903,15 +903,15 @@ def cleanup_file_locks():
if match is None:
continue
pid = match.group(1)
LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s' %
{'filename': filename, 'pid': pid}))
LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s') %
{'filename': filename, 'pid': pid})
try:
os.kill(int(pid), 0)
except OSError, e:
# PID wasn't found
delete_if_exists(os.path.join(FLAGS.lock_path, filename))
LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s' %
{'filename': filename, 'pid': pid}))
LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s') %
{'filename': filename, 'pid': pid})
# cleanup lock files
for filename in files:
@ -925,13 +925,13 @@ def cleanup_file_locks():
continue
else:
raise
msg = _('Found lockfile %(file)s with link count %(count)d' %
{'file': filename, 'count': stat_info.st_nlink})
msg = (_('Found lockfile %(file)s with link count %(count)d') %
{'file': filename, 'count': stat_info.st_nlink})
LOG.debug(msg)
if stat_info.st_nlink == 1:
delete_if_exists(os.path.join(FLAGS.lock_path, filename))
msg = _('Cleaned lockfile %(file)s with link count %(count)d' %
{'file': filename, 'count': stat_info.st_nlink})
msg = (_('Cleaned lockfile %(file)s with link count %(count)d') %
{'file': filename, 'count': stat_info.st_nlink})
LOG.debug(msg)
@ -1071,7 +1071,7 @@ def parse_server_string(server_str):
return (address, port)
except Exception:
LOG.debug(_('Invalid server_string: %s' % server_str))
LOG.debug(_('Invalid server_string: %s') % server_str)
return ('', '')


@ -112,7 +112,7 @@ class BareMetalDom(object):
self.domains.remove(dom)
continue
LOG.debug(_(self.domains))
LOG.debug(self.domains)
self.store_domain()
def reboot_domain(self, name):


@ -253,7 +253,7 @@ class ProxyConnection(driver.ComputeDriver):
network_info=network_info,
block_device_info=block_device_info)
LOG.debug(_("instance %s: is building"), instance['name'])
LOG.debug(_(xml_dict))
LOG.debug(xml_dict)
def _wait_for_boot():
try:
@ -471,8 +471,8 @@ class ProxyConnection(driver.ComputeDriver):
for injection in ('metadata', 'key', 'net'):
if locals()[injection]:
LOG.info(_('instance %(inst_name)s: injecting '
'%(injection)s into image %(img_id)s'
% locals()))
'%(injection)s into image %(img_id)s')
% locals())
try:
disk.inject_data(injection_path, key, net, metadata,
partition=target_partition,


@ -272,14 +272,13 @@ class BareMetalNodes(object):
out_msg = file.readline().find("Unreachable")
utils.execute('sudo', 'rm', tile_output)
if out_msg == -1:
cmd = ("TILERA_BOARD_#" + str(node_id) + " " + node_ip +
" is ready")
LOG.debug(_(cmd))
cmd = _("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready")
LOG.debug(cmd % locals())
return True
else:
cmd = ("TILERA_BOARD_#" + str(node_id) + " " +
node_ip + " is not ready, out_msg=" + out_msg)
LOG.debug(_(cmd))
cmd = _("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
" out_msg=%(out_msg)s")
LOG.debug(cmd % locals())
self.power_mgr(node_id, 2)
return False
@ -290,8 +289,8 @@ class BareMetalNodes(object):
From basepath to /tftpboot, kernel is set based on the given mode
such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
"""
cmd = "Noting to do for tilera nodes: vmlinux is in CF"
LOG.debug(_(cmd))
cmd = _("Noting to do for tilera nodes: vmlinux is in CF")
LOG.debug(cmd)
def sleep_mgr(self, time_in_seconds):
"""


@ -162,7 +162,7 @@ class _DiskImage(object):
for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
if cls.mode == mode:
return cls
raise exception.Error(_("unknown disk image handler: %s" % mode))
raise exception.Error(_("unknown disk image handler: %s") % mode)
def mount(self):
"""Mount a disk image, using the object attributes.


@ -1232,8 +1232,8 @@ class LibvirtConnection(driver.ComputeDriver):
for injection in ('metadata', 'key', 'net', 'admin_password'):
if locals()[injection]:
LOG.info(_('Injecting %(injection)s into image %(img_id)s'
% locals()), instance=instance)
LOG.info(_('Injecting %(injection)s into image %(img_id)s')
% locals(), instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_password,
@ -1810,7 +1810,7 @@ class LibvirtConnection(driver.ComputeDriver):
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
dic = utils.loads(cpu_info)
xml = str(Template(self.cpuinfo_xml, searchList=dic))
LOG.info(_('to xml...\n:%s ' % xml))
LOG.info(_('to xml...\n:%s ') % xml)
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")


@ -728,7 +728,7 @@ class VMHelper(HelperBase):
vdis = json.loads(result)
for vdi in vdis:
LOG.debug(_("xapi 'download_vhd' returned VDI of "
"type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi))
"type '%(vdi_type)s' with UUID '%(vdi_uuid)s'") % vdi)
cls.scan_sr(session, instance, sr_ref)
@ -756,7 +756,7 @@ class VMHelper(HelperBase):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d' % locals()))
'%(vdi_size_bytes)d') % locals())
size_bytes += vdi_size_bytes
return size_bytes


@ -1269,8 +1269,8 @@ class VMOps(object):
rescue_vm_ref = VMHelper.lookup(self._session,
"%s-rescue" % instance.name)
if rescue_vm_ref:
raise RuntimeError(_(
"Instance is already in Rescue Mode: %s" % instance.name))
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance.name)
vm_ref = VMHelper.lookup(self._session, instance.name)
self._shutdown(instance, vm_ref)


@ -694,7 +694,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
cluster_password))[:-1]
header['Authorization'] = 'Basic %s' % auth_key
LOG.debug(_("Payload for SolidFire API call: %s" % payload))
LOG.debug(_("Payload for SolidFire API call: %s") % payload)
connection = httplib.HTTPSConnection(host, port)
connection.request('POST', '/json-rpc/1.0', payload, header)
response = connection.getresponse()
@ -711,12 +711,12 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
except (TypeError, ValueError), exc:
connection.close()
msg = _("Call to json.loads() raised an exception: %s" % exc)
msg = _("Call to json.loads() raised an exception: %s") % exc
raise exception.SfJsonEncodeFailure(msg)
connection.close()
LOG.debug(_("Results of SolidFire API call: %s" % data))
LOG.debug(_("Results of SolidFire API call: %s") % data)
return data
def _get_volumes_by_sfaccount(self, account_id):
@ -730,7 +730,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
params = {'username': sf_account_name}
data = self._issue_api_request('GetAccountByName', params)
if 'result' in data and 'account' in data['result']:
LOG.debug(_('Found solidfire account: %s' % sf_account_name))
LOG.debug(_('Found solidfire account: %s') % sf_account_name)
sfaccount = data['result']['account']
return sfaccount
@ -744,8 +744,8 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
sf_account_name = socket.gethostname() + '-' + nova_project_id
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug(_('solidfire account: %s does not exist, create it...'
% sf_account_name))
LOG.debug(_('solidfire account: %s does not exist, create it...')
% sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
@ -878,7 +878,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
volid = v['volumeID']
if found_count != 1:
LOG.debug(_("Deleting volumeID: %s " % volid))
LOG.debug(_("Deleting volumeID: %s ") % volid)
raise exception.DuplicateSfVolumeNames(vol_name=volume['name'])
params = {'volumeID': volid}