Add a hacking rule for string interpolation at logging

String interpolation should be delayed so that it is handled
by the logging code, rather than being done at the point of
the logging call. This change adds the following hacking rule
to enforce that:

- [N354] String interpolation should be delayed at logging calls.

See the oslo.i18n guidelines:

* http://docs.openstack.org/developer/oslo.i18n/guidelines.html
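
For example (illustrative only):

  # Bad: the message is interpolated even when DEBUG logging
  # is disabled, so the work is wasted.
  LOG.debug('block device info: %s' % block_device_info)

  # Good: interpolation is deferred to the logging code, which
  # performs it only if the message is actually emitted.
  LOG.debug('block device info: %s', block_device_info)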

Change-Id: Ief6d3ee3539c0857098fffdb7acfeec3e0fed6eb
Closes-Bug: #1596829
Takashi NATSUME 2016-06-28 16:42:31 +09:00
parent 3d7ff766a7
commit 4eb89c206e
24 changed files with 134 additions and 56 deletions

View File

@@ -64,6 +64,7 @@ Nova Specific Commandments
- [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method.
- [N352] LOG.warn is deprecated. Enforce use of LOG.warning.
- [N353] Validate that context objects is not passed in logging calls.
+- [N354] String interpolation should be delayed at logging calls.
Creating Unit Tests
-------------------

View File

@@ -406,7 +406,7 @@ class InstanceMetadata(object):
else:
LOG.debug('Metadata for device with unknown bus %s '
'has not been included in the '
-'output' % device.bus.__class__.__name__)
+'output', device.bus.__class__.__name__)
continue
if 'address' in device.bus:
address = device.bus.address
@@ -424,7 +424,7 @@ class InstanceMetadata(object):
else:
LOG.debug('Metadata for device of unknown type %s has not '
'been included in the '
-'output' % device.__class__.__name__)
+'output', device.__class__.__name__)
continue
device_metadata['bus'] = bus

View File

@@ -1940,7 +1940,7 @@ class API(base.Base):
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
err_str = _LW("Ignoring volume cleanup failure due to %s")
-LOG.warning(err_str % exc, instance=instance)
+LOG.warning(err_str, exc, instance=instance)
bdm.destroy()
def _local_delete(self, context, instance, bdms, delete_type, cb):

View File

@@ -5177,7 +5177,7 @@ class ComputeManager(manager.Manager):
network_info,
disk,
migrate_data)
-LOG.debug('driver pre_live_migration data is %s' % migrate_data)
+LOG.debug('driver pre_live_migration data is %s', migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
@@ -6171,9 +6171,9 @@ class ComputeManager(manager.Manager):
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
-LOG.debug('Sync already in progress for %s' % uuid)
+LOG.debug('Sync already in progress for %s', uuid)
else:
-LOG.debug('Triggering sync for uuid %s' % uuid)
+LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)

View File

@@ -105,6 +105,9 @@ doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
log_remove_context = re.compile(
r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
+log_string_interpolation = re.compile(r".*LOG\.(error|warning|info"
+                                      r"|critical|exception|debug)"
+                                      r"\([^,]*%[^,]*[,)]")
class BaseASTChecker(ast.NodeVisitor):
@@ -794,6 +797,28 @@ def check_context_log(logical_line, physical_line, filename):
"kwarg.")
+
+
+def check_delayed_string_interpolation(logical_line, physical_line, filename):
+    """Check whether string interpolation is delayed at logging calls
+
+    Not correct: LOG.debug('Example: %s' % 'bad')
+    Correct:     LOG.debug('Example: %s', 'good')
+
+    N354
+    """
+    if "nova/tests" in filename:
+        return
+
+    if pep8.noqa(physical_line):
+        return
+
+    if log_string_interpolation.match(logical_line):
+        yield (logical_line.index('%'),
+               "N354: String interpolation should be delayed to be "
+               "handled by the logging code, rather than being done "
+               "at the point of the logging call. "
+               "Use ',' instead of '%'.")
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
@@ -834,3 +859,4 @@ def factory(register):
register(no_log_warn)
register(CheckForUncalledTestClosure)
register(check_context_log)
+register(check_delayed_string_interpolation)
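
Note: the regex above matches pep8 logical lines, in which string-literal
contents have already been masked (e.g. 'xxx'), so the '%' it looks for
between the message and the first comma can only be the interpolation
operator, never a conversion specifier inside the string. An illustrative
standalone sketch of the matching behaviour (not part of this change):

  import re

  # The pattern added above, reproduced for a quick standalone check.
  pattern = re.compile(r".*LOG\.(error|warning|info"
                       r"|critical|exception|debug)"
                       r"\([^,]*%[^,]*[,)]")

  # Stand-ins for the masked logical lines the check actually sees.
  print(bool(pattern.match("LOG.debug('xxxxxxxxxx' % value)")))  # True: N354
  print(bool(pattern.match("LOG.debug('xxxxxxxxxx', value)")))   # False: ok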

View File

@@ -833,7 +833,7 @@ class NetworkManager(manager.Manager):
if address:
LOG.debug('Associating instance with specified fixed IP '
'%(address)s in network %(network)s on subnet '
-'%(cidr)s.' %
+'%(cidr)s.',
{'address': address, 'network': network['id'],
'cidr': network['cidr']},
instance=instance)
@@ -842,7 +842,7 @@ class NetworkManager(manager.Manager):
vif_id=vif.id)
else:
LOG.debug('Associating instance with fixed IP from pool '
-'in network %(network)s on subnet %(cidr)s.' %
+'in network %(network)s on subnet %(cidr)s.',
{'network': network['id'],
'cidr': network['cidr']},
instance=instance)
@@ -879,7 +879,7 @@ class NetworkManager(manager.Manager):
self.instance_dns_manager.delete_entry,
instance_id, self.instance_dns_domain))
-LOG.debug('Setting up network %(network)s on host %(host)s.' %
+LOG.debug('Setting up network %(network)s on host %(host)s.',
{'network': network['id'], 'host': self.host},
instance=instance)
self._setup_network_on_host(context, network)

View File

@@ -2352,9 +2352,9 @@ class API(base_api.NetworkAPI):
if old_pci_devices and new_pci_devices:
LOG.debug("Determining PCI devices mapping using migration"
"context: old_pci_devices: %(old)s, "
"new_pci_devices: %(new)s" %
"new_pci_devices: %(new)s",
{'old': [dev for dev in old_pci_devices],
-'new': [dev for dev in new_pci_devices]})
+'new': [dev for dev in new_pci_devices]})
return {old.address: new
for old in old_pci_devices
for new in new_pci_devices

View File

@@ -65,7 +65,7 @@ class BuildRequest(base.NovaObject):
jsonutils.loads(db_instance))
except TypeError:
LOG.debug('Failed to load instance from BuildRequest with uuid '
-'%s because it is None' % (self.instance_uuid))
+'%s because it is None', self.instance_uuid)
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
except ovoo_exc.IncompatibleObjectVersion as exc:
# This should only happen if proper service upgrade strategies are

View File

@@ -170,7 +170,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
self.bdms.append(bdmi)
def to_legacy_dict(self, pre_migration_result=False):
-LOG.debug('Converting to legacy: %s' % self)
+LOG.debug('Converting to legacy: %s', self)
legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
keys = (set(self.fields.keys()) -
set(LiveMigrateData.fields.keys()) - {'bdms'})
@@ -193,11 +193,11 @@ class LibvirtLiveMigrateData(LiveMigrateData):
legacy['pre_live_migration_result'] = live_result
self._bdms_to_legacy(live_result)
-LOG.debug('Legacy result: %s' % legacy)
+LOG.debug('Legacy result: %s', legacy)
return legacy
def from_legacy_dict(self, legacy):
-LOG.debug('Converting legacy dict to obj: %s' % legacy)
+LOG.debug('Converting legacy dict to obj: %s', legacy)
super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
for k in keys - {'bdms'}:
@@ -213,7 +213,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
if 'serial_listen_addr' in pre_result:
self.serial_listen_addr = pre_result['serial_listen_addr']
self._bdms_from_legacy(pre_result)
-LOG.debug('Converted object: %s' % self)
+LOG.debug('Converted object: %s', self)
def is_on_shared_storage(self):
return self.is_shared_block_storage or self.is_shared_instance_path

View File

@@ -337,8 +337,8 @@ class SchedulerReportClient(object):
new_gen = updated_inventories_result['resource_provider_generation']
self._resource_providers[compute_node.uuid].generation = new_gen
-LOG.debug('Updated inventory for %s at generation %i' % (
-compute_node.uuid, new_gen))
+LOG.debug('Updated inventory for %s at generation %i',
+compute_node.uuid, new_gen)
return True
@safe_connect

View File

@@ -51,8 +51,8 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
prop = image_props.get(key)
except AttributeError:
LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
"that is not present in the image metadata.") %
{"host": host_state.host, "key": key})
"that is not present in the image metadata."),
{"host": host_state.host, "key": key})
continue
# NOTE(sbauza): Aggregate metadata is only strings, we need to

View File

@@ -494,7 +494,7 @@ class HostManager(object):
forced_hosts_str = ', '.join(hosts_to_force)
msg = _LI("No hosts matched due to not matching "
"'force_hosts' value of '%s'")
-LOG.info(msg % forced_hosts_str)
+LOG.info(msg, forced_hosts_str)
def _match_forced_nodes(host_map, nodes_to_force):
forced_nodes = []
@@ -510,7 +510,7 @@ class HostManager(object):
forced_nodes_str = ', '.join(nodes_to_force)
msg = _LI("No nodes matched due to not matching "
"'force_nodes' value of '%s'")
-LOG.info(msg % forced_nodes_str)
+LOG.info(msg, forced_nodes_str)
def _get_hosts_matching_request(hosts, requested_destination):
(host, node) = (requested_destination.host,
@@ -519,14 +519,14 @@ class HostManager(object):
if x.host == host and x.nodename == node]
if requested_nodes:
LOG.info(_LI('Host filter only checking host %(host)s and '
-'node %(node)s') % {'host': host, 'node': node})
+'node %(node)s'), {'host': host, 'node': node})
else:
# NOTE(sbauza): The API level should prevent the user from
# providing a wrong destination but let's make sure a wrong
# destination doesn't trample the scheduler still.
LOG.info(_LI('No hosts matched due to not matching requested '
-'destination (%(host)s, %(node)s)'
-) % {'host': host, 'node': node})
+'destination (%(host)s, %(node)s)'),
+{'host': host, 'node': node})
return iter(requested_nodes)
ignore_hosts = spec_obj.ignore_hosts or []

View File

@@ -63,7 +63,7 @@ class MemcachedDriver(base.Driver):
key = "%(topic)s:%(host)s" % service_ref
is_up = self.mc.get(str(key)) is not None
if not is_up:
-LOG.debug('Seems service %s is down' % key)
+LOG.debug('Seems service %s is down', key)
return is_up

View File

@@ -788,3 +788,54 @@ class HackingTestCase(test.NoDBTestCase):
instance=instance)
"""
self._assert_has_no_errors(code, checks.check_context_log)
+
+    def test_check_delayed_string_interpolation(self):
+        checker = checks.check_delayed_string_interpolation
+        code = """
+               msg_w = _LW('Test string (%s)')
+               msg_i = _LI('Test string (%s)')
+               value = 'test'
+
+               LOG.error(_LE("Test string (%s)") % value)
+               LOG.warning(msg_w % 'test%string')
+               LOG.info(msg_i %
+                        "test%string%info")
+               LOG.critical(
+                   _LC('Test string (%s)') % value,
+                   instance=instance)
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\"") % 'test')
+               LOG.debug(' "Test quotation %s" \'Test\'' % "test")
+               LOG.debug('Tesing %(test)s' %
+                         {'test': ','.join(
+                             ['%s=%s' % (name, value)
+                              for name, value in test.items()])})
+               """
+
+        expected_errors = [(5, 34, 'N354'), (6, 18, 'N354'), (7, 15, 'N354'),
+                           (10, 28, 'N354'), (12, 49, 'N354'),
+                           (13, 40, 'N354'), (14, 28, 'N354')]
+        self._assert_has_errors(code, checker, expected_errors=expected_errors)
+        self._assert_has_no_errors(code, checker,
+                                   filename='nova/tests/unit/test_hacking.py')
+
+        code = """
+               msg_w = _LW('Test string (%s)')
+               msg_i = _LI('Test string (%s)')
+               value = 'test'
+
+               LOG.error(_LE("Test string (%s)"), value)
+               LOG.error(_LE("Test string (%s)") % value)  # noqa
+               LOG.warning(msg_w, 'test%string')
+               LOG.info(msg_i,
+                        "test%string%info")
+               LOG.critical(
+                   _LC('Test string (%s)'), value,
+                   instance=instance)
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\""), 'test')
+               LOG.debug(' "Test quotation %s" \'Test\'', "test")
+               LOG.debug('Tesing %(test)s',
+                         {'test': ','.join(
+                             ['%s=%s' % (name, value)
+                              for name, value in test.items()])})
+               """
+        self._assert_has_no_errors(code, checker)

View File

@@ -462,7 +462,7 @@ def teardown_container(container_dir, container_root_device=None):
utils.execute('qemu-nbd', '-d', container_root_device,
run_as_root=True)
else:
-LOG.debug('No release necessary for block device %s' %
+LOG.debug('No release necessary for block device %s',
container_root_device)
except Exception:
LOG.exception(_LE('Failed to teardown container filesystem'))

View File

@@ -762,7 +762,7 @@ class Host(object):
xml = secret_conf.to_xml()
try:
-LOG.debug('Secret XML: %s' % xml)
+LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:

View File

@@ -267,9 +267,9 @@ class Image(object):
if size < base_size:
msg = _LE('%(base)s virtual size %(base_size)s '
'larger than flavor root disk size %(size)s')
-LOG.error(msg % {'base': base,
-'base_size': base_size,
-'size': size})
+LOG.error(msg, {'base': base,
+'base_size': base_size,
+'size': size})
raise exception.FlavorDiskSmallerThanImage(
flavor_size=size, image_size=base_size)
@@ -871,7 +871,7 @@ class Rbd(Image):
include_locations=True)
locations = image_meta['locations']
-LOG.debug('Image locations are: %(locs)s' % {'locs': locations})
+LOG.debug('Image locations are: %(locs)s', {'locs': locations})
if image_meta.get('disk_format') not in ['raw', 'iso']:
reason = _('Image is not raw format')

View File

@@ -220,7 +220,7 @@ class RBDDriver(object):
try:
return self.exists(image, pool=pool, snapshot=snapshot)
except rbd.Error as e:
-LOG.debug('Unable to open image %(loc)s: %(err)s' %
+LOG.debug('Unable to open image %(loc)s: %(err)s',
dict(loc=url, err=e))
return False

View File

@@ -343,18 +343,18 @@ class XenAPISession(object):
task_ref = self.call_xenapi("task.create", name,
desc)
try:
-LOG.debug('Created task %s with ref %s' % (name, task_ref))
+LOG.debug('Created task %s with ref %s', name, task_ref)
yield task_ref
finally:
self.call_xenapi("task.destroy", task_ref)
-LOG.debug('Destroyed task ref %s' % (task_ref))
+LOG.debug('Destroyed task ref %s', task_ref)
@contextlib.contextmanager
def http_connection(session):
conn = None
xs_url = urllib.parse.urlparse(session.url)
LOG.debug("Creating http(s) connection to %s" % session.url)
LOG.debug("Creating http(s) connection to %s", session.url)
if xs_url.scheme == 'http':
conn = http_client.HTTPConnection(xs_url.netloc)
elif xs_url.scheme == 'https':

View File

@@ -56,9 +56,9 @@ def invalid_option(option_name, recommended_value):
LOG.exception(_LE('Current value of '
'CONF.xenserver.%(option)s option incompatible with '
'CONF.xenserver.independent_compute=True. '
-'Consider using "%(recommended)s"') % {
-'option': option_name,
-'recommended': recommended_value})
+'Consider using "%(recommended)s"'),
+{'option': option_name,
+'recommended': recommended_value})
raise exception.NotSupportedWithOption(
operation=option_name,
option='CONF.xenserver.independent_compute')

View File

@@ -932,7 +932,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
msg = _LW('Attempted auto_configure_disk failed because: %s')
-LOG.warning(msg % e)
+LOG.warning(msg, e)
def _make_partition(session, dev, partition_start, partition_end):

View File

@@ -363,8 +363,8 @@ def _stream_to_vdi(conn, vdi_import_path, file_size, file_obj):
'Content-Length': '%s' % file_size}
CHUNK_SIZE = 16 * 1024
-LOG.debug('Initialising PUT request to %s (Headers: %s)' % (
-vdi_import_path, headers))
+LOG.debug('Initialising PUT request to %s (Headers: %s)',
+vdi_import_path, headers)
conn.request('PUT', vdi_import_path, headers=headers)
remain_size = file_size
while remain_size >= CHUNK_SIZE:

View File

@@ -224,5 +224,5 @@ class VolumeOps(object):
# Forget (i.e. disconnect) SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)
except Exception:
-LOG.debug('Ignoring error while purging sr: %s' % sr_ref,
-exc_info=True)
+LOG.debug('Ignoring error while purging sr: %s', sr_ref,
+exc_info=True)

View File

@@ -45,24 +45,24 @@ def delete_if_exists(path):
os.unlink(path)
except OSError, e: # noqa
if e.errno == errno.ENOENT:
LOG.warning("'%s' was already deleted, skipping delete" % path)
LOG.warning("'%s' was already deleted, skipping delete", path)
else:
raise
def _link(src, dst):
LOG.info("Hard-linking file '%s' -> '%s'" % (src, dst))
LOG.info("Hard-linking file '%s' -> '%s'", src, dst)
os.link(src, dst)
def _rename(src, dst):
LOG.info("Renaming file '%s' -> '%s'" % (src, dst))
LOG.info("Renaming file '%s' -> '%s'", src, dst)
try:
os.rename(src, dst)
except OSError, e: # noqa
if e.errno == errno.EXDEV:
LOG.error("Invalid cross-device link. Perhaps %s and %s should "
"be symlinked on the same filesystem?" % (src, dst))
"be symlinked on the same filesystem?", src, dst)
raise
@@ -70,7 +70,7 @@ def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,
universal_newlines=False, close_fds=True, env=None):
"""Make a subprocess according to the given command-line string
"""
LOG.info("Running cmd '%s'" % " ".join(cmdline))
LOG.info("Running cmd '%s'", " ".join(cmdline))
kwargs = {}
kwargs['stdout'] = stdout and subprocess.PIPE or None
kwargs['stderr'] = stderr and subprocess.PIPE or None
@@ -109,7 +109,7 @@ def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
ret = proc.returncode
if ret not in ok_exit_codes:
LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
"return code in '%(ok)s' but got '%(rc)s': %(err)s" %
"return code in '%(ok)s' but got '%(rc)s': %(err)s",
{'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,
'rc': ret, 'err': err})
raise SubprocessException(' '.join(cmdline), ret, out, err)
@@ -132,11 +132,11 @@ def run_command(cmd, cmd_input=None, ok_exit_codes=None):
def try_kill_process(proc):
"""Sends the given process the SIGKILL signal."""
pid = proc.pid
LOG.info("Killing process %s" % pid)
LOG.info("Killing process %s", pid)
try:
os.kill(pid, signal.SIGKILL)
except Exception:
LOG.exception("Failed to kill %s" % pid)
LOG.exception("Failed to kill %s", pid)
def make_staging_area(sr_path):
@@ -279,14 +279,14 @@ def _validate_vhd(vdi_path):
extra = (" ensure source and destination host machines have "
"time set correctly")
LOG.info("VDI Error details: %s" % out)
LOG.info("VDI Error details: %s", out)
raise Exception(
"VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'"
"%(extra)s" % {'vdi_path': vdi_path, 'part': part,
'details': details, 'extra': extra})
LOG.info("VDI is valid: %s" % vdi_path)
LOG.info("VDI is valid: %s", vdi_path)
def _validate_vdi_chain(vdi_path):
@@ -470,7 +470,7 @@ def extract_tarball(fileobj, path, callback=None):
tar_pid = tar_proc.pid
if returncode is not None:
LOG.error("tar extract with process id '%(pid)s' "
"exited early with '%(rc)s'" %
"exited early with '%(rc)s'",
{'pid': tar_pid, 'rc': returncode})
raise SubprocessException(
' '.join(tar_cmd), returncode, "", "")
@@ -479,7 +479,7 @@ def extract_tarball(fileobj, path, callback=None):
# no need to kill already dead process
raise
except Exception:
LOG.exception("Failed while sending data to tar pid: %s" % tar_pid)
LOG.exception("Failed while sending data to tar pid: %s", tar_pid)
try_kill_process(tar_proc)
raise