Use LOG.warning instead of LOG.warn

Python 3 deprecated the logger.warn method (see
https://docs.python.org/3/library/logging.html#logging.warning),
so we prefer LOG.warning to avoid a DeprecationWarning.

Closes-Bug: #1529913

Change-Id: Icc01ce5fbd10880440cf75a2e0833394783464a0
Co-Authored-By: Gary Kotton <gkotton@vmware.com>
Authored by LiuNanke on 2015-12-29 23:25:55 +08:00; committed by Armando Migliaccio
parent 2768da320d
commit 83ef6b5677
39 changed files with 155 additions and 136 deletions
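
For context (not part of the diff): a minimal, hedged sketch of the behavior the commit message refers to. On recent CPython 3 releases, Logger.warn is kept only as a deprecated alias for Logger.warning and emits a DeprecationWarning; the exact wording of the message may vary by interpreter version.

    import logging
    import warnings

    logging.basicConfig()
    LOG = logging.getLogger(__name__)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LOG.warn("old spelling")      # deprecated alias; emits a DeprecationWarning
        LOG.warning("new spelling")   # preferred spelling; no warning emitted

    # Typically prints something like:
    # ["The 'warn' method is deprecated, use 'warning' instead"]
    print([str(w.message) for w in caught])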

@@ -23,6 +23,8 @@ Neutron Specific Commandments
 - [N331] Detect wrong usage with assertTrue(isinstance()).
 - [N332] Use assertEqual(expected_http_code, observed_http_code) instead of
   assertEqual(observed_http_code, expected_http_code).
+- [N333] Validate that LOG.warning is used instead of LOG.warn. The latter
+  is deprecated.

 Creating Unit Tests
 -------------------

@@ -449,11 +449,11 @@ class OVSBridge(BaseOVS):
                                    if_exists=True)
         for result in results:
             if result['ofport'] == UNASSIGNED_OFPORT:
-                LOG.warn(_LW("Found not yet ready openvswitch port: %s"),
-                         result['name'])
+                LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
+                            result['name'])
             elif result['ofport'] == INVALID_OFPORT:
-                LOG.warn(_LW("Found failed openvswitch port: %s"),
-                         result['name'])
+                LOG.warning(_LW("Found failed openvswitch port: %s"),
+                            result['name'])
             elif 'attached-mac' in result['external_ids']:
                 port_id = self.portid_from_external_ids(result['external_ids'])
                 if port_id:
@@ -511,9 +511,9 @@ class OVSBridge(BaseOVS):
     @staticmethod
     def _check_ofport(port_id, port_info):
         if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-            LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
-                         " positive integer"),
-                     {'ofport': port_info['ofport'], 'vif': port_id})
+            LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
+                            "is not a positive integer"),
+                        {'ofport': port_info['ofport'], 'vif': port_id})
             return False
         return True

@@ -203,7 +203,7 @@ class DhcpAgent(manager.Manager):
         try:
             network = self.plugin_rpc.get_network_info(network_id)
             if not network:
-                LOG.warn(_LW('Network %s has been deleted.'), network_id)
+                LOG.warning(_LW('Network %s has been deleted.'), network_id)
             return network
         except Exception as e:
             self.schedule_resync(e, network_id)
@@ -223,8 +223,9 @@
             self.configure_dhcp_for_network(network)
             LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
         except (exceptions.NetworkNotFound, RuntimeError):
-            LOG.warn(_LW('Network %s may have been deleted and its resources '
-                         'may have already been disposed.'), network.id)
+            LOG.warning(_LW('Network %s may have been deleted and '
+                            'its resources may have already been disposed.'),
+                        network.id)

     def configure_dhcp_for_network(self, network):
         if not network.admin_state_up:
@@ -585,8 +586,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
                 self.schedule_resync("Agent has just been revived")
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report."
-                         " State report for this agent will be disabled."))
+            LOG.warning(_LW("Neutron server does not support state report. "
+                            "State report for this agent will be disabled."))
             self.heartbeat.stop()
             self.run()
             return

@@ -362,8 +362,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
     def _router_removed(self, router_id):
         ri = self.router_info.get(router_id)
         if ri is None:
-            LOG.warn(_LW("Info for router %s was not found. "
-                         "Performing router cleanup"), router_id)
+            LOG.warning(_LW("Info for router %s was not found. "
+                            "Performing router cleanup"), router_id)
             self.namespaces_manager.ensure_router_cleanup(router_id)
             return
@@ -683,8 +683,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
             self.agent_state.pop('start_flag', None)
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report. "
-                         "State report for this agent will be disabled."))
+            LOG.warning(_LW("Neutron server does not support state report. "
+                            "State report for this agent will be disabled."))
             self.heartbeat.stop()
             return
         except Exception:

@@ -209,10 +209,11 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
                 return True
             else:
                 if operation == 'add':
-                    LOG.warn(_LW("Device %s does not exist so ARP entry "
-                                 "cannot be updated, will cache information "
-                                 "to be applied later when the device exists"),
-                             device)
+                    LOG.warning(_LW("Device %s does not exist so ARP entry "
+                                    "cannot be updated, will cache "
+                                    "information to be applied later "
+                                    "when the device exists"),
+                                device)
                     self._cache_arp_entry(ip, mac, subnet_id, operation)
                 return False
         except Exception:

@@ -265,8 +265,8 @@ class RouterInfo(object):
         except RuntimeError:
             # any exception occurred here should cause the floating IP
             # to be set in error state
-            LOG.warn(_LW("Unable to configure IP address for "
-                         "floating IP: %s"), fip['id'])
+            LOG.warning(_LW("Unable to configure IP address for "
+                            "floating IP: %s"), fip['id'])

     def add_floating_ip(self, fip, interface_name, device):
         raise NotImplementedError()

@@ -250,10 +250,10 @@ class IptablesTable(object):
                                                  top, self.wrap_name,
                                                  comment=comment)))
         except ValueError:
-            LOG.warn(_LW('Tried to remove rule that was not there:'
-                         ' %(chain)r %(rule)r %(wrap)r %(top)r'),
-                     {'chain': chain, 'rule': rule,
-                      'top': top, 'wrap': wrap})
+            LOG.warning(_LW('Tried to remove rule that was not there:'
+                            ' %(chain)r %(rule)r %(wrap)r %(top)r'),
+                        {'chain': chain, 'rule': rule,
+                         'top': top, 'wrap': wrap})

     def _get_chain_rules(self, chain, wrap):
         chain = get_chain_name(chain, wrap)
@@ -696,8 +696,8 @@ class IptablesManager(object):
         """Return the sum of the traffic counters of all rules of a chain."""
         cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
         if not cmd_tables:
-            LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
-                         'does not exist'), chain)
+            LOG.warning(_LW('Attempted to get traffic counters of chain %s '
+                            'which does not exist'), chain)
             return
         name = get_chain_name(chain, wrap)

@@ -200,7 +200,7 @@ class MetadataProxyHandler(object):
             req.response.body = content
             return req.response
         elif resp.status == 403:
-            LOG.warn(_LW(
+            LOG.warning(_LW(
                 'The remote metadata server responded with Forbidden. This '
                 'response usually occurs when shared secrets do not match.'
             ))
@@ -215,7 +215,7 @@
             msg = _(
                 'Remote metadata server experienced an internal server error.'
             )
-            LOG.warn(msg)
+            LOG.warning(msg)
             explanation = six.text_type(msg)
             return webob.exc.HTTPInternalServerError(explanation=explanation)
         else:
@@ -267,8 +267,8 @@ class UnixDomainMetadataProxy(object):
                 use_call=self.agent_state.get('start_flag'))
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW('Neutron server does not support state report.'
-                         ' State report for this agent will be disabled.'))
+            LOG.warning(_LW('Neutron server does not support state report.'
+                            ' State report for this agent will be disabled.'))
             self.heartbeat.stop()
             return
         except Exception:

@@ -118,7 +118,7 @@ class PluginApi(object):
             # may not work correctly, however it can function in 'degraded'
             # mode, in that DVR routers may not be in the system yet, and
             # it might be not necessary to retrieve info about the host.
-            LOG.warn(_LW('DVR functionality requires a server upgrade.'))
+            LOG.warning(_LW('DVR functionality requires a server upgrade.'))
             res = [
                 self.get_device_details(context, device, agent_id, host)
                 for device in devices
@@ -196,7 +196,8 @@
             res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                              tunnel_type=tunnel_type, host=host)
         except oslo_messaging.UnsupportedVersion:
-            LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
+            LOG.warning(_LW('Tunnel synchronization requires a '
+                            'server upgrade.'))
             cctxt = self.client.prepare()
             res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                              tunnel_type=tunnel_type)

@@ -63,8 +63,8 @@ def _is_valid_driver_combination():

 def is_firewall_enabled():
     if not _is_valid_driver_combination():
-        LOG.warn(_LW("Driver configuration doesn't match with "
-                     "enable_security_group"))
+        LOG.warning(_LW("Driver configuration doesn't match with "
+                        "enable_security_group"))

     return cfg.CONF.SECURITYGROUP.enable_security_group
@@ -97,8 +97,8 @@ class SecurityGroupAgentRpc(object):
         firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
         LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
         if not _is_valid_driver_combination():
-            LOG.warn(_LW("Driver configuration doesn't match "
-                         "with enable_security_group"))
+            LOG.warning(_LW("Driver configuration doesn't match "
+                            "with enable_security_group"))
         firewall_class = firewall.load_firewall_driver_class(firewall_driver)
         try:
             self.firewall = firewall_class(

@@ -107,9 +107,9 @@ def _get_pagination_max_limit():
         if max_limit == 0:
             raise ValueError()
     except ValueError:
-        LOG.warn(_LW("Invalid value for pagination_max_limit: %s. It "
-                     "should be an integer greater to 0"),
-                 cfg.CONF.pagination_max_limit)
+        LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It "
+                        "should be an integer greater to 0"),
+                    cfg.CONF.pagination_max_limit)
     return max_limit

@@ -532,17 +532,17 @@ class ExtensionManager(object):
                     ext_name = mod_name[0].upper() + mod_name[1:]
                     new_ext_class = getattr(mod, ext_name, None)
                     if not new_ext_class:
-                        LOG.warn(_LW('Did not find expected name '
-                                     '"%(ext_name)s" in %(file)s'),
-                                 {'ext_name': ext_name,
-                                  'file': ext_path})
+                        LOG.warning(_LW('Did not find expected name '
+                                        '"%(ext_name)s" in %(file)s'),
+                                    {'ext_name': ext_name,
+                                     'file': ext_path})
                         continue
                     new_ext = new_ext_class()
                     self.add_extension(new_ext)
                 except Exception as exception:
-                    LOG.warn(_LW("Extension file %(f)s wasn't loaded due to "
-                                 "%(exception)s"),
-                             {'f': f, 'exception': exception})
+                    LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
+                                    "%(exception)s"),
+                                {'f': f, 'exception': exception})

     def add_extension(self, ext):
         # Do nothing if the extension doesn't check out
@@ -578,9 +578,9 @@ class PluginAwareExtensionManager(ExtensionManager):
         alias = extension.get_alias()
         supports_extension = alias in self.get_supported_extension_aliases()
         if not supports_extension:
-            LOG.warn(_LW("Extension %s not supported by any of loaded "
-                         "plugins"),
-                     alias)
+            LOG.warning(_LW("Extension %s not supported by any of loaded "
+                            "plugins"),
+                        alias)
         return supports_extension

     def _plugins_implement_interface(self, extension):
@@ -589,8 +589,9 @@
         for plugin in self.plugins.values():
             if isinstance(plugin, extension.get_plugin_interface()):
                 return True
-        LOG.warn(_LW("Loaded plugins do not implement extension %s interface"),
-                 extension.get_alias())
+        LOG.warning(_LW("Loaded plugins do not implement extension "
+                        "%s interface"),
+                    extension.get_alias())
         return False

     @classmethod

@@ -70,9 +70,10 @@ class DhcpAgentNotifyAPI(object):
                     context, 'network_create_end',
                     {'network': {'id': network['id']}}, agent['host'])
         elif not existing_agents:
-            LOG.warn(_LW('Unable to schedule network %s: no agents available; '
-                         'will retry on subsequent port and subnet creation '
-                         'events.'), network['id'])
+            LOG.warning(_LW('Unable to schedule network %s: no agents '
+                            'available; will retry on subsequent port '
+                            'and subnet creation events.'),
+                        network['id'])
         return new_agents + existing_agents

     def _get_enabled_agents(self, context, network, agents, method, payload):
@@ -87,12 +88,13 @@
         len_enabled_agents = len(enabled_agents)
         len_active_agents = len(active_agents)
         if len_active_agents < len_enabled_agents:
-            LOG.warn(_LW("Only %(active)d of %(total)d DHCP agents associated "
-                         "with network '%(net_id)s' are marked as active, so "
-                         "notifications may be sent to inactive agents."),
-                     {'active': len_active_agents,
-                      'total': len_enabled_agents,
-                      'net_id': network_id})
+            LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents "
+                            "associated with network '%(net_id)s' "
+                            "are marked as active, so notifications "
+                            "may be sent to inactive agents."),
+                        {'active': len_active_agents,
+                         'total': len_enabled_agents,
+                         'net_id': network_id})
         if not enabled_agents:
             num_ports = self.plugin.get_ports_count(
                 context, {'network_id': [network_id]})

@@ -104,9 +104,9 @@ class DhcpRpcCallback(object):
                 else:
                     ctxt.reraise = True
             net_id = port['port']['network_id']
-            LOG.warn(_LW("Action %(action)s for network %(net_id)s "
-                         "could not complete successfully: %(reason)s"),
-                     {"action": action, "net_id": net_id, 'reason': e})
+            LOG.warning(_LW("Action %(action)s for network %(net_id)s "
+                            "could not complete successfully: %(reason)s"),
+                        {"action": action, "net_id": net_id, 'reason': e})

     def get_active_networks(self, context, **kwargs):
         """Retrieve and return a list of the active network ids."""

@@ -198,8 +198,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
                       '%(host)s', {'agent_type': agent_type, 'host': host})
             return
         if self.is_agent_down(agent.heartbeat_timestamp):
-            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
-                     {'agent_type': agent_type, 'agent_id': agent.id})
+            LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'),
+                        {'agent_type': agent_type, 'agent_id': agent.id})
         return agent

     @staticmethod
@@ -222,9 +222,9 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
         except Exception:
             msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s on '
                       'host %(host)s is invalid.')
-            LOG.warn(msg, {'dict_name': dict_name,
-                           'agent_type': agent_db.agent_type,
-                           'host': agent_db.host})
+            LOG.warning(msg, {'dict_name': dict_name,
+                              'agent_type': agent_db.agent_type,
+                              'host': agent_db.host})
             conf = {}
         return conf
@@ -286,11 +286,11 @@
                      (agent['agent_type'],
                       agent['heartbeat_timestamp'],
                       agent['host']) for agent in dead_agents])
-            LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
-                         "out of %(total)s:\n%(data)s"),
-                     {'count': len(dead_agents),
-                      'total': len(agents),
-                      'data': data})
+            LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents "
+                            "out of %(total)s:\n%(data)s"),
+                        {'count': len(dead_agents),
+                         'total': len(agents),
+                         'data': data})
         else:
             LOG.debug("Agent healthcheck: found %s active agents",
                       len(agents))

@@ -143,10 +143,11 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
         tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                               timeutils.utcnow())
         if tdelta.total_seconds() > cfg.CONF.agent_down_time:
-            LOG.warn(_LW("Time since last %s agent reschedule check has "
-                         "exceeded the interval between checks. Waiting "
-                         "before check to allow agents to send a heartbeat "
-                         "in case there was a clock adjustment."), agent_type)
+            LOG.warning(_LW("Time since last %s agent reschedule check has "
+                            "exceeded the interval between checks. Waiting "
+                            "before check to allow agents to send a heartbeat "
+                            "in case there was a clock adjustment."),
+                        agent_type)
             time.sleep(agent_dead_limit)
         self._clock_jump_canary = timeutils.utcnow()
@@ -282,17 +283,17 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
         active_agents = [agent for agent in agents if
                          self.is_eligible_agent(context, True, agent)]
         if not active_agents:
-            LOG.warn(_LW("No DHCP agents available, "
-                         "skipping rescheduling"))
+            LOG.warning(_LW("No DHCP agents available, "
+                            "skipping rescheduling"))
             return
         for binding in dead_bindings:
-            LOG.warn(_LW("Removing network %(network)s from agent "
-                         "%(agent)s because the agent did not report "
-                         "to the server in the last %(dead_time)s "
-                         "seconds."),
-                     {'network': binding.network_id,
-                      'agent': binding.dhcp_agent_id,
-                      'dead_time': agent_dead_limit})
+            LOG.warning(_LW("Removing network %(network)s from agent "
+                            "%(agent)s because the agent did not report "
+                            "to the server in the last %(dead_time)s "
+                            "seconds."),
+                        {'network': binding.network_id,
+                         'agent': binding.dhcp_agent_id,
+                         'dead_time': agent_dead_limit})
             # save binding object to avoid ObjectDeletedError
             # in case binding is concurrently deleted from the DB
             saved_binding = {'net': binding.network_id,

@@ -118,7 +118,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                     agents_back_online.add(binding.l3_agent_id)
                     continue

-                LOG.warn(_LW(
+                LOG.warning(_LW(
                     "Rescheduling router %(router)s from agent %(agent)s "
                     "because the agent did not report to the server in "
                     "the last %(dead_time)s seconds."),

@@ -375,8 +375,8 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
         try:
             mac_address = query.one()[0]
         except (exc.NoResultFound, exc.MultipleResultsFound):
-            LOG.warn(_LW('No valid gateway port on subnet %s is '
-                         'found for IPv6 RA'), subnet['id'])
+            LOG.warning(_LW('No valid gateway port on subnet %s is '
+                            'found for IPv6 RA'), subnet['id'])
             return
         lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
             n_const.IPV6_LLA_PREFIX,

@@ -108,7 +108,7 @@ class NeutronDebugAgent(object):
             try:
                 ip.netns.delete(namespace)
             except Exception:
-                LOG.warn(_LW('Failed to delete namespace %s'), namespace)
+                LOG.warning(_LW('Failed to delete namespace %s'), namespace)
         else:
             self.driver.unplug(self.driver.get_device_name(port),
                                bridge=bridge)

@@ -35,7 +35,6 @@ _all_log_levels = {
     # a exception
     'error': '_LE',
     'info': '_LI',
-    'warn': '_LW',
     'warning': '_LW',
     'critical': '_LC',
     'exception': '_LE',
@@ -55,6 +54,8 @@ log_translation_hint = re.compile(
     '|'.join('(?:%s)' % _regex_for_level(level, hint)
              for level, hint in six.iteritems(_all_log_levels)))

+log_warn = re.compile(
+    r"(.)*LOG\.(warn)\(\s*('|\"|_)")

 contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
@@ -218,6 +219,12 @@ def check_assertequal_for_httpcode(logical_line, filename):
             yield (0, msg)


+def check_log_warn_deprecated(logical_line, filename):
+    msg = "N333: Use LOG.warning due to compatibility with py3"
+    if log_warn.match(logical_line):
+        yield (0, msg)
+
+
 def factory(register):
     register(validate_log_translations)
     register(use_jsonutils)
@@ -233,3 +240,4 @@ def factory(register):
     register(check_assertempty)
     register(check_assertisinstance)
     register(check_assertequal_for_httpcode)
+    register(check_log_warn_deprecated)
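
As a side note (not part of the change), the regex added above can be exercised on its own to see which lines the new N333 check would flag; the sample lines below are made up for illustration.

    import re

    # Same pattern as the log_warn regex added in the hacking-check hunk above.
    log_warn = re.compile(r"(.)*LOG\.(warn)\(\s*('|\"|_)")

    samples = [
        'LOG.warn("something happened")',          # flagged: literal string argument
        "LOG.warn(_LW('something happened'))",     # flagged: translated message
        'LOG.warning(_LW("something happened"))',  # not flagged: already LOG.warning
        'LOG.warn(msg)',                           # not flagged: no quote or "_" right after "("
    ]

    for line in samples:
        print("N333" if log_warn.match(line) else "ok  ", line)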

@@ -71,7 +71,7 @@ class ItemController(utils.NeutronPecanController):
         controller = manager.NeutronManager.get_controller_for_resource(
             collection)
         if not controller:
-            LOG.warn(_LW("No controller found for: %s - returning response "
-                         "code 404"), collection)
+            LOG.warning(_LW("No controller found for: %s - returning response "
+                            "code 404"), collection)
             pecan.abort(404)
         return controller, remainder

@@ -93,8 +93,8 @@ class V2Controller(object):
         controller = manager.NeutronManager.get_controller_for_resource(
             collection)
         if not controller:
-            LOG.warn(_LW("No controller found for: %s - returning response "
-                         "code 404"), collection)
+            LOG.warning(_LW("No controller found for: %s - returning response "
+                            "code 404"), collection)
             pecan.abort(404)
         # Store resource and collection names in pecan request context so that
         # hooks can leverage them if necessary. The following code uses

@@ -52,7 +52,7 @@ def _plugin_for_resource(collection):
                 hasattr(plugin, 'get_%s' % collection)):
             # This plugin implements this resource
             return plugin
-    LOG.warn(_LW("No plugin found for:%s"), collection)
+    LOG.warning(_LW("No plugin found for: %s"), collection)


 def _handle_plurals(collection):
@@ -127,15 +127,15 @@ def initialize_all():
             manager.NeutronManager.set_plugin_for_resource(
                 resource, plugin)
         else:
-            LOG.warn(_LW("No plugin found for resource:%s. API calls "
-                         "may not be correctly dispatched"), resource)
+            LOG.warning(_LW("No plugin found for resource:%s. API calls "
+                            "may not be correctly dispatched"), resource)
         controller = pecan_controllers.get(collection)
         if not controller:
             LOG.debug("Building controller for resource:%s", resource)
             controller = res_ctrl.CollectionsController(collection, resource)
         else:
-            LOG.debug("There are already controllers for resource:%s",
-                      resource)
+            LOG.debug("There are already controllers for resource: %s",
+                      resource)
         manager.NeutronManager.set_controller_for_resource(

@@ -24,10 +24,10 @@ LOG = logging.getLogger(__name__)
 # TODO(claudiub): Remove this module at the beginning of the O cycle.
 new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'

-LOG.warn(_LW("You are using the deprecated firewall driver: %(deprecated)s. "
-             "Use the recommended driver %(new)s instead."),
-         {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
-          'new': new_driver})
+LOG.warning(_LW("You are using the deprecated firewall driver: "
+                "%(deprecated)s.Use the recommended driver %(new)s instead."),
+            {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
+             'new': new_driver})

 HyperVSecurityGroupsDriver = moves.moved_class(
     sg_driver.HyperVSecurityGroupsDriver,

@@ -326,7 +326,7 @@ class SriovNicSwitchAgent(object):
                 self.ext_manager.delete_port(self.context, port)
             else:
-                LOG.warning(_LW("port_id to device with MAC "
-                            "%s not found"), mac)
+                LOG.warning(_LW("port_id to device with MAC "
+                                "%s not found"), mac)
             dev_details = self.plugin_rpc.update_device_down(self.context,
                                                              mac,
                                                              self.agent_id,

@@ -138,8 +138,8 @@ class OpenFlowSwitchMixin(object):
         cookies = set([f.cookie for f in self.dump_flows()]) - \
             self.reserved_cookies
         for c in cookies:
-            LOG.warn(_LW("Deleting flow with cookie 0x%(cookie)x") % {
-                'cookie': c})
+            LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"),
+                        {'cookie': c})
             self.delete_flows(cookie=c, cookie_mask=((1 << 64) - 1))

     def install_goto_next(self, table_id):

@@ -1368,8 +1368,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
         # error condition of which operators should be aware
         port_needs_binding = True
         if not vif_port.ofport:
-            LOG.warn(_LW("VIF port: %s has no ofport configured, "
-                         "and might not be able to transmit"), vif_port.vif_id)
+            LOG.warning(_LW("VIF port: %s has no ofport configured, "
+                            "and might not be able to transmit"),
+                        vif_port.vif_id)
         if vif_port:
             if admin_state_up:
                 port_needs_binding = self.port_bound(
@@ -1648,7 +1649,7 @@
         try:
             return '%08x' % netaddr.IPAddress(ip_address, version=4)
         except Exception:
-            LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
+            LOG.warning(_LW("Invalid remote IP: %s"), ip_address)
             return

     def tunnel_sync(self):
@@ -1701,11 +1702,11 @@
         # Check for the canary flow
         status = self.int_br.check_canary_table()
         if status == constants.OVS_RESTARTED:
-            LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
-                         "bridges and recover ports."))
+            LOG.warning(_LW("OVS is restarted. OVSNeutronAgent will reset "
+                            "bridges and recover ports."))
         elif status == constants.OVS_DEAD:
-            LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
-                         "and checking OVS status periodically."))
+            LOG.warning(_LW("OVS is dead. OVSNeutronAgent will keep running "
+                            "and checking OVS status periodically."))
         return status

     def loop_count_and_wait(self, start_time, port_stats):
@@ -1760,7 +1761,7 @@
                     consecutive_resyncs = consecutive_resyncs + 1
                     if (consecutive_resyncs >=
                             constants.MAX_DEVICE_RETRIES):
-                        LOG.warn(_LW(
+                        LOG.warning(_LW(
                             "Clearing cache of registered ports,"
                             " retries to resync were > %s"),
                             constants.MAX_DEVICE_RETRIES)

@@ -352,7 +352,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
             else:
                 # at least one of drivers does not support QoS, meaning
                 # there are no rule types supported by all of them
-                LOG.warn(
+                LOG.warning(
                     _LW("%s does not support QoS; "
                         "no rule types available"),
                     driver.name)

@@ -109,8 +109,9 @@ def _build_subattr_match_rule(attr_name, attr, action, target):
     validate = attr['validate']
     key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
     if not key:
-        LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
-                 attr_name)
+        LOG.warning(_LW("Unable to find data type descriptor "
+                        "for attribute %s"),
+                    attr_name)
         return
     data = validate[key[0]]
     if not isinstance(data, dict):

@@ -212,7 +212,7 @@ class ResourceRegistry(object):

     def register_resource(self, resource):
         if resource.name in self._resources:
-            LOG.warn(_LW('%s is already registered'), resource.name)
+            LOG.warning(_LW('%s is already registered'), resource.name)
         if resource.name in self._tracked_resource_mappings:
             resource.register_events()
         self._resources[resource.name] = resource

@@ -59,7 +59,8 @@ class AutoScheduler(object):
             for dhcp_agent in dhcp_agents:
                 if agents_db.AgentDbMixin.is_agent_down(
                         dhcp_agent.heartbeat_timestamp):
-                    LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)
+                    LOG.warning(_LW('DHCP agent %s is not active'),
+                                dhcp_agent.id)
                     continue
                 for net_id in net_ids:
                     agents = plugin.get_dhcp_agents_hosting_networks(
@@ -207,7 +208,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
         active_dhcp_agents = plugin.get_agents_db(
             context, filters=filters)
         if not active_dhcp_agents:
-            LOG.warn(_LW('No more DHCP agents'))
+            LOG.warning(_LW('No more DHCP agents'))
             return []
         return active_dhcp_agents

@@ -145,8 +145,8 @@ class L3Scheduler(object):
         target_routers = self._get_routers_can_schedule(
             context, plugin, unscheduled_routers, l3_agent)
         if not target_routers:
-            LOG.warn(_LW('No routers compatible with L3 agent configuration '
-                         'on host %s'), host)
+            LOG.warning(_LW('No routers compatible with L3 agent '
+                            'configuration on host %s'), host)
             return False

         self._bind_routers(context, plugin, target_routers, l3_agent)
@@ -170,14 +170,14 @@
         active_l3_agents = plugin.get_l3_agents(context, active=True)
         if not active_l3_agents:
-            LOG.warn(_LW('No active L3 agents'))
+            LOG.warning(_LW('No active L3 agents'))
             return []
         candidates = plugin.get_l3_agent_candidates(context,
                                                     sync_router,
                                                     active_l3_agents)
         if not candidates:
-            LOG.warn(_LW('No L3 agents can host the router %s'),
-                     sync_router['id'])
+            LOG.warning(_LW('No L3 agents can host the router %s'),
+                        sync_router['id'])

         return candidates

@@ -140,7 +140,7 @@ class BgpDrAgentSchedulerBase(BgpDrAgentFilter):
         if agents_db.AgentDbMixin.is_agent_down(
                 bgp_dragent.heartbeat_timestamp):
-            LOG.warn(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
+            LOG.warning(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
             return False

         if self._is_bgp_speaker_hosted(context, bgp_dragent['id']):

@@ -276,8 +276,8 @@ class MeteringAgentWithStateReport(MeteringAgent):
             self.use_call = False
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report."
-                         " State report for this agent will be disabled."))
+            LOG.warning(_LW("Neutron server does not support state report. "
+                            "State report for this agent will be disabled."))
             self.heartbeat.stop()
             return
         except Exception:

@@ -707,7 +707,7 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):

     def test_enable_dhcp_helper_network_none(self):
         self.plugin.get_network_info.return_value = None
-        with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
+        with mock.patch.object(dhcp_agent.LOG, 'warning') as log:
             self.dhcp.enable_dhcp_helper('fake_id')
             self.plugin.assert_has_calls(
                 [mock.call.get_network_info('fake_id')])

@@ -927,7 +927,7 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase):
     def test_remove_nonexistent_rule(self):
         with mock.patch.object(iptables_manager, "LOG") as log:
             self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
-        log.warn.assert_called_once_with(
+        log.warning.assert_called_once_with(
             'Tried to remove rule that was not there: '
             '%(chain)r %(rule)r %(wrap)r %(top)r',
             {'wrap': True, 'top': False, 'rule': '-j DROP',
@@ -1001,7 +1001,7 @@
             acc = self.iptables.get_traffic_counters('chain1')
             self.assertIsNone(acc)
             self.assertEqual(0, self.execute.call_count)
-            log.warn.assert_called_once_with(
+            log.warning.assert_called_once_with(
                 'Attempted to get traffic counters of chain %s which '
                 'does not exist', 'chain1')

@@ -51,7 +51,7 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase):
             new_agents = []
         self.assertEqual(new_agents + existing_agents, agents)
         self.assertEqual(expected_casts, self.mock_cast.call_count)
-        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
+        self.assertEqual(expected_warnings, self.mock_log.warning.call_count)

     def test__schedule_network(self):
         agent = agents_db.Agent()
@@ -86,7 +86,7 @@
         if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
             agents = [x for x in agents if x.admin_state_up]
         self.assertEqual(agents, enabled_agents)
-        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
+        self.assertEqual(expected_warnings, self.mock_log.warning.call_count)
         self.assertEqual(expected_errors, self.mock_log.error.call_count)

     def test__get_enabled_agents(self):

@@ -182,7 +182,7 @@ class TestAgentsDbMixin(TestAgentsDbBase):
                    'alive': True}]
         with mock.patch.object(self.plugin, 'get_agents',
                                return_value=agents),\
-                mock.patch.object(agents_db.LOG, 'warn') as warn,\
+                mock.patch.object(agents_db.LOG, 'warning') as warn,\
                 mock.patch.object(agents_db.LOG, 'debug') as debug:
             self.plugin.agent_health_check()
         self.assertTrue(debug.called)

@@ -29,7 +29,6 @@ class HackingTestCase(base.BaseTestCase):
     expected_marks = {
         'error': '_LE',
         'info': '_LI',
-        'warn': '_LW',
         'warning': '_LW',
         'critical': '_LC',
         'exception': '_LE',