Enforce log hints in neutron.services.loadbalancer

This change enforces the use of log hints and removes debug-level log
translation. The modifications are validated through a hacking rule,
and the change follows the logging guidelines.

Validate that hacking rules apply to the following directories:
    - neutron/services/loadbalancer

Change-Id: I5f2a1a8861728399f3852fc9704eb160c35f7f8d
Partial-bug: #1320867
Cedric Brandily 2014-11-11 17:01:54 +01:00
parent cc537ebf57
commit 0e8c8a4bc0
13 changed files with 175 additions and 192 deletions
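
For context on the diffs below, here is a minimal sketch of the logging convention this series enforces, based on the OpenStack i18n guidelines: debug messages are never translated, while info, warning, and error/exception messages are wrapped in the matching hint marker and keep lazy formatting (arguments are passed to the LOG call rather than interpolated with %). The report_state helper and its agent_id argument are illustrative only, not part of this commit.

from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def report_state(agent_id):
    # Debug level: plain string, no translation marker.
    LOG.debug("Reporting state for agent %s", agent_id)
    # Info level: _LI hint.
    LOG.info(_LI("Agent %s started"), agent_id)
    # Warning level: _LW hint.
    LOG.warning(_LW("Agent %s is not responding"), agent_id)
    try:
        raise RuntimeError("simulated failure")  # stand-in for a real call
    except Exception:
        # Error/exception level: _LE hint.
        LOG.exception(_LE("Failed reporting state!"))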

View File

@@ -58,7 +58,8 @@ def _directory_to_check_translation(filename):
"neutron/scheduler",
"neutron/server",
"neutron/services/firewall",
"neutron/services/l3_router"]
"neutron/services/l3_router",
"neutron/services/loadbalancer"]
return any([dir in filename for dir in dirs])
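
The directory list above is what opts neutron/services/loadbalancer into the tree's hacking checks. As a rough sketch only (the regexes and messages below are simplified stand-ins, not the exact rules in neutron/hacking/checks.py), a pep8/hacking plugin in this style would flag translated debug logs and untranslated logs at the other levels, reusing the _directory_to_check_translation helper shown above:

import re

# Illustrative patterns; the real neutron checks use different regexes.
_translated_debug = re.compile(r"LOG\.debug\(\s*_\(")
_missing_hint = re.compile(
    r"LOG\.(error|exception|info|warn|warning|critical)\(\s*('|\")")


def check_log_translation_hints(logical_line, filename):
    # Only enforce the rules inside the opted-in directories.
    if not _directory_to_check_translation(filename):
        return
    if _translated_debug.search(logical_line):
        yield (0, "Debug log messages must not be translated")
    if _missing_hint.search(logical_line):
        yield (0, "Log messages require translation hints (_LE/_LI/_LW)")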

View File

@@ -20,6 +20,7 @@ from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context
from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
@@ -116,7 +117,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
LOG.exception(_LE("Failed reporting state!"))
def initialize_service_hook(self, started_by):
self.sync_state()
@@ -136,7 +137,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
LOG.exception(_('Error updating statistics on pool %s'),
LOG.exception(_LE('Error updating statistics on pool %s'),
pool_id)
self.needs_resync = True
@@ -152,7 +153,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
self._reload_pool(pool_id)
except Exception:
LOG.exception(_('Unable to retrieve ready devices'))
LOG.exception(_LE('Unable to retrieve ready devices'))
self.needs_resync = True
self.remove_orphans()
@@ -169,8 +170,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
logical_config = self.plugin_rpc.get_logical_device(pool_id)
driver_name = logical_config['driver']
if driver_name not in self.device_drivers:
LOG.error(_('No device driver '
'on agent: %s.'), driver_name)
LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status(
'pool', pool_id, constants.ERROR)
return
@@ -179,7 +179,8 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
self.instance_mapping[pool_id] = driver_name
self.plugin_rpc.pool_deployed(pool_id)
except Exception:
LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
LOG.exception(_LE('Unable to deploy instance for pool: %s'),
pool_id)
self.needs_resync = True
def _destroy_pool(self, pool_id):
@@ -189,7 +190,8 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
del self.instance_mapping[pool_id]
self.plugin_rpc.pool_destroyed(pool_id)
except Exception:
LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
LOG.exception(_LE('Unable to destroy device for pool: %s'),
pool_id)
self.needs_resync = True
def remove_orphans(self):
@@ -202,8 +204,8 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
pass # Not all drivers will support this
def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
'%(driver)s'),
LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device '
'driver %(driver)s'),
{'operation': operation.capitalize(), 'obj': obj_type,
'id': obj_id, 'driver': driver})
self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)
@@ -234,7 +236,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
def create_pool(self, context, pool, driver_name):
if driver_name not in self.device_drivers:
LOG.error(_('No device driver on agent: %s.'), driver_name)
LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
return
@@ -328,7 +330,7 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks):
self.needs_resync = True
else:
for pool_id in self.instance_mapping.keys():
LOG.info(_("Destroying pool %s due to agent disabling"),
LOG.info(_LI("Destroying pool %s due to agent disabling"),
pool_id)
self._destroy_pool(pool_id)
LOG.info(_("Agent_updated by server side %s!"), payload)
LOG.info(_LI("Agent_updated by server side %s!"), payload)

View File

@@ -24,6 +24,7 @@ from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -98,21 +99,21 @@ class ChanceScheduler(object):
lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
context, pool['id'])
if lbaas_agent:
LOG.debug(_('Pool %(pool_id)s has already been hosted'
' by lbaas agent %(agent_id)s'),
LOG.debug('Pool %(pool_id)s has already been hosted'
' by lbaas agent %(agent_id)s',
{'pool_id': pool['id'],
'agent_id': lbaas_agent['id']})
return
active_agents = plugin.get_lbaas_agents(context, active=True)
if not active_agents:
LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])
LOG.warn(_LW('No active lbaas agents for pool %s'), pool['id'])
return
candidates = plugin.get_lbaas_agent_candidates(device_driver,
active_agents)
if not candidates:
LOG.warn(_('No lbaas agent supporting device driver %s'),
LOG.warn(_LW('No lbaas agent supporting device driver %s'),
device_driver)
return
@@ -121,8 +122,8 @@ class ChanceScheduler(object):
binding.agent = chosen_agent
binding.pool_id = pool['id']
context.session.add(binding)
LOG.debug(_('Pool %(pool_id)s is scheduled to '
'lbaas agent %(agent_id)s'),
LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent '
'%(agent_id)s',
{'pool_id': pool['id'],
'agent_id': chosen_agent['id']})
return chosen_agent

View File

@@ -24,6 +24,7 @@ from neutron.db import agents_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.extensions import lbaas_agentscheduler
from neutron.extensions import portbindings
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
@@ -67,7 +68,8 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback):
if not agents:
return []
elif len(agents) > 1:
LOG.warning(_('Multiple lbaas agents found on host %s'), host)
LOG.warning(_LW('Multiple lbaas agents found on host %s'),
host)
pools = self.plugin.list_pools_on_lbaas_agent(context,
agents[0].id)
pool_ids = [pool['id'] for pool in pools['pools']]
@@ -158,9 +160,9 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback):
except n_exc.NotFound:
# update_status may come from agent on an object which was
# already deleted from db with other request
LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
'not found in the DB, it was probably deleted '
'concurrently'),
LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
'not found in the DB, it was probably deleted '
'concurrently'),
{'obj_type': obj_type, 'obj_id': obj_id})
def pool_destroyed(self, context, pool_id=None):
@@ -181,8 +183,7 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback):
port_id
)
except n_exc.PortNotFound:
msg = _('Unable to find port %s to plug.')
LOG.debug(msg, port_id)
LOG.debug('Unable to find port %s to plug.', port_id)
return
port['admin_state_up'] = True
@@ -205,9 +206,9 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback):
port_id
)
except n_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
return
port['admin_state_up'] = False
@@ -222,9 +223,9 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback):
)
except n_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
LOG.debug('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.',
port_id)
def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
self.plugin.update_pool_stats(context, pool_id, data=stats)

View File

@@ -17,6 +17,7 @@ from eventlet import greenthread
from eventlet import queue
from heleosapi import exceptions as h_exc
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.embrane.common import contexts as ctx
from neutron.services.loadbalancer.drivers.embrane.agent import lb_operations
@@ -103,4 +104,4 @@ class Dispatcher(object):
operation_context.n_context,
operation_context.item)
except Exception:
LOG.exception(_('Unhandled exception occurred'))
LOG.exception(_LE('Unhandled exception occurred'))

View File

@@ -17,6 +17,7 @@ import functools
from heleosapi import exceptions as h_exc
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.services.loadbalancer import constants as lcon
from neutron.services.loadbalancer.drivers.embrane import constants as econ
@@ -106,8 +107,8 @@ def _delete_load_balancer(driver, context, vip):
try:
driver._heleos_api.delete_dva(context.tenant_id, vip['id'])
except h_exc.DvaNotFound:
LOG.warning(_('The load balancer %s had no physical representation, '
'likely already deleted'), vip['id'])
LOG.warning(_LW('The load balancer %s had no physical representation, '
'likely already deleted'), vip['id'])
return econ.DELETED

View File

@@ -22,6 +22,7 @@ from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.extensions import loadbalancer as lb_ext
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pcon
from neutron.plugins.embrane.common import contexts as embrane_ctx
@@ -126,8 +127,8 @@ class EmbraneLbaas(abstract_driver.LoadBalancerAbstractDriver):
subnet = self.plugin._core_plugin.get_subnet(context,
db_pool["subnet_id"])
except n_exc.SubnetNotFound:
LOG.warning(_("Subnet assigned to pool %s doesn't exist, "
"backend port can't be created"), db_pool['id'])
LOG.warning(_LW("Subnet assigned to pool %s doesn't exist, "
"backend port can't be created"), db_pool['id'])
return
fixed_ip = {'subnet_id': subnet['id'],

View File

@@ -18,6 +18,7 @@ from heleosapi import exceptions as h_exc
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as sdb
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as ccon
@@ -51,7 +52,7 @@ class Poller(object):
try:
self.synchronize_vips(ctx)
except h_exc.PollingException as e:
LOG.exception(_('Unhandled exception occurred'), e)
LOG.exception(_LE('Unhandled exception occurred'), e)
def synchronize_vips(self, ctx):
session = ctx.session

View File

@@ -25,6 +25,7 @@ from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE, _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
@@ -168,7 +169,7 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warn(_('Stats socket not found for pool %s'), pool_id)
LOG.warn(_LW('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
@@ -210,7 +211,7 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_('Error while connecting to stats socket: %s'), e)
LOG.warn(_LW('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
@@ -389,6 +390,6 @@ def kill_pids_in_file(root_helper, pid_path):
utils.execute(['kill', '-9', pid], root_helper)
except RuntimeError:
LOG.exception(
_('Unable to kill haproxy process: %s'),
_LE('Unable to kill haproxy process: %s'),
pid
)

View File

@@ -18,6 +18,7 @@ from oslo.serialization import jsonutils
import requests
from neutron.common import exceptions as n_exc
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -52,9 +53,8 @@ class NSClient(object):
def __init__(self, service_uri, username, password):
if not service_uri:
msg = _("No NetScaler Control Center URI specified. "
"Cannot connect.")
LOG.exception(msg)
LOG.exception(_LE("No NetScaler Control Center URI specified. "
"Cannot connect."))
raise NCCException(NCCException.CONNECTION_ERROR)
self.service_uri = service_uri.strip('/')
self.auth = None
@@ -131,53 +131,47 @@ class NSClient(object):
response = requests.request(method, url=resource_uri,
headers=headers, data=body)
except requests.exceptions.ConnectionError:
msg = (_("Connection error occurred while connecting to %s") %
self.service_uri)
LOG.exception(msg)
LOG.exception(_LE("Connection error occurred while connecting "
"to %s"),
self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.SSLError:
msg = (_("SSL error occurred while connecting to %s") %
self.service_uri)
LOG.exception(msg)
LOG.exception(_LE("SSL error occurred while connecting to %s"),
self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.Timeout:
msg = _("Request to %s timed out") % self.service_uri
LOG.exception(msg)
LOG.exception(_LE("Request to %s timed out"), self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except (requests.exceptions.URLRequired,
requests.exceptions.InvalidURL,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema):
msg = _("Request did not specify a valid URL")
LOG.exception(msg)
LOG.exception(_LE("Request did not specify a valid URL"))
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.TooManyRedirects:
msg = _("Too many redirects occurred for request to %s")
LOG.exception(msg)
LOG.exception(_LE("Too many redirects occurred for request to %s"))
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.RequestException:
msg = (_("A request error while connecting to %s") %
self.service_uri)
LOG.exception(msg)
LOG.exception(_LE("A request error while connecting to %s"),
self.service_uri)
raise NCCException(NCCException.REQUEST_ERROR)
except Exception:
msg = (_("A unknown error occurred during request to %s") %
self.service_uri)
LOG.exception(msg)
LOG.exception(_LE("A unknown error occurred during request to %s"),
self.service_uri)
raise NCCException(NCCException.UNKNOWN_ERROR)
resp_dict = self._get_response_dict(response)
LOG.debug(_("Response: %s"), resp_dict['body'])
LOG.debug("Response: %s", resp_dict['body'])
response_status = resp_dict['status']
if response_status == requests.codes.unauthorized:
LOG.exception(_("Unable to login. Invalid credentials passed."
"for: %s"), self.service_uri)
LOG.exception(_LE("Unable to login. Invalid credentials passed."
"for: %s"),
self.service_uri)
raise NCCException(NCCException.RESPONSE_ERROR)
if not self._is_valid_response(response_status):
msg = (_("Failed %(method)s operation on %(url)s "
"status code: %(response_status)s") %
{"method": method,
"url": resource_uri,
"response_status": response_status})
LOG.exception(msg)
LOG.exception(_LE("Failed %(method)s operation on %(url)s "
"status code: %(response_status)s"),
{"method": method,
"url": resource_uri,
"response_status": response_status})
raise NCCException(NCCException.RESPONSE_ERROR)
return response_status, resp_dict

View File

@@ -16,6 +16,7 @@ from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.db.loadbalancer import loadbalancer_db
from neutron.openstack.common.gettextutils import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
@@ -72,8 +73,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
network_info = self._get_vip_network_info(context, vip)
ncc_vip = self._prepare_vip_for_creation(vip)
ncc_vip = dict(ncc_vip.items() + network_info.items())
msg = _("NetScaler driver vip creation: %s") % repr(ncc_vip)
LOG.debug(msg)
LOG.debug("NetScaler driver vip creation: %r", ncc_vip)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
@@ -87,9 +87,8 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
"""Update a vip on a NetScaler device."""
update_vip = self._prepare_vip_for_update(vip)
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
msg = (_("NetScaler driver vip %(vip_id)s update: %(vip_obj)s") %
{"vip_id": vip["id"], "vip_obj": repr(vip)})
LOG.debug(msg)
LOG.debug("NetScaler driver vip %(vip_id)s update: %(vip_obj)r",
{"vip_id": vip["id"], "vip_obj": vip})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
@@ -102,8 +101,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def delete_vip(self, context, vip):
"""Delete a vip on a NetScaler device."""
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
msg = _("NetScaler driver vip removal: %s") % vip["id"]
LOG.debug(msg)
LOG.debug("NetScaler driver vip removal: %s", vip["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
@@ -123,8 +121,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
network_info)
ncc_pool = self._prepare_pool_for_creation(pool)
ncc_pool = dict(ncc_pool.items() + network_info.items())
msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
LOG.debug(msg)
LOG.debug("NetScaler driver pool creation: %r", ncc_pool)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
@@ -138,9 +135,8 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
"""Update a pool on a NetScaler device."""
ncc_pool = self._prepare_pool_for_update(pool)
resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
{"pool_id": old_pool["id"], "pool_obj": repr(ncc_pool)})
LOG.debug(msg)
LOG.debug("NetScaler driver pool %(pool_id)s update: %(pool_obj)r",
{"pool_id": old_pool["id"], "pool_obj": ncc_pool})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
@@ -153,8 +149,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def delete_pool(self, context, pool):
"""Delete a pool on a NetScaler device."""
resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
msg = _("NetScaler driver pool removal: %s") % pool["id"]
LOG.debug(msg)
LOG.debug("NetScaler driver pool removal: %s", pool["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
@@ -170,9 +165,8 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def create_member(self, context, member):
"""Create a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_creation(member)
msg = (_("NetScaler driver poolmember creation: %s") %
repr(ncc_member))
LOG.info(msg)
LOG.info(_LI("NetScaler driver poolmember creation: %r"),
ncc_member)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id,
@@ -188,11 +182,10 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
"""Update a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_update(member)
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
msg = (_("NetScaler driver poolmember %(member_id)s update:"
" %(member_obj)s") %
{"member_id": old_member["id"],
"member_obj": repr(ncc_member)})
LOG.debug(msg)
LOG.debug("NetScaler driver poolmember %(member_id)s update: "
"%(member_obj)r",
{"member_id": old_member["id"],
"member_obj": ncc_member})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
@@ -205,9 +198,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def delete_member(self, context, member):
"""Delete a pool member on a NetScaler device."""
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
msg = (_("NetScaler driver poolmember removal: %s") %
member["id"])
LOG.debug(msg)
LOG.debug("NetScaler driver poolmember removal: %s", member["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
@@ -223,11 +214,9 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
pool_id)
resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE)
msg = (_("NetScaler driver healthmonitor creation for pool %(pool_id)s"
": %(monitor_obj)s") %
{"pool_id": pool_id,
"monitor_obj": repr(ncc_hm)})
LOG.debug(msg)
LOG.debug("NetScaler driver healthmonitor creation for pool "
"%(pool_id)s: %(monitor_obj)r",
{"pool_id": pool_id, "monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, resource_path,
@@ -246,11 +235,10 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
resource_path = "%s/%s" % (MONITORS_RESOURCE,
old_health_monitor["id"])
msg = (_("NetScaler driver healthmonitor %(monitor_id)s update: "
"%(monitor_obj)s") %
{"monitor_id": old_health_monitor["id"],
"monitor_obj": repr(ncc_hm)})
LOG.debug(msg)
LOG.debug("NetScaler driver healthmonitor %(monitor_id)s update: "
"%(monitor_obj)r",
{"monitor_id": old_health_monitor["id"],
"monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
@@ -267,11 +255,10 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE,
health_monitor["id"])
msg = (_("NetScaler driver healthmonitor %(monitor_id)s"
"removal for pool %(pool_id)s") %
{"monitor_id": health_monitor["id"],
"pool_id": pool_id})
LOG.debug(msg)
LOG.debug("NetScaler driver healthmonitor %(monitor_id)s"
"removal for pool %(pool_id)s",
{"monitor_id": health_monitor["id"],
"pool_id": pool_id})
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
@@ -287,8 +274,7 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
def stats(self, context, pool_id):
"""Retrieve pool statistics from the NetScaler device."""
resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
msg = _("NetScaler driver pool stats retrieval: %s") % pool_id
LOG.debug(msg)
LOG.debug("NetScaler driver pool stats retrieval: %s", pool_id)
try:
stats = self.client.retrieve_resource(context.tenant_id,
resource_path)[1]
@@ -412,12 +398,11 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
device_id = '_lb-snatport-' + subnet_id
subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
msg = (_("Filtering ports based on network_id=%(network_id)s, "
"tenant_id=%(tenant_id)s, device_id=%(device_id)s") %
{'network_id': network_id,
'tenant_id': tenant_id,
'device_id': device_id})
LOG.debug(msg)
LOG.debug("Filtering ports based on network_id=%(network_id)s, "
"tenant_id=%(tenant_id)s, device_id=%(device_id)s",
{'network_id': network_id,
'tenant_id': tenant_id,
'device_id': device_id})
filter_dict = {
'network_id': [network_id],
'tenant_id': [tenant_id],
@@ -427,11 +412,10 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
ports = self.plugin._core_plugin.get_ports(context,
filters=filter_dict)
if ports:
msg = _("Found an existing SNAT port for subnet %s") % subnet_id
LOG.info(msg)
LOG.info(_LI("Found an existing SNAT port for subnet %s"),
subnet_id)
return ports[0]
msg = _("Found no SNAT ports for subnet %s") % subnet_id
LOG.info(msg)
LOG.info(_LI("Found no SNAT ports for subnet %s"), subnet_id)
def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
ip_address):
@@ -451,31 +435,27 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
}
port = self.plugin._core_plugin.create_port(context,
{'port': port_data})
msg = _("Created SNAT port: %s") % repr(port)
LOG.info(msg)
LOG.info(_LI("Created SNAT port: %r"), port)
return port
def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if port:
self.plugin._core_plugin.delete_port(context, port['id'])
msg = _("Removed SNAT port: %s") % repr(port)
LOG.info(msg)
LOG.info(_LI("Removed SNAT port: %r"), port)
def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
subnet_id, network_info):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if not port:
msg = _("No SNAT port found for subnet %s."
" Creating one...") % subnet_id
LOG.info(msg)
LOG.info(_LI("No SNAT port found for subnet %s. Creating one..."),
subnet_id)
port = self._create_snatport_for_subnet(context, tenant_id,
subnet_id,
ip_address=None)
network_info['port_id'] = port['id']
network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
msg = _("SNAT port: %s") % repr(port)
LOG.info(msg)
LOG.info(_LI("SNAT port: %r"), port)
def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
subnet_id):
@@ -484,6 +464,6 @@ class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
#No pools left on the old subnet.
#We can remove the SNAT port/ipaddress
self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
msg = _("Removing SNAT port for subnet %s "
"as this is the last pool using it...") % subnet_id
LOG.info(msg)
LOG.info(_LI("Removing SNAT port for subnet %s "
"as this is the last pool using it..."),
subnet_id)

View File

@@ -33,6 +33,7 @@ from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
@@ -227,9 +228,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
self.l4_action_name, ext_vip, context)
finally:
LOG.debug(_('vip: %(vip)s, '
'extended_vip: %(extended_vip)s, '
'service_name: %(service_name)s, '),
LOG.debug('vip: %(vip)s, extended_vip: %(extended_vip)s, '
'service_name: %(service_name)s, ',
log_info)
def update_vip(self, context, old_vip, vip):
@@ -261,16 +261,15 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
ports = self.plugin._core_plugin.get_ports(context,
filters=port_filter)
if ports:
LOG.debug(_('Retrieved pip nport: %(port)r for '
'vip: %(vip)s'), {'port': ports[0],
'vip': vip['id']})
LOG.debug('Retrieved pip nport: %(port)r for vip: %(vip)s',
{'port': ports[0], 'vip': vip['id']})
delete_pip_nport_function = self._get_delete_pip_nports(
context, ports)
else:
delete_pip_nport_function = None
LOG.debug(_('Found no pip nports associated with '
'vip: %s'), vip['id'])
LOG.debug('Found no pip nports associated with vip: %s',
vip['id'])
# removing the WF will cause deletion of the configuration from the
# device
@@ -278,8 +277,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
except r_exc.RESTRequestFailure:
pool_id = ext_vip['pool_id']
LOG.exception(_('Failed to remove workflow %s. '
'Going to set vip to ERROR status'),
LOG.exception(_LE('Failed to remove workflow %s. '
'Going to set vip to ERROR status'),
pool_id)
self.plugin.update_status(context, lb_db.Vip, ids['vip'],
@@ -292,11 +291,11 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
try:
self.plugin._core_plugin.delete_port(
context, port['id'])
LOG.debug(_('pip nport id: %s'), port['id'])
LOG.debug('pip nport id: %s', port['id'])
except Exception as exception:
# stop exception propagation, nport may have
# been deleted by other means
LOG.warning(_('pip nport delete failed: %r'),
LOG.warning(_LW('pip nport delete failed: %r'),
exception)
return _delete_pip_nports
@@ -384,9 +383,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
"delete": delete, "vip_id": vip_id}
LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
'pool_id = %(pool_id)s delete = %(delete)s '
'vip_id = %(vip_id)s'),
LOG.debug('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
'pool_id = %(pool_id)s delete = %(delete)s '
'vip_id = %(vip_id)s',
debug_params)
if vip_id:
@@ -415,7 +414,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
def _start_completion_handling_thread(self):
if not self.completion_handler_started:
LOG.info(_('Starting operation completion handling thread'))
LOG.info(_LI('Starting operation completion handling thread'))
self.completion_handler.start()
self.completion_handler_started = True
@@ -445,7 +444,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
response = _rest_wrapper(self.rest_client.call('POST', resource,
{'parameters': params},
TEMPLATE_HEADER))
LOG.debug(_('_update_workflow response: %s '), response)
LOG.debug('_update_workflow response: %s ', response)
if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
@@ -454,7 +453,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
lbaas_entity,
entity_id,
delete=delete)
LOG.debug(_('Pushing operation %s to the queue'), oper)
LOG.debug('Pushing operation %s to the queue', oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
@@ -462,7 +461,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
def _remove_workflow(self, ids, context, post_remove_function):
wf_name = ids['pool']
LOG.debug(_('Remove the workflow %s') % wf_name)
LOG.debug('Remove the workflow %s' % wf_name)
resource = '/api/workflow/%s' % (wf_name)
rest_return = self.rest_client.call('DELETE', resource, None, None)
response = _rest_wrapper(rest_return, [204, 202, 404])
@@ -470,12 +469,12 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
if post_remove_function:
try:
post_remove_function(True)
LOG.debug(_('Post-remove workflow function '
'%r completed'), post_remove_function)
LOG.debug('Post-remove workflow function %r completed',
post_remove_function)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Post-remove workflow function '
'%r failed'), post_remove_function)
LOG.exception(_LE('Post-remove workflow function '
'%r failed'), post_remove_function)
self.plugin._delete_db_vip(context, ids['vip'])
else:
oper = OperationAttributes(
@@ -485,7 +484,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
ids['vip'],
delete=True,
post_op_function=post_remove_function)
LOG.debug(_('Pushing operation %s to the queue'), oper)
LOG.debug('Pushing operation %s to the queue', oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
@@ -591,7 +590,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
resource,
params,
TEMPLATE_HEADER))
LOG.debug(_('create_workflow response: %s'), str(response))
LOG.debug('create_workflow response: %s', response)
def _verify_workflow_templates(self):
"""Verify the existence of workflows on vDirect server."""
@@ -680,14 +679,14 @@ class vDirectRESTClient:
'sec_server': self.secondary_server,
'port': self.port,
'ssl': self.ssl}
LOG.debug(_('vDirectRESTClient:init server=%(server)s, '
'secondary server=%(sec_server)s, '
'port=%(port)d, '
'ssl=%(ssl)r'), debug_params)
LOG.debug('vDirectRESTClient:init server=%(server)s, '
'secondary server=%(sec_server)s, '
'port=%(port)d, ssl=%(ssl)r',
debug_params)
def _flip_servers(self):
LOG.warning(_('Fliping servers. Current is: %(server)s, '
'switching to %(secondary)s'),
LOG.warning(_LW('Fliping servers. Current is: %(server)s, '
'switching to %(secondary)s'),
{'server': self.server,
'secondary': self.secondary_server})
self.server, self.secondary_server = self.secondary_server, self.server
@@ -699,19 +698,19 @@
headers, binary)
return resp
else:
LOG.exception(_('REST client is not able to recover '
'since only one vDirect server is '
LOG.exception(_LE('REST client is not able to recover '
'since only one vDirect server is '
'configured.'))
return -1, None, None, None
def call(self, action, resource, data, headers, binary=False):
resp = self._call(action, resource, data, headers, binary)
if resp[RESP_STATUS] == -1:
LOG.warning(_('vDirect server is not responding (%s).'),
LOG.warning(_LW('vDirect server is not responding (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
elif resp[RESP_STATUS] in (301, 307):
LOG.warning(_('vDirect server is not active (%s).'),
LOG.warning(_LW('vDirect server is not active (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
else:
@@ -739,14 +738,14 @@
conn = httplib.HTTPSConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('vdirectRESTClient: Could not establish HTTPS '
LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
conn = httplib.HTTPConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('vdirectRESTClient: Could not establish HTTP '
LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
'connection'))
return 0, None, None, None
@@ -763,7 +762,7 @@
ret = (response.status, response.reason, respstr, respdata)
except Exception as e:
log_dict = {'action': action, 'e': e}
LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'),
LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
log_dict)
ret = -1, None, None, None
conn.close()
@@ -831,9 +830,9 @@ class OperationCompletionHandler(threading.Thread):
debug_data = {'oper': oper,
'sec_to_completion': sec_to_completion,
'success': success}
LOG.debug(_('Operation %(oper)s is completed after '
LOG.debug('Operation %(oper)s is completed after '
'%(sec_to_completion)d sec '
'with success status: %(success)s :'),
'with success status: %(success)s :',
debug_data)
db_status = None
if not success:
@@ -843,7 +842,8 @@
else:
msg = "unknown"
error_params = {"operation": oper, "msg": msg}
LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'),
LOG.error(_LE('Operation %(operation)s failed. Reason: '
'%(msg)s'),
error_params)
db_status = constants.ERROR
else:
@@ -870,12 +870,11 @@
if self.opers_to_handle_before_rest <= 0:
self.opers_to_handle_before_rest = self.queue.qsize() + 1
LOG.debug('Operation consumed from the queue: ' +
str(oper))
LOG.debug('Operation consumed from the queue: %s', oper)
# check the status - if oper is done: update the db ,
# else push the oper again to the queue
if not self.handle_operation_completion(oper):
LOG.debug(_('Operation %s is not completed yet..') % oper)
LOG.debug('Operation %s is not completed yet..', oper)
# Not completed - push to the queue again
self.queue.put_nowait(oper)
@@ -899,15 +898,13 @@
log_data = {'func': oper.post_op_function, 'oper': oper}
try:
oper.post_op_function(success)
LOG.debug(_('Post-operation function '
'%(func)r completed '
'after operation %(oper)r'),
LOG.debug('Post-operation function %(func)r completed '
'after operation %(oper)r',
log_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Post-operation function '
'%(func)r failed '
'after operation %(oper)r'),
LOG.exception(_LE('Post-operation function %(func)r '
'failed after operation %(oper)r'),
log_data)
@@ -946,7 +943,7 @@ def _update_vip_graph_status(plugin, oper, status):
ctx = context.get_admin_context(load_admin_roles=False)
LOG.debug(_('_update: %s '), oper)
LOG.debug('_update: %s ', oper)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin.update_pool_health_monitor(ctx,
oper.entity_id,
@@ -986,7 +983,7 @@ def _update_vip_graph_status_cascade(plugin, ids, ctx, status):
def _remove_object_from_db(plugin, oper):
"""Remove a specific entity from db."""
LOG.debug(_('_remove_object_from_db %s'), str(oper))
LOG.debug('_remove_object_from_db %s', oper)
ctx = context.get_admin_context(load_admin_roles=False)
@@ -1052,7 +1049,7 @@ def _translate_vip_object_graph(extended_vip, plugin, context):
return ids
trans_vip = {}
LOG.debug('Vip graph to be translated: ' + str(extended_vip))
LOG.debug('Vip graph to be translated: %s', extended_vip)
for vip_property in VIP_PROPERTIES:
trans_vip['vip_' + vip_property] = extended_vip.get(
vip_property, TRANSLATION_DEFAULTS.get(vip_property))
@@ -1109,5 +1106,5 @@ def _translate_vip_object_graph(extended_vip, plugin, context):
trans_vip['__ids__'] = ids
if 'pip_address' in extended_vip:
trans_vip['pip_address'] = extended_vip['pip_address']
LOG.debug('Translated Vip graph: ' + str(trans_vip))
LOG.debug('Translated Vip graph: %s', trans_vip)
return trans_vip

View File

@@ -20,6 +20,7 @@ from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
@@ -75,9 +76,9 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
msg = _("Delete associated loadbalancer pools before "
"removing providers %s") % list(lost_providers)
LOG.exception(msg)
LOG.exception(_LE("Delete associated loadbalancer pools before "
"removing providers %s"),
list(lost_providers))
raise SystemExit(1)
def _get_driver_for_provider(self, provider):
@@ -183,7 +184,8 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
# that should not happen
# if it's still a case - something goes wrong
# log the error and mark the pool as ERROR
LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
LOG.error(_LE('Failed to delete pool %s, putting it in ERROR '
'state'),
id)
with excutils.save_and_reraise_exception():
self.update_status(context, ldb.Pool,