Merge "Remove translation of log messages"

Zuul 2018-03-20 20:01:50 +00:00 committed by Gerrit Code Review
commit fb5c1a9779
86 changed files with 842 additions and 810 deletions
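The pattern applied across all 86 files is the same: drop the designate.i18n log-translation markers (_LI, _LW, _LE, _LC) and log the plain string, while keeping the lazy argument-passing style. A minimal before/after sketch, using stdlib logging as a stand-in for oslo_log (which exposes the same logger interface); the zonefile/name values are hypothetical:

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

zonefile, name = 'db.example.org', 'example.org.'  # hypothetical values

# Before this commit: the message was wrapped in a translation marker.
#   LOG.info(_LI('%(zonefile)s is missing $ORIGIN, inserting %(name)s'),
#            {'zonefile': zonefile, 'name': name})

# After: the plain string, with arguments still handed to the logger so
# interpolation only happens when the record is actually emitted.
LOG.info('%(zonefile)s is missing $ORIGIN, inserting %(name)s',
         {'zonefile': zonefile, 'name': name})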

View File

@ -22,10 +22,6 @@ import logging
import dns.zone
from designate.i18n import _LI
from designate.i18n import _LE
logging.basicConfig()
LOG = logging.getLogger(__name__)
@ -170,14 +166,14 @@ class Extractor:
zone_object = dns.zone.from_file(zonefile,
allow_include=True)
except dns.zone.UnknownOrigin:
LOG.info(_LI('%(zonefile)s is missing $ORIGIN, '
'inserting %(name)s'),
LOG.info('%(zonefile)s is missing $ORIGIN, '
'inserting %(name)s',
{'zonefile': zonefile, 'name': name})
zone_object = dns.zone.from_file(zonefile,
allow_include=True,
origin=name)
except dns.zone.NoSOA:
LOG.error(_LE('%s has no SOA') % zonefile)
LOG.error('%s has no SOA', zonefile)
zones.append(Zone(zone_object))
return zones
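Beyond dropping the markers, several hunks (such as the LOG.error just above) also replace eager %-interpolation with the logger's lazy form. A small sketch of the difference, again using stdlib logging as a stand-in for oslo_log; the zonefile value is hypothetical:

import logging

logging.basicConfig(level=logging.CRITICAL)  # ERROR records are discarded
LOG = logging.getLogger(__name__)

zonefile = 'db.example.org'  # hypothetical value

LOG.error('%s has no SOA' % zonefile)  # eager: the string is built regardless
LOG.error('%s has no SOA', zonefile)   # lazy: skipped, ERROR is disabled here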

View File

@ -36,9 +36,6 @@ from oslo_log import log as logging
from designate import utils
from designate import dnsutils
from designate.backend import agent_backend
from designate.i18n import _LW
from designate.i18n import _LE
from designate.i18n import _LI
import designate.backend.private_codes as pcodes
LOG = logging.getLogger(__name__)
@ -53,8 +50,7 @@ class RequestHandler(object):
master = {'host': raw_server[0], 'port': int(raw_server[1])}
self.masters.append(master)
LOG.info(_LI("Agent masters: %(masters)s"),
{'masters': self.masters})
LOG.info("Agent masters: %(masters)s", {'masters': self.masters})
self.allow_notify = CONF['service:agent'].allow_notify
self.transfer_source = CONF['service:agent'].transfer_source
@ -120,7 +116,7 @@ class RequestHandler(object):
# Does this warrant a warning?
# There is a race condition between checking if the zone exists
# and creating it.
LOG.warning(_LW("Not creating %(name)s, zone already exists"),
LOG.warning("Not creating %(name)s, zone already exists",
{'name': zone_name})
# Provide an authoritative answer
response.flags |= dns.flags.AA
@ -137,7 +133,7 @@ class RequestHandler(object):
except Exception as e:
# TODO(Federico) unknown exceptions should be logged with a full
# traceback. Same in the other methods.
LOG.error(_LE("Exception while creating zone %r"), e)
LOG.error("Exception while creating zone %r", e)
response.set_rcode(dns.rcode.from_text("SERVFAIL"))
return response
@ -168,8 +164,8 @@ class RequestHandler(object):
serial = self.backend.find_zone_serial(zone_name)
if serial is None:
LOG.warning(_LW("Refusing NOTIFY for %(name)s, doesn't exist") %
{'name': zone_name})
LOG.warning("Refusing NOTIFY for %(name)s, doesn't exist",
{'name': zone_name})
response.set_rcode(dns.rcode.from_text("REFUSED"))
return response
@ -219,8 +215,8 @@ class RequestHandler(object):
serial = self.backend.find_zone_serial(zone_name)
if serial is None:
LOG.warning(_LW("Not deleting %(name)s, zone doesn't exist") %
{'name': zone_name})
LOG.warning("Not deleting %(name)s, zone doesn't exist",
{'name': zone_name})
# Provide an authoritative answer
response.flags |= dns.flags.AA
return response
@ -246,8 +242,8 @@ class RequestHandler(object):
return True
if requester not in self.allow_notify:
LOG.warning(_LW("%(verb)s for %(name)s from %(server)s refused") %
{'verb': op, 'name': zone_name, 'server': requester})
LOG.warning("%(verb)s for %(name)s from %(server)s refused",
{'verb': op, 'name': zone_name, 'server': requester})
return False
return True
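The TODO(Federico) note above asks for full tracebacks on unknown exceptions. A hedged sketch of the idiom other files in this diff already use (for example the djbdns backend): pass exc_info=True so the logging handler appends the active traceback. The _create_zone helper is a hypothetical stand-in for the real backend call:

import logging

logging.basicConfig()
LOG = logging.getLogger(__name__)

def _create_zone():  # hypothetical stand-in for the real backend call
    raise RuntimeError('backend unavailable')

try:
    _create_zone()
except Exception as e:
    LOG.error("Exception while creating zone %r", e, exc_info=True)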

View File

@ -16,7 +16,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from stevedore import named
from designate.i18n import _LI
from designate.api.v2.controllers import errors
@ -39,7 +38,7 @@ class RootController(object):
for ext in self._mgr:
controller = self
path = ext.obj.get_path()
LOG.info(_LI("Registering an API extension at path %s"), path)
LOG.info("Registering an API extension at path %s", path)
for p in path.split('.')[:-1]:
if p != '':
controller = getattr(controller, p)

View File

@ -29,10 +29,6 @@ from designate import notifications
from designate import context
from designate import objects
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
from designate.i18n import _LW
from designate.i18n import _LE
from designate.i18n import _LC
LOG = logging.getLogger(__name__)
@ -46,7 +42,7 @@ def auth_pipeline_factory(loader, global_conf, **local_conf):
"""
pipeline = local_conf[cfg.CONF['service:api'].auth_strategy]
pipeline = pipeline.split()
LOG.info(_LI('Getting auth pipeline: %s'), pipeline[:-1])
LOG.info('Getting auth pipeline: %s', pipeline[:-1])
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
@ -116,7 +112,7 @@ class KeystoneContextMiddleware(ContextMiddleware):
def __init__(self, application):
super(KeystoneContextMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate keystonecontext middleware'))
LOG.info('Starting designate keystonecontext middleware')
def process_request(self, request):
headers = request.headers
@ -156,7 +152,7 @@ class NoAuthContextMiddleware(ContextMiddleware):
def __init__(self, application):
super(NoAuthContextMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate noauthcontext middleware'))
LOG.info('Starting designate noauthcontext middleware')
def process_request(self, request):
headers = request.headers
@ -174,8 +170,8 @@ class TestContextMiddleware(ContextMiddleware):
def __init__(self, application, tenant_id=None, user_id=None):
super(TestContextMiddleware, self).__init__(application)
LOG.critical(_LC('Starting designate testcontext middleware'))
LOG.critical(_LC('**** DO NOT USE IN PRODUCTION ****'))
LOG.critical('Starting designate testcontext middleware')
LOG.critical('**** DO NOT USE IN PRODUCTION ****')
self.default_tenant_id = tenant_id
self.default_user_id = user_id
@ -197,7 +193,7 @@ class MaintenanceMiddleware(base.Middleware):
def __init__(self, application):
super(MaintenanceMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate maintenance middleware'))
LOG.info('Starting designate maintenance middleware')
self.enabled = cfg.CONF['service:api'].maintenance_mode
self.role = cfg.CONF['service:api'].maintenance_mode_role
@ -211,7 +207,7 @@ class MaintenanceMiddleware(base.Middleware):
# If the caller has the bypass role, let them through
if ('context' in request.environ
and self.role in request.environ['context'].roles):
LOG.warning(_LW('Request authorized to bypass maintenance mode'))
LOG.warning('Request authorized to bypass maintenance mode')
return None
# Otherwise, reject the request with a 503 Service Unavailable
@ -231,7 +227,7 @@ class FaultWrapperMiddleware(base.Middleware):
def __init__(self, application):
super(FaultWrapperMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate faultwrapper middleware'))
LOG.info('Starting designate faultwrapper middleware')
@webob.dec.wsgify
def __call__(self, request):
@ -302,7 +298,7 @@ class FaultWrapperMiddleware(base.Middleware):
else:
# TODO(ekarlso): Remove after verifying that there's actually a
# context always set
LOG.error(_LE('Missing context in request, please check.'))
LOG.error('Missing context in request, please check.')
return flask.Response(status=status, headers=headers,
response=json.dumps(response))
@ -313,7 +309,7 @@ class ValidationErrorMiddleware(base.Middleware):
def __init__(self, application):
super(ValidationErrorMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate validation middleware'))
LOG.info('Starting designate validation middleware')
@webob.dec.wsgify
def __call__(self, request):
@ -353,7 +349,7 @@ class ValidationErrorMiddleware(base.Middleware):
else:
# TODO(ekarlso): Remove after verifying that there's actually a
# context always set
LOG.error(_LE('Missing context in request, please check.'))
LOG.error('Missing context in request, please check.')
return flask.Response(status=exception.error_code, headers=headers,
response=json.dumps(response))
@ -377,7 +373,7 @@ class SSLMiddleware(base.Middleware):
@removals.remove
def __init__(self, application):
super(SSLMiddleware, self).__init__(application)
LOG.info(_LI('Starting designate ssl middleware'))
LOG.info('Starting designate ssl middleware')
self.secure_proxy_ssl_header = 'HTTP_{0}'.format(
cfg.CONF['service:api'].secure_proxy_ssl_header.upper().
replace('-', '_'))
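For context on the _LI/_LW/_LE/_LC imports deleted at the top of this file: they were per-log-level translators from designate.i18n, built on oslo_i18n. The sketch below is a reconstruction of the module's likely shape, assuming the standard oslo_i18n TranslatorFactory layout of that era, not a verbatim copy:

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='designate')

_ = _translators.primary        # user-facing messages, kept by this commit
_LI = _translators.log_info     # per-level log markers, all removed
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical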

View File

@ -17,7 +17,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from paste import deploy
from designate.i18n import _LI
from designate import exceptions
from designate import utils
from designate import service
@ -61,6 +60,6 @@ class Service(service.WSGIService, service.Service):
msg = 'Unable to determine appropriate api-paste-config file'
raise exceptions.ConfigurationError(msg)
LOG.info(_LI('Using api-paste-config found at: %s'), config_paths[0])
LOG.info('Using api-paste-config found at: %s', config_paths[0])
return deploy.loadapp("config:%s" % config_paths[0], name='osapi_dns')

View File

@ -21,7 +21,6 @@ from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import Blacklist
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -40,7 +39,7 @@ class BlacklistsController(rest.RestController):
blacklist = self.central_api.get_blacklist(context, blacklist_id)
LOG.info(_LI("Retrieved %(blacklist)s"), {'blacklist': blacklist})
LOG.info("Retrieved %(blacklist)s", {'blacklist': blacklist})
return DesignateAdapter.render('API_v2', blacklist, request=request)
@ -62,7 +61,7 @@ class BlacklistsController(rest.RestController):
blacklists = self.central_api.find_blacklists(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(blacklists)s"), {'blacklists': blacklists})
LOG.info("Retrieved %(blacklists)s", {'blacklists': blacklists})
return DesignateAdapter.render('API_v2', blacklists, request=request)
@ -82,7 +81,7 @@ class BlacklistsController(rest.RestController):
blacklist = self.central_api.create_blacklist(
context, blacklist)
LOG.info(_LI("Created %(blacklist)s"), {'blacklist': blacklist})
LOG.info("Created %(blacklist)s", {'blacklist': blacklist})
response.status_int = 201
@ -116,7 +115,7 @@ class BlacklistsController(rest.RestController):
blacklist = self.central_api.update_blacklist(context, blacklist)
LOG.info(_LI("Updated %(blacklist)s"), {'blacklist': blacklist})
LOG.info("Updated %(blacklist)s", {'blacklist': blacklist})
response.status_int = 200
@ -132,7 +131,7 @@ class BlacklistsController(rest.RestController):
blacklist = self.central_api.delete_blacklist(context, blacklist_id)
LOG.info(_LI("Deleted %(blacklist)s"), {'blacklist': blacklist})
LOG.info("Deleted %(blacklist)s", {'blacklist': blacklist})
response.status_int = 204

View File

@ -22,7 +22,6 @@ from designate import exceptions
from designate import objects
from designate.objects.adapters import DesignateAdapter
from designate.api.v2.controllers import rest
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -52,7 +51,7 @@ class FloatingIPController(rest.RestController):
fips = self.central_api.list_floatingips(context)
LOG.info(_LI("Retrieved %(fips)s"), {'fips': fips})
LOG.info("Retrieved %(fips)s", {'fips': fips})
return DesignateAdapter.render('API_v2', fips, request=request)
@ -80,7 +79,7 @@ class FloatingIPController(rest.RestController):
fip.validate()
LOG.info(_LI("Updated %(fip)s"), {'fip': fip})
LOG.info("Updated %(fip)s", {'fip': fip})
fip = self.central_api.update_floatingip(context, region, id_, fip)
@ -101,6 +100,6 @@ class FloatingIPController(rest.RestController):
fip = self.central_api.get_floatingip(context, region, id_)
LOG.info(_LI("Retrieved %(fip)s"), {'fip': fip})
LOG.info("Retrieved %(fip)s", {'fip': fip})
return DesignateAdapter.render('API_v2', fip, request=request)

View File

@ -16,11 +16,9 @@ import pecan
from oslo_log import log as logging
from designate import utils
from designate.i18n import _LW
from designate.api.v2.controllers import rest
from designate.objects import Pool
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -38,7 +36,7 @@ class PoolsController(rest.RestController):
pool = self.central_api.get_pool(context, pool_id)
LOG.info(_LI("Retrieved %(pool)s"), {'pool': pool})
LOG.info("Retrieved %(pool)s", {'pool': pool})
return DesignateAdapter.render('API_v2', pool, request=request)
@ -60,7 +58,7 @@ class PoolsController(rest.RestController):
pools = self.central_api.find_pools(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(pools)s"), {'pools': pools})
LOG.info("Retrieved %(pools)s", {'pools': pools})
return DesignateAdapter.render('API_v2', pools, request=request)
@ -68,9 +66,9 @@ class PoolsController(rest.RestController):
def post_all(self):
"""Create a Pool"""
LOG.warning(_LW("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands"))
LOG.warning("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands")
request = pecan.request
response = pecan.response
@ -84,7 +82,7 @@ class PoolsController(rest.RestController):
# Create the pool
pool = self.central_api.create_pool(context, pool)
LOG.info(_LI("Created %(pool)s"), {'pool': pool})
LOG.info("Created %(pool)s", {'pool': pool})
pool = DesignateAdapter.render('API_v2', pool, request=request)
response.status_int = 201
@ -100,9 +98,9 @@ class PoolsController(rest.RestController):
def patch_one(self, pool_id):
"""Update the specific pool"""
LOG.warning(_LW("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands"))
LOG.warning("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands")
request = pecan.request
context = request.environ['context']
@ -121,7 +119,7 @@ class PoolsController(rest.RestController):
pool = self.central_api.update_pool(context, pool)
LOG.info(_LI("Updated %(pool)s"), {'pool': pool})
LOG.info("Updated %(pool)s", {'pool': pool})
response.status_int = 202
@ -132,9 +130,9 @@ class PoolsController(rest.RestController):
def delete_one(self, pool_id):
"""Delete the specific pool"""
LOG.warning(_LW("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands"))
LOG.warning("Use of this API Method is DEPRECATED. This will have "
"unforeseen side affects when used with the "
"designate-manage pool commands")
request = pecan.request
response = pecan.response
@ -142,7 +140,7 @@ class PoolsController(rest.RestController):
pool = self.central_api.delete_pool(context, pool_id)
LOG.info(_LI("Deleted %(pool)s"), {'pool': pool})
LOG.info("Deleted %(pool)s", {'pool': pool})
response.status_int = 204

View File

@ -20,7 +20,6 @@ from designate import utils
from designate.api.v2.controllers import common
from designate.api.v2.controllers import rest
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -38,7 +37,7 @@ class RecordSetsViewController(rest.RestController):
rrset = self.central_api.get_recordset(context, None, recordset_id)
LOG.info(_LI("Retrieved %(recordset)s"), {'recordset': rrset})
LOG.info("Retrieved %(recordset)s", {'recordset': rrset})
canonical_loc = common.get_rrset_canonical_location(request,
rrset.zone_id,
@ -53,6 +52,6 @@ class RecordSetsViewController(rest.RestController):
recordsets = common.retrieve_matched_rrsets(context, self, None,
**params)
LOG.info(_LI("Retrieved %(recordsets)s"), {'recordsets': recordsets})
LOG.info("Retrieved %(recordsets)s", {'recordsets': recordsets})
return DesignateAdapter.render('API_v2', recordsets, request=request)

View File

@ -33,7 +33,6 @@ import pecan.routing
from designate import exceptions
from designate.central import rpcapi as central_rpcapi
from designate.pool_manager import rpcapi as pool_mgr_rpcapi
from designate.i18n import _
class RestController(pecan.rest.RestController):

View File

@ -19,8 +19,6 @@ from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import Tld
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -38,7 +36,7 @@ class TldsController(rest.RestController):
tld = self.central_api.get_tld(context, tld_id)
LOG.info(_LI("Retrieved %(tld)s"), {'tld': tld})
LOG.info("Retrieved %(tld)s", {'tld': tld})
return DesignateAdapter.render('API_v2', tld, request=request)
@ -60,7 +58,7 @@ class TldsController(rest.RestController):
tlds = self.central_api.find_tlds(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(tlds)s"), {'tlds': tlds})
LOG.info("Retrieved %(tlds)s", {'tlds': tlds})
return DesignateAdapter.render('API_v2', tlds, request=request)
@ -79,7 +77,7 @@ class TldsController(rest.RestController):
# Create the tld
tld = self.central_api.create_tld(context, tld)
LOG.info(_LI("Created %(tld)s"), {'tld': tld})
LOG.info("Created %(tld)s", {'tld': tld})
response.status_int = 201
@ -111,7 +109,7 @@ class TldsController(rest.RestController):
tld = self.central_api.update_tld(context, tld)
LOG.info(_LI("Updated %(tld)s"), {'tld': tld})
LOG.info("Updated %(tld)s", {'tld': tld})
response.status_int = 200
@ -127,7 +125,7 @@ class TldsController(rest.RestController):
tld = self.central_api.delete_tld(context, tld_id)
LOG.info(_LI("Deleted %(tld)s"), {'tld': tld})
LOG.info("Deleted %(tld)s", {'tld': tld})
response.status_int = 204

View File

@ -21,8 +21,6 @@ from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import TsigKey
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -40,7 +38,7 @@ class TsigKeysController(rest.RestController):
tsigkey = self.central_api.get_tsigkey(context, tsigkey_id)
LOG.info(_LI("Retrieved %(tsigkey)s"), {'tsigkey': tsigkey})
LOG.info("Retrieved %(tsigkey)s", {'tsigkey': tsigkey})
return DesignateAdapter.render('API_v2', tsigkey, request=request)
@ -62,7 +60,7 @@ class TsigKeysController(rest.RestController):
tsigkeys = self.central_api.find_tsigkeys(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(tsigkeys)s"), {'tsigkeys': tsigkeys})
LOG.info("Retrieved %(tsigkeys)s", {'tsigkeys': tsigkeys})
return DesignateAdapter.render('API_v2', tsigkeys, request=request)
@ -82,7 +80,7 @@ class TsigKeysController(rest.RestController):
tsigkey = self.central_api.create_tsigkey(
context, tsigkey)
LOG.info(_LI("Created %(tsigkey)s"), {'tsigkey': tsigkey})
LOG.info("Created %(tsigkey)s", {'tsigkey': tsigkey})
tsigkey = DesignateAdapter.render('API_v2', tsigkey, request=request)
@ -115,7 +113,7 @@ class TsigKeysController(rest.RestController):
# Update and persist the resource
tsigkey = self.central_api.update_tsigkey(context, tsigkey)
LOG.info(_LI("Updated %(tsigkey)s"), {'tsigkey': tsigkey})
LOG.info("Updated %(tsigkey)s", {'tsigkey': tsigkey})
response.status_int = 200
@ -131,7 +129,7 @@ class TsigKeysController(rest.RestController):
tsigkey = self.central_api.delete_tsigkey(context, tsigkey_id)
LOG.info(_LI("Deleted %(tsigkey)s"), {'tsigkey': tsigkey})
LOG.info("Deleted %(tsigkey)s", {'tsigkey': tsigkey})
response.status_int = 204

View File

@ -25,8 +25,6 @@ from designate.api.v2.controllers.zones import tasks
from designate.api.v2.controllers.zones import nameservers
from designate import objects
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
CONF = cfg.CONF
@ -54,7 +52,7 @@ class ZonesController(rest.RestController):
zone = self.central_api.get_zone(context, zone_id)
LOG.info(_LI("Retrieved %(zone)s"), {'zone': zone})
LOG.info("Retrieved %(zone)s", {'zone': zone})
return DesignateAdapter.render(
'API_v2',
@ -80,7 +78,7 @@ class ZonesController(rest.RestController):
zones = self.central_api.find_zones(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(zones)s"), {'zones': zones})
LOG.info("Retrieved %(zones)s", {'zones': zones})
return DesignateAdapter.render('API_v2', zones, request=request)
@ -107,7 +105,7 @@ class ZonesController(rest.RestController):
# Create the zone
zone = self.central_api.create_zone(context, zone)
LOG.info(_LI("Created %(zone)s"), {'zone': zone})
LOG.info("Created %(zone)s", {'zone': zone})
# Prepare the response headers
# If the zone has been created asynchronously
@ -178,7 +176,7 @@ class ZonesController(rest.RestController):
zone = self.central_api.update_zone(
context, zone, increment_serial=increment_serial)
LOG.info(_LI("Updated %(zone)s"), {'zone': zone})
LOG.info("Updated %(zone)s", {'zone': zone})
if zone.status == 'PENDING':
response.status_int = 202
@ -198,6 +196,6 @@ class ZonesController(rest.RestController):
zone = self.central_api.delete_zone(context, zone_id)
response.status_int = 202
LOG.info(_LI("Deleted %(zone)s"), {'zone': zone})
LOG.info("Deleted %(zone)s", {'zone': zone})
return DesignateAdapter.render('API_v2', zone, request=request)

View File

@ -18,8 +18,6 @@ from oslo_log import log as logging
from designate.api.v2.controllers import rest
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -37,7 +35,7 @@ class NameServersController(rest.RestController):
ns_records = self.central_api.get_zone_ns_records(context, zone_id)
LOG.info(_LI("Created %(ns_records)s"), {'ns_records': ns_records})
LOG.info("Created %(ns_records)s", {'ns_records': ns_records})
return {
"nameservers": DesignateAdapter.render(

View File

@ -17,8 +17,6 @@ from oslo_log import log as logging
from designate import utils
from designate.api.v2.controllers import rest
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -40,7 +38,7 @@ class AbandonController(rest.RestController):
zone = self.central_api.delete_zone(context, zone_id)
if zone.deleted_at:
response.status_int = 204
LOG.info(_LI("Abandoned %(zone)s"), {'zone': zone})
LOG.info("Abandoned %(zone)s", {'zone': zone})
else:
response.status_int = 500

View File

@ -21,8 +21,6 @@ from designate import policy
from designate import utils
from designate.api.v2.controllers import rest
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -61,7 +59,7 @@ class ZoneExportCreateController(rest.RestController):
context, zone_id)
response.status_int = 202
LOG.info(_LI("Created %(zone_export)s"), {'zone_export': zone_export})
LOG.info("Created %(zone_export)s", {'zone_export': zone_export})
zone_export = DesignateAdapter.render(
'API_v2', zone_export, request=request)
@ -86,7 +84,7 @@ class ZoneExportsController(rest.RestController):
zone_export = self.central_api.get_zone_export(context, export_id)
LOG.info(_LI("Retrieved %(export)s"), {'export': zone_export})
LOG.info("Retrieved %(export)s", {'export': zone_export})
return DesignateAdapter.render(
'API_v2',
@ -110,7 +108,7 @@ class ZoneExportsController(rest.RestController):
zone_exports = self.central_api.find_zone_exports(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(exports)s"),
LOG.info("Retrieved %(exports)s",
{'exports': zone_exports})
return DesignateAdapter.render(
@ -131,7 +129,7 @@ class ZoneExportsController(rest.RestController):
zone_export = self.central_api.delete_zone_export(
context, zone_export_id)
LOG.info(_LI("Deleted %(zone_export)s"), {'zone_export': zone_export})
LOG.info("Deleted %(zone_export)s", {'zone_export': zone_export})
response.status_int = 204

View File

@ -21,7 +21,6 @@ from designate import exceptions
from designate import utils
from designate.api.v2.controllers import rest
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -41,7 +40,7 @@ class ZoneImportController(rest.RestController):
zone_import = self.central_api.get_zone_import(
context, import_id)
LOG.info(_LI("Retrieved %(import)s"), {'import': zone_import})
LOG.info("Retrieved %(import)s", {'import': zone_import})
return DesignateAdapter.render(
'API_v2',
@ -65,8 +64,7 @@ class ZoneImportController(rest.RestController):
zone_imports = self.central_api.find_zone_imports(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(imports)s"),
{'imports': zone_imports})
LOG.info("Retrieved %(imports)s", {'imports': zone_imports})
return DesignateAdapter.render(
'API_v2',
@ -93,7 +91,7 @@ class ZoneImportController(rest.RestController):
context, body)
response.status_int = 202
LOG.info(_LI("Created %(zone_import)s"), {'zone_import': zone_import})
LOG.info("Created %(zone_import)s", {'zone_import': zone_import})
zone_import = DesignateAdapter.render(
'API_v2', zone_import, request=request)
@ -115,7 +113,7 @@ class ZoneImportController(rest.RestController):
zone_import = self.central_api.delete_zone_import(
context, zone_import_id)
LOG.info(_LI("Deleted %(zone_import)s"), {'zone_import': zone_import})
LOG.info("Deleted %(zone_import)s", {'zone_import': zone_import})
response.status_int = 204

View File

@ -20,8 +20,6 @@ from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import ZoneTransferAccept
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -41,7 +39,7 @@ class TransferAcceptsController(rest.RestController):
transfer_accepts = self.central_api.get_zone_transfer_accept(
context, transfer_accept_id)
LOG.info(_LI("Retrieved %(transfer_accepts)s"),
LOG.info("Retrieved %(transfer_accepts)s",
{'transfer_accepts': transfer_accepts})
return DesignateAdapter.render(
@ -63,7 +61,7 @@ class TransferAcceptsController(rest.RestController):
zone_transfer_accepts = self.central_api.find_zone_transfer_accepts(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(zone_transfer_accepts)s"),
LOG.info("Retrieved %(zone_transfer_accepts)s",
{'zone_transfer_accepts': zone_transfer_accepts})
return DesignateAdapter.render(
@ -90,7 +88,7 @@ class TransferAcceptsController(rest.RestController):
context, zone_transfer_accept)
response.status_int = 201
LOG.info(_LI("Created %(zone_transfer_accept)s"),
LOG.info("Created %(zone_transfer_accept)s",
{'zone_transfer_accept': zone_transfer_accept})
zone_transfer_accept = DesignateAdapter.render(

View File

@ -21,8 +21,6 @@ from designate import exceptions
from designate.api.v2.controllers import rest
from designate.objects import ZoneTransferRequest
from designate.objects.adapters import DesignateAdapter
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -42,7 +40,7 @@ class TransferRequestsController(rest.RestController):
transfer_request = self.central_api.get_zone_transfer_request(
context, zone_transfer_request_id)
LOG.info(_LI("Retrieved %(transfer_request)s"),
LOG.info("Retrieved %(transfer_request)s",
{'transfer_request': transfer_request})
return DesignateAdapter.render(
@ -67,7 +65,7 @@ class TransferRequestsController(rest.RestController):
zone_transfer_requests = self.central_api.find_zone_transfer_requests(
context, criterion, marker, limit, sort_key, sort_dir)
LOG.info(_LI("Retrieved %(zone_transfer_requests)s"),
LOG.info("Retrieved %(zone_transfer_requests)s",
{'zone_transfer_requests': zone_transfer_requests})
return DesignateAdapter.render(
@ -102,7 +100,7 @@ class TransferRequestsController(rest.RestController):
context, zone_transfer_request)
response.status_int = 201
LOG.info(_LI("Created %(zone_transfer_request)s"),
LOG.info("Created %(zone_transfer_request)s",
{'zone_transfer_request': zone_transfer_request})
zone_transfer_request = DesignateAdapter.render(
@ -137,7 +135,7 @@ class TransferRequestsController(rest.RestController):
zone_transfer_request = self.central_api.update_zone_transfer_request(
context, zone_transfer_request)
LOG.info(_LI("Updated %(zt_request)s"),
LOG.info("Updated %(zt_request)s",
{'zt_request': zone_transfer_request})
response.status_int = 200
@ -158,7 +156,7 @@ class TransferRequestsController(rest.RestController):
response.status_int = 204
LOG.info(_LI("Deleted %(zone_transfer_request)s"),
LOG.info("Deleted %(zone_transfer_request)s",
{'zone_transfer_request': zone_transfer_request})
# NOTE: This is a hack and a half.. But Pecan needs it.

View File

@ -18,8 +18,6 @@ from oslo_log import log as logging
from designate import utils
from designate.api.v2.controllers import rest
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -36,7 +34,7 @@ class XfrController(rest.RestController):
zone = self.central_api.get_zone(context, zone_id)
LOG.info(_LI("Triggered XFR for %(zone)s"), {'zone': zone})
LOG.info("Triggered XFR for %(zone)s", {'zone': zone})
self.central_api.xfr_zone(context, zone_id)
response.status_int = 202

View File

@ -36,7 +36,6 @@ import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI, _LW
from designate.backend import base
from designate import exceptions
from designate.mdns import rpcapi as mdns_api
@ -119,8 +118,7 @@ class AgentPoolBackend(base.Backend):
retry = 0
response = None
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'."),
LOG.info("Sending '%(msg)s' for '%(zone)s' to '%(server)s:%(port)d'.",
{'msg': str(opcode),
'zone': zone_name, 'server': dest_ip,
'port': dest_port})
@ -128,18 +126,18 @@ class AgentPoolBackend(base.Backend):
dns_message, dest_ip, dest_port, timeout)
if isinstance(response, dns.exception.Timeout):
LOG.warning(_LW("Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'"),
LOG.warning("Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
{'msg': str(opcode),
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
elif isinstance(response, dns_query.BadResponse):
LOG.warning(_LW("Got BadResponse while trying to send '%(msg)s' "
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'"),
LOG.warning("Got BadResponse while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
{'msg': str(opcode),
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
@ -150,9 +148,9 @@ class AgentPoolBackend(base.Backend):
# authoritative answer
elif not (response.flags & dns.flags.AA) or dns.rcode.from_flags(
response.flags, response.ednsflags) != dns.rcode.NOERROR:
LOG.warning(_LW("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'. Response message: %(resp)s"),
LOG.warning("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to "
"'%(server)s:%(port)d'. Response message: %(resp)s",
{'msg': str(opcode),
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'resp': str(response)})

View File

@ -21,7 +21,7 @@ LOG = logging.getLogger(__name__)
def get_backend(backend_driver, agent_service):
LOG.debug("Loading backend driver: %s" % backend_driver)
LOG.debug("Loading backend driver: %s", backend_driver)
cls = base.AgentBackend.get_driver(backend_driver)

View File

@ -24,8 +24,6 @@ from oslo_log import log as logging
from designate.backend.agent_backend import base
from designate import exceptions
from designate import utils
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
CFG_GROUP = 'backend:agent:bind9'
@ -62,10 +60,10 @@ class Bind9Backend(base.AgentBackend):
return [(bind9_group, bind9_opts)]
def start(self):
LOG.info(_LI("Started bind9 backend"))
LOG.info("Started bind9 backend")
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
LOG.debug("Finding %s", zone_name)
resolver = dns.resolver.Resolver()
resolver.nameservers = [cfg.CONF[CFG_GROUP].query_destination]
try:
@ -75,11 +73,11 @@ class Bind9Backend(base.AgentBackend):
return rdata.serial
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
LOG.debug("Creating %s", zone.origin.to_text())
self._sync_zone(zone, new_zone_flag=True)
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin.to_text())
LOG.debug("Updating %s", zone.origin.to_text())
self._sync_zone(zone)
def delete_zone(self, zone_name):

View File

@ -25,8 +25,6 @@ from oslo_log import log as logging
from designate.backend.agent_backend import base
from designate import exceptions
from designate import utils
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
CFG_GROUP = 'backend:agent:denominator'
@ -117,13 +115,13 @@ class DenominatorBackend(base.AgentBackend):
return [(denominator_group, denominator_opts)]
def start(self):
LOG.info(_LI("Started Denominator backend"))
LOG.info("Started Denominator backend")
def stop(self):
LOG.info(_LI("Stopped Denominator backend"))
LOG.info("Stopped Denominator backend")
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
LOG.debug("Finding %s", zone_name)
zone_name = zone_name.rstrip('.')
output = self.denominator.get_record(
@ -140,7 +138,7 @@ class DenominatorBackend(base.AgentBackend):
return rdata.serial
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
LOG.debug("Creating %s", zone.origin.to_text())
zone_name = zone.origin.to_text(omit_final_dot=True).decode('utf-8')
# Use SOA TTL as zone default TTL
@ -172,7 +170,7 @@ class DenominatorBackend(base.AgentBackend):
data=data)
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin)
LOG.debug("Updating %s", zone.origin)
zone_name = zone.origin.to_text(omit_final_dot=True).decode('utf-8')
soa_record = zone.find_rrset(zone.origin, dns.rdatatype.SOA)

View File

@ -53,8 +53,6 @@ from oslo_log import log as logging
from designate import exceptions
from designate import utils
from designate.backend.agent_backend import base
from designate.i18n import _LI
from designate.i18n import _LE
from designate.utils import execute
@ -116,7 +114,7 @@ def filter_exceptions(fn):
except exceptions.Backend:
raise
except Exception as e:
LOG.error(_LE("Unhandled exception %s"), str(e), exc_info=True)
LOG.error("Unhandled exception %s", e, exc_info=True)
raise exceptions.Backend(str(e))
return wrapper
@ -140,8 +138,8 @@ class DjbdnsBackend(base.AgentBackend):
self._resolver.nameservers = [cfg.CONF[CFG_GROUP].query_destination]
self._masters = [utils.split_host_port(ns)
for ns in cfg.CONF['service:agent'].masters]
LOG.info(_LI("Resolvers: %r"), self._resolver.nameservers)
LOG.info(_LI("AXFR masters: %r"), self._masters)
LOG.info("Resolvers: %r", self._resolver.nameservers)
LOG.info("AXFR masters: %r", self._masters)
if not self._masters:
raise exceptions.Backend("Missing agent AXFR masters")
@ -154,7 +152,7 @@ class DjbdnsBackend(base.AgentBackend):
# Usually /var/lib/djbdns/root/data.cdb
self._tinydns_cdb_filename = os.path.join(tinydns_root_dir, 'data.cdb')
LOG.info(_LI("data.cdb path: %r"), self._tinydns_cdb_filename)
LOG.info("data.cdb path: %r", self._tinydns_cdb_filename)
# Where the agent puts the zone datafiles,
# usually /var/lib/djbdns/datafiles
@ -179,7 +177,7 @@ class DjbdnsBackend(base.AgentBackend):
def start(self):
"""Start the backend"""
LOG.info(_LI("Started djbdns backend"))
LOG.info("Started djbdns backend")
def find_zone_serial(self, zone_name):
"""Query the local resolver for a zone
@ -204,7 +202,7 @@ class DjbdnsBackend(base.AgentBackend):
with open(zone_fn) as zf:
data_f.write(zf.read())
LOG.info(_LI("Loaded %d zone datafiles."), zone_cnt)
LOG.info("Loaded %d zone datafiles.", zone_cnt)
def _rebuild_data_cdb(self):
"""Rebuild data.cdb file from zone datafiles
@ -225,7 +223,7 @@ class DjbdnsBackend(base.AgentBackend):
self._concatenate_zone_datafiles(data_fn,
self._datafiles_path_glob)
# Generate the data.cdb file
LOG.info(_LI("Updating data.cdb"))
LOG.info("Updating data.cdb")
LOG.debug("Convert %s to %s", data_fn, tmp_cdb_fn)
try:
out, err = execute(
@ -233,10 +231,12 @@ class DjbdnsBackend(base.AgentBackend):
cwd=tmpdir
)
except ProcessExecutionError as e:
LOG.error(_LE("Failed to generate data.cdb"))
LOG.error(_LE("Command output: %(out)r Stderr: %(err)r"), {
'out': e.stdout, 'err': e.stderr
})
LOG.error("Failed to generate data.cdb")
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend("Failed to generate data.cdb")
LOG.debug("Move %s to %s", tmp_cdb_fn, self._tinydns_cdb_filename)
@ -244,7 +244,7 @@ class DjbdnsBackend(base.AgentBackend):
os.rename(tmp_cdb_fn, self._tinydns_cdb_filename)
except OSError:
os.remove(tmp_cdb_fn)
LOG.error(_LE("Unable to move data.cdb to %s"),
LOG.error("Unable to move data.cdb to %s",
self._tinydns_cdb_filename)
raise exceptions.Backend("Unable to move data.cdb")
@ -286,10 +286,12 @@ class DjbdnsBackend(base.AgentBackend):
try:
out, err = execute(*cmd)
except ProcessExecutionError as e:
LOG.error(_LE("Error executing AXFR as %r"), ' '.join(cmd))
LOG.error(_LE("Command output: %(out)r Stderr: %(err)r"), {
'out': e.stdout, 'err': e.stderr
})
LOG.error("Error executing AXFR as %r", ' '.join(cmd))
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend(str(e))
finally:
@ -348,7 +350,7 @@ class DjbdnsBackend(base.AgentBackend):
LOG.debug('Deleted Zone: %s', zone_name)
except OSError as e:
if os.errno.ENOENT == e.errno:
LOG.info(_LI("Zone datafile %s was already deleted"), zone_fn)
LOG.info("Zone datafile %s was already deleted", zone_fn)
return
raise
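The filter_exceptions decorator touched at the top of this file is a pattern worth spelling out: backend errors pass through untouched, while anything else is logged with a traceback and re-raised wrapped as a backend error. A self-contained sketch under that assumption, with designate.exceptions.Backend stubbed out:

import functools
import logging

LOG = logging.getLogger(__name__)

class Backend(Exception):  # stand-in for designate.exceptions.Backend
    pass

def filter_exceptions(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Backend:
            raise  # already a well-typed backend failure
        except Exception as e:
            LOG.error("Unhandled exception %s", e, exc_info=True)
            raise Backend(str(e))
    return wrapper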

View File

@ -16,7 +16,6 @@
from oslo_log import log as logging
from designate.backend.agent_backend import base
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -25,20 +24,20 @@ class FakeBackend(base.AgentBackend):
__plugin_name__ = 'fake'
def start(self):
LOG.info(_LI("Started fake backend, Pool Manager will not work!"))
LOG.info("Started fake backend, Pool Manager will not work!")
def stop(self):
LOG.info(_LI("Stopped fake backend"))
LOG.info("Stopped fake backend")
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
LOG.debug("Finding %s", zone_name)
return 0
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
LOG.debug("Creating %s", zone.origin.to_text())
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin.to_text())
LOG.debug("Updating %s", zone.origin.to_text())
def delete_zone(self, zone_name):
LOG.debug('Delete Zone: %s' % zone_name)
LOG.debug('Delete Zone: %s', zone_name)

View File

@ -51,9 +51,6 @@ from oslo_log import log as logging
from designate import utils
from designate import exceptions
from designate.backend.agent_backend import base
from designate.i18n import _LI
from designate.i18n import _LE
LOG = logging.getLogger(__name__)
CFG_GROUP = 'backend:agent:gdnsd'
@ -92,7 +89,7 @@ def filter_exceptions(fn):
except exceptions.Backend as e:
raise e
except Exception as e:
LOG.error(_LE("Unhandled exception %s"), e, exc_info=True)
LOG.error("Unhandled exception %s", e, exc_info=True)
raise exceptions.Backend(e)
return wrapper
@ -111,15 +108,15 @@ class GdnsdBackend(base.AgentBackend):
super(GdnsdBackend, self).__init__(*a, **kw)
self._gdnsd_cmd_name = cfg.CONF[CFG_GROUP].gdnsd_cmd_name
LOG.info(_LI("gdnsd command: %r"), self._gdnsd_cmd_name)
LOG.info("gdnsd command: %r", self._gdnsd_cmd_name)
self._confdir_path = cfg.CONF[CFG_GROUP].confdir_path
self._zonedir_path = os.path.join(self._confdir_path, 'zones')
LOG.info(_LI("gdnsd conf directory: %r"), self._confdir_path)
LOG.info("gdnsd conf directory: %r", self._confdir_path)
self._resolver = dns.resolver.Resolver(configure=False)
self._resolver.timeout = SOA_QUERY_TIMEOUT
self._resolver.lifetime = SOA_QUERY_TIMEOUT
self._resolver.nameservers = [cfg.CONF[CFG_GROUP].query_destination]
LOG.info(_LI("Resolvers: %r"), self._resolver.nameservers)
LOG.info("Resolvers: %r", self._resolver.nameservers)
self._check_dirs(self._zonedir_path)
def start(self):
@ -127,7 +124,7 @@ class GdnsdBackend(base.AgentBackend):
:raises: exception.Backend on invalid configuration
"""
LOG.info(_LI("Started gdnsd backend"))
LOG.info("Started gdnsd backend")
self._check_conf()
def _check_conf(self):
@ -140,9 +137,11 @@ class GdnsdBackend(base.AgentBackend):
run_as_root=False,
)
except ProcessExecutionError as e:
LOG.error(_LE("Command output: %(out)r Stderr: %(err)r"), {
'out': e.stdout, 'err': e.stderr
})
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend("Configuration check failed")
def _check_dirs(self, *dirnames):
@ -247,6 +246,6 @@ class GdnsdBackend(base.AgentBackend):
LOG.debug('Deleted Zone: %s', zone_name)
except OSError as e:
if os.errno.ENOENT == e.errno:
LOG.info(_LI("Zone datafile %s was already deleted"), zone_fn)
LOG.info("Zone datafile %s was already deleted", zone_fn)
return
raise

View File

@ -45,8 +45,6 @@ from oslo_log import log as logging
from designate import exceptions
from designate.backend.agent_backend import base
from designate.i18n import _LI
from designate.i18n import _LE
from designate.utils import execute
@ -101,7 +99,7 @@ class Knot2Backend(base.AgentBackend):
def start(self):
"""Start the backend"""
LOG.info(_LI("Started knot2 backend"))
LOG.info("Started knot2 backend")
def _execute_knotc(self, *knotc_args, **kw):
"""Run the Knot client and check the output
@ -121,17 +119,19 @@ class Knot2Backend(base.AgentBackend):
try:
out, err = execute(self._knotc_cmd_name, *knotc_args)
out = out.rstrip()
LOG.debug("Command output: %r" % out)
LOG.debug("Command output: %r", out)
if out != expected:
if expected_alt is not None and out == expected_alt:
LOG.info(_LI("Ignoring error: %r"), out)
LOG.info("Ignoring error: %r", out)
else:
raise ProcessExecutionError(stdout=out, stderr=err)
except ProcessExecutionError as e:
LOG.error(_LE("Command output: %(out)r Stderr: %(err)r"), {
'out': e.stdout, 'err': e.stderr
})
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend(e)
def _start_minidns_to_knot_axfr(self, zone_name):
@ -155,7 +155,7 @@ class Knot2Backend(base.AgentBackend):
# self._execute_knotc('conf-diff')
except Exception as e:
self._execute_knotc('conf-abort')
LOG.info(_LI("Zone change aborted: %r"), e)
LOG.info("Zone change aborted: %r", e)
raise
else:
self._execute_knotc('conf-commit')
@ -178,16 +178,18 @@ class Knot2Backend(base.AgentBackend):
# Zone not found
return None
LOG.error(_LE("Command output: %(out)r Stderr: %(err)r"), {
'out': e.stdout, 'err': e.stderr
})
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend(e)
try:
serial = out.split('|')[1].split()[1]
return int(serial)
except Exception as e:
LOG.error(_LE("Unable to parse knotc output: %r"), out)
LOG.error("Unable to parse knotc output: %r", out)
raise exceptions.Backend("Unexpected knotc zone-status output")
def create_zone(self, zone):

View File

@ -23,8 +23,6 @@ from os_win import exceptions as os_win_exc
from designate.backend.agent_backend import base
from designate import exceptions
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
CFG_GROUP = 'backend:agent:msdns'
@ -59,7 +57,7 @@ class MSDNSBackend(base.AgentBackend):
# Only ip addresses are needed
self._masters = [ns.split(":")[0] for ns in masters]
LOG.info(_LI("AXFR masters: %r"), self._masters)
LOG.info("AXFR masters: %r", self._masters)
@classmethod
def get_cfg_opts(cls):
@ -67,12 +65,12 @@ class MSDNSBackend(base.AgentBackend):
def start(self):
"""Start the backend"""
LOG.info(_LI("Started msdns backend"))
LOG.info("Started msdns backend")
def find_zone_serial(self, zone_name):
"""Return the zone's serial"""
zone_name = zone_name.rstrip(".")
LOG.debug("Finding zone: %s" % zone_name)
LOG.debug("Finding zone: %s", zone_name)
try:
return self._dnsutils.get_zone_serial(zone_name)
except os_win_exc.DNSZoneNotFound:
@ -82,7 +80,7 @@ class MSDNSBackend(base.AgentBackend):
def create_zone(self, zone):
"""Create a new DNS Zone"""
zone_name = zone.origin.to_text(omit_final_dot=True).decode('utf-8')
LOG.debug("Creating zone: %s" % zone_name)
LOG.debug("Creating zone: %s", zone_name)
try:
self._dnsutils.zone_create(
zone_name=zone_name,
@ -107,7 +105,7 @@ class MSDNSBackend(base.AgentBackend):
"""Instruct MSDNS to request an AXFR from MiniDNS.
"""
zone_name = zone.origin.to_text(omit_final_dot=True).decode('utf-8')
LOG.debug("Updating zone: %s" % zone_name)
LOG.debug("Updating zone: %s", zone_name)
self._dnsutils.zone_update(zone_name)
def delete_zone(self, zone_name):

View File

@ -18,12 +18,10 @@ import abc
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI
from designate.context import DesignateContext
from designate.plugin import DriverPlugin
from designate.mdns import rpcapi as mdns_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -58,10 +56,10 @@ class Backend(DriverPlugin):
self.delay = CONF['service:pool_manager'].poll_delay
def start(self):
LOG.info(_LI('Starting %s backend'), self.get_canonical_name())
LOG.info('Starting %s backend', self.get_canonical_name())
def stop(self):
LOG.info(_LI('Stopped %s backend'), self.get_canonical_name())
LOG.info('Stopped %s backend', self.get_canonical_name())
@property
def mdns_api(self):

View File

@ -144,13 +144,13 @@ class EnhancedDNSClient(object):
return zone
def getZone(self, zoneName):
LOG.debug("Performing getZone with zoneName: %s" % zoneName)
LOG.debug("Performing getZone with zoneName: %s", zoneName)
zoneName = self._sanitizeZoneName(zoneName)
try:
return self.client.service.getZone(zoneName=zoneName)
except Exception as e:
raise EnhancedDNSException('Akamai Communication Failure: %s' % e)
def setZones(self, zones):
LOG.debug("Performing setZones")
@ -169,7 +169,7 @@ class EnhancedDNSClient(object):
% e)
def setZone(self, zone):
LOG.debug("Performing setZone with zoneName: %s" % zone.zoneName)
LOG.debug("Performing setZone with zoneName: %s", zone.zoneName)
try:
self.client.service.setZone(zone=zone)
except Exception as e:

View File

@ -28,8 +28,6 @@ from designate import exceptions
from designate import utils
from designate.backend import base
from designate.utils import DEFAULT_MDNS_PORT
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
DEFAULT_MASTER_PORT = DEFAULT_MDNS_PORT
@ -140,5 +138,5 @@ class Bind9Backend(base.Backend):
LOG.debug('Executing RNDC call: %r', rndc_call)
utils.execute(*rndc_call)
except utils.processutils.ProcessExecutionError as e:
LOG.info(_LI('RNDC call failure: %s'), e)
LOG.info('RNDC call failure: %s', e)
raise exceptions.Backend(e)

View File

@ -21,8 +21,6 @@ from keystoneauth1 import session as ks_session
from oslo_log import log as logging
from designate.backend import base
from designate.i18n import _LI
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
@ -86,19 +84,19 @@ class DesignateBackend(base.Backend):
return self._client
def create_zone(self, context, zone):
msg = _LI('Creating zone %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': zone['id'], 'd_name': zone['name']})
LOG.info('Creating zone %(d_id)s / %(d_name)s',
{'d_id': zone['id'], 'd_name': zone['name']})
masters = ["%s:%s" % (i.host, i.port) for i in self.masters]
self.client.zones.create(
zone.name, 'SECONDARY', masters=masters)
def delete_zone(self, context, zone):
msg = _LI('Deleting zone %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': zone['id'], 'd_name': zone['name']})
LOG.info('Deleting zone %(d_id)s / %(d_name)s',
{'d_id': zone['id'], 'd_name': zone['name']})
try:
self.client.zones.delete(zone.name)
except exceptions.NotFound:
msg = _LW("Zone %s not found on remote Designate, Ignoring")
LOG.warning(msg, zone.id)
LOG.warning("Zone %s not found on remote Designate, Ignoring",
zone.id)

View File

@ -25,9 +25,6 @@ from requests.adapters import HTTPAdapter
from designate import exceptions
from designate import utils
from designate.backend import base
from designate.i18n import _LI
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -147,7 +144,7 @@ class DynClient(object):
header = "-H '%s: %s'" % (element, kwargs['headers'][element])
string_parts.append(header)
LOG.debug("REQ: %s" % " ".join(string_parts))
LOG.debug("REQ: %s", " ".join(string_parts))
if 'data' in kwargs:
LOG.debug("REQ BODY: %s\n" % (kwargs['data']))
@ -214,7 +211,7 @@ class DynClient(object):
if resp.status_code >= 400:
LOG.debug(
"Request returned failure status: %s" %
"Request returned failure status: %s",
resp.status_code)
raise DynClientError.from_response(resp)
return resp
@ -231,7 +228,7 @@ class DynClient(object):
while status == 307:
time.sleep(1)
url = response.headers.get('Location')
LOG.debug("Polling %s" % url)
LOG.debug("Polling %s", url)
polled_response = self.get(url)
status = response.status
@ -345,7 +342,7 @@ class DynECTBackend(base.Backend):
timings=CONF[CFG_GROUP].timings)
def create_zone(self, context, zone):
LOG.info(_LI('Creating zone %(d_id)s / %(d_name)s'),
LOG.info('Creating zone %(d_id)s / %(d_name)s',
{'d_id': zone['id'], 'd_name': zone['name']})
url = '/Secondary/%s' % zone['name'].rstrip('.')
@ -366,9 +363,8 @@ class DynECTBackend(base.Backend):
except DynClientError as e:
for emsg in e.msgs:
if emsg['ERR_CD'] == 'TARGET_EXISTS':
msg = _LI("Zone already exists, updating existing "
"zone instead %s")
LOG.info(msg, zone['name'])
LOG.info("Zone already exists, updating existing "
"zone instead %s", zone['name'])
client.put(url, data=data)
break
else:
@ -378,7 +374,7 @@ class DynECTBackend(base.Backend):
client.logout()
def delete_zone(self, context, zone):
LOG.info(_LI('Deleting zone %(d_id)s / %(d_name)s'),
LOG.info('Deleting zone %(d_id)s / %(d_name)s',
{'d_id': zone['id'], 'd_name': zone['name']})
url = '/Zone/%s' % zone['name'].rstrip('.')
client = self.get_client()
@ -386,9 +382,9 @@ class DynECTBackend(base.Backend):
client.delete(url)
except DynClientError as e:
if e.http_status == 404:
LOG.warning(_LW("Attempt to delete %(d_id)s / %(d_name)s "
"caused 404, ignoring.") %
{'d_id': zone['id'], 'd_name': zone['name']})
LOG.warning("Attempt to delete %(d_id)s / %(d_name)s "
"caused 404, ignoring.",
{'d_id': zone['id'], 'd_name': zone['name']})
pass
else:
raise

View File

@ -15,7 +15,6 @@
# under the License.
from oslo_log import log as logging
from designate.i18n import _LI
from designate.backend import base
@ -26,7 +25,7 @@ class FakeBackend(base.Backend):
__plugin_name__ = 'fake'
def create_zone(self, context, zone):
LOG.info(_LI('Create Zone %r'), zone)
LOG.info('Create Zone %r', zone)
def delete_zone(self, context, zone):
LOG.info(_LI('Delete Zone %r'), zone)
LOG.info('Delete Zone %r', zone)

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from designate.backend import base
from designate import exceptions
from designate.i18n import _LI
from designate.backend.impl_infoblox import connector
from designate.backend.impl_infoblox import object_manipulator
@ -43,7 +42,7 @@ class InfobloxBackend(base.Backend):
"Infoblox only supports mDNS instances on port 53")
def create_zone(self, context, zone):
LOG.info(_LI('Create Zone %r'), zone)
LOG.info('Create Zone %r', zone)
dns_net_view = self.infoblox.get_dns_view(context.tenant)
self.infoblox.create_zone_auth(
@ -52,8 +51,8 @@ class InfobloxBackend(base.Backend):
)
def delete_zone(self, context, zone):
LOG.info(_LI('Delete Zone %r'), zone)
LOG.info('Delete Zone %r', zone)
self.infoblox.delete_zone_auth(zone['name'][0:-1])
def ping(self, context):
LOG.info(_LI('Ping'))
LOG.info('Ping')

View File

@ -62,7 +62,7 @@ class Infoblox(object):
setattr(self, opt, options.get(opt) or getattr(config, opt))
for opt in reqd_opts:
LOG.debug("self.%s = %s" % (opt, getattr(self, opt)))
LOG.debug("self.%s = %s", opt, getattr(self, opt))
if not getattr(self, opt):
raise exc.InfobloxIsMisconfigured(option=opt)
@ -90,7 +90,7 @@ class Infoblox(object):
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
LOG.debug("key: %s, value: %s" % (key, value))
LOG.debug("key: %s, value: %s", key, value)
attrs_queries.append('*' + key + '=' + value['value'])
query += '&'.join(attrs_queries)
if query_params:

View File

@ -90,9 +90,9 @@ class InfobloxObjectManipulator(object):
net_view_name=net_view,
dns_view_name=dns_view)
except exc.InfobloxException as e:
LOG.warning(_("Issue happens during views creating: %s"), e)
LOG.warning("Issue happens during views creating: %s", e)
LOG.debug("net_view: %s, dns_view: %s" % (net_view, dns_view))
LOG.debug("net_view: %s, dns_view: %s", net_view, dns_view)
return dns_view
def get_dns_view(self, tenant):
@ -142,15 +142,15 @@ class InfobloxObjectManipulator(object):
if check_if_exists:
ib_object = self._get_infoblox_object_or_none(obj_type, payload)
if ib_object:
LOG.info(_(
"Infoblox %(obj_type)s already exists: %(ib_object)s"),
LOG.info(
"Infoblox %(obj_type)s already exists: %(ib_object)s",
{'obj_type': obj_type, 'ib_object': ib_object})
if not ib_object:
payload.update(additional_create_kwargs)
ib_object = self.connector.create_object(obj_type, payload,
return_fields)
LOG.info(_("Infoblox %(obj_type)s was created: %(ib_object)s"),
LOG.info("Infoblox %(obj_type)s was created: %(ib_object)s",
{'obj_type': obj_type, 'ib_object': ib_object})
return ib_object
@ -186,7 +186,7 @@ class InfobloxObjectManipulator(object):
def _update_infoblox_object_by_ref(self, ref, update_kwargs):
self.connector.update_object(ref, update_kwargs)
LOG.info(_('Infoblox object was updated: %s'), ref)
LOG.info('Infoblox object was updated: %s', ref)
def _delete_infoblox_object(self, obj_type, payload):
ib_object_ref = None
@ -204,4 +204,4 @@ class InfobloxObjectManipulator(object):
if ib_object_ref:
self.connector.delete_object(ib_object_ref)
LOG.info(_('Infoblox object was deleted: %s'), ib_object_ref)
LOG.info('Infoblox object was deleted: %s', ib_object_ref)

View File

@ -66,8 +66,8 @@ class NSD4Backend(base.Backend):
def _execute_nsd4(self, command):
try:
LOG.debug('Executing NSD4 control call: %s on %s' % (command,
self.host))
LOG.debug('Executing NSD4 control call: %s on %s',
command, self.host)
result = self._command(command)
except (ssl.SSLError, socket.error) as e:
LOG.debug('NSD4 control call failure: %s' % e)

View File

@ -25,7 +25,6 @@ from oslo_utils import excutils
from sqlalchemy.sql import select
from designate import exceptions
from designate.i18n import _LC
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
@ -163,9 +162,8 @@ class PowerDNSBackend(base.Backend):
except exceptions.ZoneNotFound:
# If the Zone is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a zone which is '
'not present in the backend. ID: %s') %
zone['id'])
LOG.critical('Attempted to delete a zone which is not present '
'in the backend. ID: %s', zone['id'])
return
except Exception:
with excutils.save_and_reraise_exception():

View File

@ -16,8 +16,6 @@
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Column, Boolean
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
meta = MetaData()
@ -52,8 +50,8 @@ def upgrade(migrate_engine):
'dns.records.ttl = designate.domains.ttl WHERE powerdns.records'
'.inherit_ttl = 1;')
LOG.warning(_LW('**** A manual post-migration step is required ****'))
LOG.warning(_LW('Please issue this query: %s') % pmq)
LOG.warning('**** A manual post-migration step is required ****')
LOG.warning('Please issue this query: %s', pmq)
def downgrade(migrate_engine):

View File

@ -18,9 +18,6 @@ import sys
from oslo_log import log as logging
from sqlalchemy import MetaData, Table
from designate.i18n import _LW
from designate.i18n import _LE
LOG = logging.getLogger(__name__)
meta = MetaData()
@ -29,7 +26,7 @@ meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
LOG.warning(_LW('It will not be possible to downgrade from schema #11'))
LOG.warning('It will not be possible to downgrade from schema #11')
records_table = Table('records', meta, autoload=True)
records_table.c.designate_id.drop()
@ -37,5 +34,5 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine):
LOG.error(_LE('It is not possible to downgrade from schema #11'))
LOG.error('It is not possible to downgrade from schema #11')
sys.exit(1)

View File

@ -35,10 +35,6 @@ import oslo_messaging as messaging
from oslo_log import log as logging
from oslo_concurrency import lockutils
from designate.i18n import _LI
from designate.i18n import _LC
from designate.i18n import _LE
from designate.i18n import _LW
from designate import context as dcontext
from designate import exceptions
from designate import dnsutils
@ -236,8 +232,8 @@ class Service(service.RPCService, service.Service):
if (cfg.CONF['service:central'].managed_resource_tenant_id ==
"00000000-0000-0000-0000-000000000000"):
msg = _LW("Managed Resource Tenant ID is not properly configured")
LOG.warning(msg)
LOG.warning("Managed Resource Tenant ID is not properly "
"configured")
super(Service, self).start()
@ -363,8 +359,8 @@ class Service(service.RPCService, service.Service):
Check that the placement of the requested rrset belongs to any of the
zone's subzones.
"""
LOG.debug("Checking if %s belongs in any of %s subzones" %
(recordset_name, zone.name))
LOG.debug("Checking if %s belongs in any of %s subzones",
recordset_name, zone.name)
criterion = criterion or {}
@ -426,14 +422,13 @@ class Service(service.RPCService, service.Service):
signal.setitimer(signal.ITIMER_REAL, 0)
except Timeout:
LOG.critical(_LC(
LOG.critical(
'Blacklist regex (%(pattern)s) took too long to evaluate '
'against zone name (%(zone_name)s') %
{
'pattern': blacklist.pattern,
'zone_name': zone_name
}
)
'against zone name (%(zone_name)s)',
{
'pattern': blacklist.pattern,
'zone_name': zone_name
})
return True
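
The surrounding (elided) code bounds blacklist regex evaluation with a real-time interval timer, as the setitimer() call above suggests. A minimal, hedged sketch of the technique; Timeout and the handler are illustrative, and SIGALRM only works in the main thread on POSIX:

import re
import signal

class Timeout(Exception):
    pass

def _raise_timeout(signum, frame):
    raise Timeout()

def blacklist_matches(pattern, zone_name, seconds=0.05):
    signal.signal(signal.SIGALRM, _raise_timeout)
    signal.setitimer(signal.ITIMER_REAL, seconds)  # arm the timer
    try:
        return re.match(pattern, zone_name) is not None
    except Timeout:
        return True  # treat a pathologically slow regex as a match
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)  # disarm the timer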
@ -879,10 +874,10 @@ class Service(service.RPCService, service.Service):
# Handle super-zones appropriately
subzones = self._is_superzone(context, zone.name, zone.pool_id)
msg = 'Unable to create zone because another tenant owns a ' \
'subzone of the zone'
msg = ('Unable to create zone because another tenant owns a subzone '
'of the zone')
if subzones:
LOG.debug("Zone '{0}' is a superzone.".format(zone.name))
LOG.debug("Zone '%s' is a superzone.", zone.name)
for subzone in subzones:
if subzone.tenant_id != zone.tenant_id:
raise exceptions.IllegalParentZone(msg)
@ -896,8 +891,8 @@ class Service(service.RPCService, service.Service):
pool_ns_records = self._get_pool_ns_records(context, zone.pool_id)
if len(pool_ns_records) == 0:
LOG.critical(_LC('No nameservers configured. '
'Please create at least one nameserver'))
LOG.critical('No nameservers configured. Please create at least '
'one nameserver')
raise exceptions.NoServersConfigured()
# End of pre-flight checks, create zone
@ -923,9 +918,8 @@ class Service(service.RPCService, service.Service):
# If zone is a superzone, update subzones
# with new parent IDs
for subzone in subzones:
LOG.debug("Updating subzone '{0}' parent ID "
"using superzone ID '{1}'"
.format(subzone.name, zone.id))
LOG.debug("Updating subzone '%s' parent ID using "
"superzone ID '%s'", subzone.name, zone.id)
subzone.parent_zone_id = zone.id
self.update_zone(context, subzone)
@ -1105,7 +1099,7 @@ class Service(service.RPCService, service.Service):
'before deleting this zone')
if hasattr(context, 'abandon') and context.abandon:
LOG.info(_LI("Abandoning zone '%(zone)s'"), {'zone': zone.name})
LOG.info("Abandoning zone '%(zone)s'", {'zone': zone.name})
zone = self.storage.delete_zone(context, zone.id)
else:
zone = self._delete_zone_in_storage(context, zone)
@ -1133,8 +1127,8 @@ class Service(service.RPCService, service.Service):
policy.check('purge_zones', context, criterion)
LOG.debug("Performing purge with limit of %r and criterion of %r"
% (limit, criterion))
LOG.debug("Performing purge with limit of %r and criterion of %r",
limit, criterion)
return self.storage.purge_zones(context, criterion, limit)
@ -1161,11 +1155,9 @@ class Service(service.RPCService, service.Service):
# Perform XFR if serials are not equal
if serial > zone.serial:
msg = _LI(
"Serial %(srv_serial)d is not equal to zone's %(serial)d,"
" performing AXFR")
LOG.info(
msg, {"srv_serial": serial, "serial": zone.serial})
LOG.info("Serial %(srv_serial)d is not equal to zone's "
"%(serial)d, performing AXFR",
{"srv_serial": serial, "serial": zone.serial})
self.mdns_api.perform_zone_xfr(context, zone)
def count_zones(self, context, criterion=None):
@ -1935,8 +1927,7 @@ class Service(service.RPCService, service.Service):
fip_ptr['ptrdname'] = record['data']
fip_ptr['description'] = record['description']
else:
LOG.debug("No record information found for %s" %
value[0]['id'])
LOG.debug("No record information found for %s", value[0]['id'])
# Store the "fip_record" with the region and it's id as key
fips.append(fip_ptr)
@ -2019,12 +2010,15 @@ class Service(service.RPCService, service.Service):
zone = self.storage.find_zone(
elevated_context, {'name': zone_name})
except exceptions.ZoneNotFound:
msg = _LI(
'Creating zone for %(fip_id)s:%(region)s - '
'%(fip_addr)s zone %(zonename)s'), \
{'fip_id': floatingip_id, 'region': region,
'fip_addr': fip['address'], 'zonename': zone_name}
LOG.info(msg)
LOG.info(
'Creating zone for %(fip_id)s:%(region)s - %(fip_addr)s '
'zone %(zonename)s',
{
'fip_id': floatingip_id,
'region': region,
'fip_addr': fip['address'],
'zonename': zone_name
})
email = cfg.CONF['service:central'].managed_resource_email
tenant_id = cfg.CONF['service:central'].managed_resource_tenant_id
@ -2296,7 +2290,7 @@ class Service(service.RPCService, service.Service):
criterion={'pool_id': pool_id, 'action': '!DELETE'})
# If there are existing zones, do not delete the pool
LOG.debug("Zones is None? %r " % zones)
LOG.debug("Zones is None? %r", zones)
if len(zones) == 0:
pool = self.storage.delete_pool(context, pool_id)
else:
@ -2333,12 +2327,12 @@ class Service(service.RPCService, service.Service):
zone, status, serial)
if zone.status != 'DELETED':
LOG.debug('Setting zone %s, serial %s: action %s, status %s'
% (zone.id, zone.serial, zone.action, zone.status))
LOG.debug('Setting zone %s, serial %s: action %s, status %s',
zone.id, zone.serial, zone.action, zone.status)
self.storage.update_zone(context, zone)
if deleted:
LOG.debug('update_status: deleting %s' % zone.name)
LOG.debug('update_status: deleting %s', zone.name)
self.storage.delete_zone(context, zone.id)
return zone
@ -2375,18 +2369,18 @@ class Service(service.RPCService, service.Service):
record, status, serial)
if record.obj_what_changed():
LOG.debug('Setting record %s, serial %s: action %s, status %s'
% (record.id, record.serial,
record.action, record.status))
LOG.debug('Setting record %s, serial %s: action %s, '
'status %s', record.id, record.serial,
record.action, record.status)
self.storage.update_record(context, record)
# TODO(Ron): Including this to retain the current logic.
# We should NOT be deleting records. The record status should
# be used to indicate the record has been deleted.
if deleted:
LOG.debug('Deleting record %s, serial %s: action %s, status %s'
% (record.id, record.serial,
record.action, record.status))
LOG.debug('Deleting record %s, serial %s: action %s, '
'status %s', record.id, record.serial,
record.action, record.status)
self.storage.delete_record(context, record.id)
@ -2470,7 +2464,7 @@ class Service(service.RPCService, service.Service):
zone_transfer_request = self.storage.get_zone_transfer_request(
elevated_context, zone_transfer_request_id)
LOG.info(_LI('Target Tenant ID found - using scoped policy'))
LOG.info('Target Tenant ID found - using scoped policy')
target = {
'target_tenant_id': zone_transfer_request.target_tenant_id,
'tenant_id': zone_transfer_request.tenant_id,
@ -2698,8 +2692,7 @@ class Service(service.RPCService, service.Service):
zone_import.message = 'An SOA record is required.'
zone_import.status = 'ERROR'
except Exception as e:
msg = _LE('An undefined error occurred during zone import')
LOG.exception(msg)
LOG.exception('An undefined error occurred during zone import')
msg = 'An undefined error occurred. %s'\
% six.text_type(e)[:130]
zone_import.message = msg
@ -2726,9 +2719,8 @@ class Service(service.RPCService, service.Service):
zone_import.status = 'ERROR'
zone_import.message = six.text_type(e)
except Exception as e:
msg = _LE('An undefined error occurred during zone '
'import creation')
LOG.exception(msg)
LOG.exception('An undefined error occurred during zone '
'import creation')
msg = 'An undefined error occurred. %s'\
% six.text_type(e)[:130]
zone_import.message = msg

View File

@ -20,14 +20,12 @@ from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
import debtcollector
from designate.i18n import _LE
from designate import service
from designate import utils
from designate import version
from designate import hookpoints
from designate.pool_manager import service as pool_manager_service
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('workers', 'designate.pool_manager',
@ -45,10 +43,10 @@ def main():
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if cfg.CONF['service:worker'].enabled:
LOG.error(_LE('You have designate-worker enabled, starting '
'designate-pool-manager is incompatible with '
'designate-worker. You need to start '
'designate-worker instead.'))
LOG.error('You have designate-worker enabled, starting '
'designate-pool-manager is incompatible with '
'designate-worker. You need to start '
'designate-worker instead.')
sys.exit(1)
debtcollector.deprecate('designate-pool-manager is deprecated in favor of '

View File

@ -19,14 +19,12 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from designate.i18n import _LE
from designate import hookpoints
from designate import service
from designate import utils
from designate import version
from designate.producer import service as producer_service
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('workers', 'designate.producer', group='service:producer')
@ -41,9 +39,9 @@ def main():
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if not cfg.CONF['service:worker'].enabled:
LOG.error(_LE('You do not have designate-worker enabled, starting '
'designate-producer is not allowed. '
'You need to start designate-zone-manager instead.'))
LOG.error('You do not have designate-worker enabled, starting '
'designate-producer is not allowed. '
'You need to start designate-zone-manager instead.')
sys.exit(1)
hookpoints.log_hook_setup()

View File

@ -19,14 +19,12 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from designate.i18n import _LE
from designate import hookpoints
from designate import service
from designate import utils
from designate import version
from designate.worker import service as worker_service
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('workers', 'designate.worker', group='service:worker')
@ -41,9 +39,9 @@ def main():
# NOTE(timsim): This is to ensure people don't start the wrong
# services when the worker model is enabled.
if not cfg.CONF['service:worker'].enabled:
LOG.error(_LE('You do not have designate-worker enabled, starting '
'designate-worker is not allowed. '
'You need to start designate-pool-manager instead.'))
LOG.error('You do not have designate-worker enabled, starting '
'designate-worker is not allowed. '
'You need to start designate-pool-manager instead.')
sys.exit(1)
hookpoints.log_hook_setup()

View File

@ -20,8 +20,6 @@ from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
import debtcollector
from designate.i18n import _LE
from designate.i18n import _LW
from designate import service
from designate import utils
from designate import version
@ -45,10 +43,10 @@ def main():
# services when the worker model is enabled.
if cfg.CONF['service:worker'].enabled:
LOG.error(_LE('You have designate-worker enabled, starting '
'designate-zone-manager is incompatible with '
'designate-worker. You need to start '
'designate-producer instead.'))
LOG.error('You have designate-worker enabled, starting '
'designate-zone-manager is incompatible with '
'designate-worker. You need to start '
'designate-producer instead.')
sys.exit(1)
debtcollector.deprecate('designate-zone-manager is deprecated in '
@ -56,7 +54,7 @@ def main():
version='newton',
removal_version='rocky')
LOG.warning(_LW('Starting designate-producer under the zone-manager name'))
LOG.warning('Starting designate-producer under the zone-manager name')
server = producer_service.Service(
threads=CONF['service:zone_manager'].threads)

View File

@ -20,7 +20,6 @@ from oslo_context import context
from oslo_log import log as logging
from designate import policy
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -120,7 +119,7 @@ class DesignateContext(context.RequestContext):
policy.check('use_sudo', self)
LOG.info(_LI('Accepted sudo from user %(user)s to tenant %(tenant)s'),
LOG.info('Accepted sudo from user %(user)s to tenant %(tenant)s',
{'user': self.user, 'tenant': tenant})
self.original_tenant = self.tenant
self.tenant = tenant

View File

@ -25,10 +25,6 @@ import tenacity
import tooz.coordination
from designate.utils import generate_uuid
from designate.i18n import _LI
from designate.i18n import _LW
from designate.i18n import _LE
LOG = log.getLogger(__name__)
@ -85,10 +81,9 @@ class CoordinationMixin(object):
self._coordinator_run_watchers)
else:
msg = _LW("No coordination backend configured, distributed "
"coordination functionality will be disabled. "
"Please configure a coordination backend.")
LOG.warning(msg)
LOG.warning("No coordination backend configured, distributed "
"coordination functionality will be disabled. "
"Please configure a coordination backend.")
super(CoordinationMixin, self).start()
@ -111,8 +106,7 @@ class CoordinationMixin(object):
self._coordination_started = True
except Exception:
LOG.warning(_LW("Failed to start Coordinator:"),
exc_info=True)
LOG.warning("Failed to start Coordinator:", exc_info=True)
time.sleep(15)
def stop(self):
@ -134,8 +128,7 @@ class CoordinationMixin(object):
try:
self._coordinator.heartbeat()
except tooz.coordination.ToozError:
LOG.exception(_LE('Error sending a heartbeat to coordination '
'backend.'))
LOG.exception('Error sending a heartbeat to coordination backend.')
def _coordinator_run_watchers(self):
if not self._coordination_started:
@ -156,9 +149,9 @@ class Partitioner(object):
self._callbacks = []
def _warn_no_backend(self):
LOG.warning(_LW('No coordination backend configured, assuming we are '
'the only worker. Please configure a coordination '
'backend'))
LOG.warning('No coordination backend configured, assuming we are '
'the only worker. Please configure a coordination '
'backend')
@tenacity.retry(stop=tenacity.stop_after_attempt(5),
wait=tenacity.wait_random(max=2),
@ -170,17 +163,17 @@ class Partitioner(object):
return get_members_req.get()
except tooz.coordination.GroupNotCreated:
LOG.error(_LE('Attempting to partition over a non-existent group: '
'%s'), self._group_id)
LOG.error('Attempting to partition over a non-existent group: %s',
self._group_id)
raise
except tooz.coordination.ToozError:
LOG.error(_LE('Error getting group membership info from '
'coordination backend.'))
LOG.error('Error getting group membership info from coordination '
'backend.')
raise
def _on_group_change(self, event):
LOG.debug("Received member change %s" % event)
LOG.debug("Received member change %s", event)
members, self._my_partitions = self._update_partitions()
self._run_callbacks(members, event)
@ -235,7 +228,7 @@ class Partitioner(object):
self._started = True
def watch_partition_change(self, callback):
LOG.debug("Watching for change %s" % self._group_id)
LOG.debug("Watching for change %s", self._group_id)
self._callbacks.append(callback)
if self._started:
if not self._coordinator:
@ -256,14 +249,14 @@ class LeaderElection(object):
self._leader = False
def _warn_no_backend(self):
LOG.warning(_LW('No coordination backend configured, assuming we are '
'the leader. Please configure a coordination backend'))
LOG.warning('No coordination backend configured, assuming we are the '
'leader. Please configure a coordination backend')
def start(self):
self._started = True
if self._coordinator:
LOG.info(_LI('Starting leader election for group %(group)s'),
LOG.info('Starting leader election for group %(group)s',
{'group': self._group_id})
# Nominate myself for election
@ -280,7 +273,7 @@ class LeaderElection(object):
self._started = False
if self._coordinator:
LOG.info(_LI('Stopping leader election for group %(group)s'),
LOG.info('Stopping leader election for group %(group)s',
{'group': self._group_id})
# Remove the elected_as_leader callback
@ -289,14 +282,14 @@ class LeaderElection(object):
if self._leader:
# Tell Tooz we no longer wish to be the leader
LOG.info(_LI('Standing down as leader candidate for group '
'%(group)s'), {'group': self._group_id})
LOG.info('Standing down as leader candidate for group '
'%(group)s', {'group': self._group_id})
self._leader = False
self._coordinator.stand_down_group_leader(self._group_id)
elif self._leader:
LOG.info(_LI('Standing down as leader candidate for group '
'%(group)s'), {'group': self._group_id})
LOG.info('Standing down as leader candidate for group %(group)s',
{'group': self._group_id})
self._leader = False
@property
@ -304,7 +297,7 @@ class LeaderElection(object):
return self._leader
def _on_elected_leader(self, event):
LOG.info(_LI('Successfully elected as leader of group %(group)s'),
LOG.info('Successfully elected as leader of group %(group)s',
{'group': self._group_id})
self._leader = True
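
A hedged sketch of the tooz calls that the mixin, partitioner, and leader election above build on; the backend URL and group name are illustrative:

import uuid
import tooz.coordination

coordinator = tooz.coordination.get_coordinator(
    'memcached://127.0.0.1:11211', uuid.uuid4().hex.encode())
coordinator.start()
# Group operations return async results; .get() waits for completion and
# raises (e.g. GroupAlreadyExist) on failure.
coordinator.create_group(b'designate.service').get()
coordinator.join_group(b'designate.service').get()
coordinator.heartbeat()  # keep our membership (and any lease) alive
members = coordinator.get_members(b'designate.service').get()
coordinator.leave_group(b'designate.service').get()
coordinator.stop()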

View File

@ -31,8 +31,6 @@ from oslo_config import cfg
from designate import context
from designate import exceptions
from designate import objects
from designate.i18n import _LE
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -106,29 +104,26 @@ class SerializationMiddleware(DNSMiddleware):
}
except dns.message.UnknownTSIGKey:
LOG.error(_LE("Unknown TSIG key from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
LOG.error("Unknown TSIG key from %(host)s:%(port)d",
{'host': request['addr'][0], 'port': request['addr'][1]})
response = self._build_error_response()
except dns.tsig.BadSignature:
LOG.error(_LE("Invalid TSIG signature from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
LOG.error("Invalid TSIG signature from %(host)s:%(port)d",
{'host': request['addr'][0], 'port': request['addr'][1]})
response = self._build_error_response()
except dns.exception.DNSException:
LOG.error(_LE("Failed to deserialize packet from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
LOG.error("Failed to deserialize packet from %(host)s:%(port)d",
{'host': request['addr'][0], 'port': request['addr'][1]})
response = self._build_error_response()
except Exception:
LOG.exception(_LE("Unknown exception deserializing packet "
"from %(host)s %(port)d") %
LOG.exception("Unknown exception deserializing packet "
"from %(host)s %(port)d",
{'host': request['addr'][0],
'port': request['addr'][1]})
@ -145,8 +140,7 @@ class SerializationMiddleware(DNSMiddleware):
yield response.get_wire()
else:
LOG.error(_LE("Unexpected response %(resp)s") %
repr(response))
LOG.error("Unexpected response %r", response)
class TsigInfoMiddleware(DNSMiddleware):
@ -346,7 +340,7 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
to = eventlet.Timeout(timeout)
log_info = {'name': zone_name, 'host': srv}
try:
LOG.info(_LI("Doing AXFR for %(name)s from %(host)s"), log_info)
LOG.info("Doing AXFR for %(name)s from %(host)s", log_info)
xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
timeout=1, port=srv['port'], source=source)
@ -354,30 +348,26 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
break
except eventlet.Timeout as t:
if t == to:
msg = _LE("AXFR timed out for %(name)s from %(host)s")
LOG.error(msg % log_info)
LOG.error("AXFR timed out for %(name)s from %(host)s",
log_info)
continue
except dns.exception.FormError:
msg = _LE("Zone %(name)s is not present on %(host)s."
"Trying next server.")
LOG.error(msg % log_info)
LOG.error("Zone %(name)s is not present on %(host)s."
"Trying next server.", log_info)
except socket.error:
msg = _LE("Connection error when doing AXFR for %(name)s from "
"%(host)s")
LOG.error(msg % log_info)
LOG.error("Connection error when doing AXFR for %(name)s from "
"%(host)s", log_info)
except Exception:
msg = _LE("Problem doing AXFR %(name)s from %(host)s. "
"Trying next server.")
LOG.exception(msg % log_info)
LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
"Trying next server.", log_info)
finally:
to.cancel()
continue
else:
msg = _LE("XFR failed for %(name)s. No servers in %(servers)s was "
"reached.")
raise exceptions.XFRFailure(
msg % {"name": zone_name, "servers": servers})
"XFR failed for %(name)s. No servers in %(servers)s was reached." %
{"name": zone_name, "servers": servers})
LOG.debug("AXFR Successful for %s" % raw_zone.origin.to_text())
LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
return raw_zone
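
A hedged sketch of the dnspython calls that do_axfr() wraps; the server address and zone name are illustrative. dns.query.xfr() yields the transfer messages and dns.zone.from_xfr() assembles them into a zone object:

import dns.query
import dns.zone

xfr = dns.query.xfr('192.0.2.1', 'example.org.', relativize=False,
                    timeout=1, port=53)
raw_zone = dns.zone.from_xfr(xfr, relativize=False)
print(raw_zone.origin.to_text())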

View File

@ -22,7 +22,6 @@ from designate import exceptions
from designate import policy
from designate import rpc
from designate.i18n import _ # noqa
from designate.i18n import _LI
from designate.objects import pool as pool_object
from designate.backend import impl_akamai
from designate.central import rpcapi as central_rpcapi
@ -79,7 +78,7 @@ class AkamaiCommands(base.Commands):
client = impl_akamai.EnhancedDNSClient(
target.options.get("username"), target.options.get("password"))
LOG.info(_LI("Doing batches of %i"), batch_size)
LOG.info("Doing batches of %i", batch_size)
criterion = {"pool_id": pool_id}
marker = None
@ -94,7 +93,7 @@ class AkamaiCommands(base.Commands):
update = []
if len(zones) == 0:
LOG.info(_LI("Stopping as there are no more zones."))
LOG.info("Stopping as there are no more zones.")
break
else:
marker = zones[-1]['id']
@ -103,6 +102,6 @@ class AkamaiCommands(base.Commands):
z = impl_akamai.build_zone(client, target, zone)
update.append(z)
LOG.info(_LI('Uploading %d Zones'), len(update))
LOG.info('Uploading %d Zones', len(update))
client.setZones(update)

View File

@ -22,8 +22,6 @@ import oslo_messaging as messaging
from designate import exceptions
from designate import rpc
from designate.i18n import _LI
from designate.i18n import _LC
from designate import objects
from designate.central import rpcapi as central_rpcapi
from designate.manage import base
@ -53,8 +51,8 @@ class PoolCommands(base.Commands):
try:
pools = self.central_api.find_pools(self.context)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response received from designate-central. "
"Check it is running, and retry"))
LOG.critical("No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
with open(file, 'w') as stream:
yaml.dump(
@ -71,8 +69,8 @@ class PoolCommands(base.Commands):
try:
pools = self.central_api.find_pools(self.context)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response received from designate-central. "
"Check it is running, and retry"))
LOG.critical("No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
r_pools = objects.PoolList()
for pool in pools:
@ -102,8 +100,8 @@ class PoolCommands(base.Commands):
default_flow_style=False))
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response received from designate-central. "
"Check it is running, and retry"))
LOG.critical("No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
@base.args('--file', help='The path to the yaml file describing the pools',
@ -137,14 +135,14 @@ class PoolCommands(base.Commands):
pool = self.central_api.get_pool(
self.context, xpool['id'])
except Exception:
LOG.critical(
_LC("Bad ID Supplied for pool %s"), xpool['name'])
LOG.critical("Bad ID Supplied for pool %s",
xpool['name'])
continue
else:
pool = self.central_api.find_pool(
self.context, {"name": xpool['name']})
LOG.info(_LI('Updating existing pool: %s'), pool)
LOG.info('Updating existing pool: %s', pool)
# TODO(kiall): Move the below into the pool object
@ -176,11 +174,11 @@ class PoolCommands(base.Commands):
if dry_run:
output_msg.append("Create Pool: %s" % pool)
else:
LOG.info(_LI('Creating new pool: %s'), pool)
LOG.info('Creating new pool: %s', pool)
self.central_api.create_pool(self.context, pool)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response received from designate-central."
" Check it is running, and retry"))
LOG.critical("No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
if delete:
@ -200,13 +198,13 @@ class PoolCommands(base.Commands):
output_msg.append("Delete Pool: %s" % p)
else:
LOG.info(_LI('Deleting %s'), p)
LOG.info('Deleting %s', p)
self.central_api.delete_pool(self.context, p.id)
except messaging.exceptions.MessagingTimeout:
LOG.critical(_LC("No response received from "
"designate-central. "
"Check it is running, and retry"))
LOG.critical(
"No response received from designate-central. "
"Check it is running, and retry")
sys.exit(1)
for line in output_msg:

View File

@ -22,8 +22,6 @@ from designate import exceptions
from designate import objects
from designate import rpc
from designate.central import rpcapi as central_rpcapi
from designate.i18n import _LI
from designate.i18n import _LE
from designate.manage import base
from designate.schema import format
@ -116,7 +114,7 @@ class TLDCommands(base.Commands):
if not os.path.exists(input_file):
raise Exception('TLD Input file Not Found')
LOG.info(_LI("Importing TLDs from %s"), input_file)
LOG.info("Importing TLDs from %s", input_file)
error_lines = []
tlds_added = 0
@ -136,11 +134,11 @@ class TLDCommands(base.Commands):
tlds_added += self._validate_and_create_tld(line,
error_lines)
LOG.info(_LI("Number of tlds added: %d"), tlds_added)
LOG.info("Number of tlds added: %d", tlds_added)
errors = len(error_lines)
if errors > 0:
LOG.error(_LE("Number of errors: %d") % errors)
LOG.error("Number of errors: %d", errors)
# Sorting the errors and printing them so that it is easier to
# read the errors
LOG.error(_LE("Error Lines:\n%s") % '\n'.join(sorted(error_lines)))
LOG.error("Error Lines:\n%s", '\n'.join(sorted(error_lines)))

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from designate.pool_manager import rpcapi as pool_mngr_api
from designate.central import rpcapi as central_api
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -29,7 +28,7 @@ class BaseEndpoint(object):
RPC_API_VERSION = None
def __init__(self, tg):
LOG.info(_LI("Initialized mDNS %s endpoint"), self.RPC_API_NAMESPACE)
LOG.info("Initialized mDNS %s endpoint", self.RPC_API_NAMESPACE)
self.tg = tg
self.target = messaging.Target(
namespace=self.RPC_API_NAMESPACE,

View File

@ -26,8 +26,6 @@ from oslo_log import log as logging
from designate import exceptions
from designate.mdns import xfr
from designate.central import rpcapi as central_api
from designate.i18n import _LI
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
@ -132,9 +130,8 @@ class RequestHandler(xfr.XFRMixin):
# We'll reply but don't do anything with the NOTIFY.
master_addr = zone.get_master_by_ip(notify_addr)
if not master_addr:
msg = _LW("NOTIFY for %(name)s from non-master server "
"%(addr)s, refusing.")
LOG.warning(msg % {"name": zone.name, "addr": notify_addr})
LOG.warning("NOTIFY for %(name)s from non-master server %(addr)s, "
"refusing.", {"name": zone.name, "addr": notify_addr})
response.set_rcode(dns.rcode.REFUSED)
yield response
raise StopIteration
@ -146,13 +143,11 @@ class RequestHandler(xfr.XFRMixin):
soa_answer = resolver.query(zone.name, 'SOA')
soa_serial = soa_answer[0].serial
if soa_serial == zone.serial:
msg = _LI("Serial %(serial)s is the same for master and us for "
"%(zone_id)s")
LOG.info(msg, {"serial": soa_serial, "zone_id": zone.id})
LOG.info("Serial %(serial)s is the same for master and us for "
"%(zone_id)s", {"serial": soa_serial, "zone_id": zone.id})
else:
msg = _LI("Scheduling AXFR for %(zone_id)s from %(master_addr)s")
info = {"zone_id": zone.id, "master_addr": master_addr}
LOG.info(msg, info)
LOG.info("Scheduling AXFR for %(zone_id)s from %(master_addr)s",
{"zone_id": zone.id, "master_addr": master_addr})
self.tg.add_thread(self.zone_sync, context, zone,
[master_addr])
@ -233,15 +228,15 @@ class RequestHandler(xfr.XFRMixin):
zone = self.storage.find_zone(context, criterion)
except exceptions.ZoneNotFound:
LOG.warning(_LW("ZoneNotFound while handling axfr request. "
"Question was %(qr)s") % {'qr': q_rrset})
LOG.warning("ZoneNotFound while handling axfr request. "
"Question was %(qr)s", {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
except exceptions.Forbidden:
LOG.warning(_LW("Forbidden while handling axfr request. "
"Question was %(qr)s") % {'qr': q_rrset})
LOG.warning("Forbidden while handling axfr request. "
"Question was %(qr)s", {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
@ -267,8 +262,7 @@ class RequestHandler(xfr.XFRMixin):
max_message_size = CONF['service:mdns'].max_message_size
if max_message_size > 65535:
LOG.warning(_LW('MDNS max message size must not be greater than '
'65535'))
LOG.warning('MDNS max message size must not be greater than 65535')
max_message_size = 65535
if request.had_tsig:
@ -304,9 +298,9 @@ class RequestHandler(xfr.XFRMixin):
if renderer.counts[dns.renderer.ANSWER] == 0:
# We've received a TooBig from the first attempted RRSet in
# this packet. Log a warning and abort the AXFR.
LOG.warning(_LW('Aborted AXFR of %(zone)s, a single RR '
'(%(rrset_type)s %(rrset_name)s) '
'exceeded the max message size.'),
LOG.warning('Aborted AXFR of %(zone)s, a single RR '
'(%(rrset_type)s %(rrset_name)s) '
'exceeded the max message size.',
{'zone': zone.name,
'rrset_type': record[1],
'rrset_name': record[3]})
@ -372,13 +366,13 @@ class RequestHandler(xfr.XFRMixin):
#
# To simplify things, this currently returns REFUSED in all cases.
# If zone transfers need different errors, we could revisit this.
LOG.info(_LI("NotFound, refusing. Question was %(qr)s"),
LOG.info("NotFound, refusing. Question was %(qr)s",
{'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
except exceptions.Forbidden:
LOG.info(_LI("Forbidden, refusing. Question was %(qr)s"),
LOG.info("Forbidden, refusing. Question was %(qr)s",
{'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
@ -389,14 +383,14 @@ class RequestHandler(xfr.XFRMixin):
zone = self.storage.find_zone(context, criterion)
except exceptions.ZoneNotFound:
LOG.warning(_LW("ZoneNotFound while handling query request"
". Question was %(qr)s") % {'qr': q_rrset})
LOG.warning("ZoneNotFound while handling query request. "
"Question was %(qr)s", {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
except exceptions.Forbidden:
LOG.warning(_LW("Forbidden while handling query request. "
"Question was %(qr)s") % {'qr': q_rrset})
LOG.warning("Forbidden while handling query request. "
"Question was %(qr)s", {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
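
A hedged sketch of the SOA serial comparison driving the NOTIFY handling above; the master address and serial values are illustrative:

import dns.resolver

resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ['192.0.2.1']  # the notifying master
soa_answer = resolver.query('example.org.', 'SOA')
soa_serial = soa_answer[0].serial
stored_serial = 2018032001  # the serial our storage last saw
if soa_serial == stored_serial:
    print('serials match, nothing to do')
else:
    print('serials differ, scheduling AXFR')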

View File

@ -28,8 +28,6 @@ import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI
from designate.i18n import _LW
from designate.mdns import base
from designate.metrics import metrics
@ -150,12 +148,12 @@ class NotifyEndpoint(base.BaseEndpoint):
break
retries_left -= retry_cnt
msg = _LW("Got lower serial for '%(zone)s' to '%(host)s:"
"%(port)s'. Expected:'%(es)d'. Got:'%(as)s'."
"Retries left='%(retries)d'") % {
'zone': zone.name, 'host': host, 'port': port,
'es': zone.serial, 'as': actual_serial,
'retries': retries_left}
msg = ("Got lower serial for '%(zone)s' to '%(host)s:"
"%(port)s'. Expected:'%(es)d'. Got:'%(as)s'."
"Retries left='%(retries)d'") % {
'zone': zone.name, 'host': host, 'port': port,
'es': zone.serial, 'as': actual_serial,
'retries': retries_left}
if not retries_left:
# return with error
@ -197,8 +195,8 @@ class NotifyEndpoint(base.BaseEndpoint):
while retry < max_retries:
retry += 1
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'."),
LOG.info("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.",
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port})
@ -211,23 +209,22 @@ class NotifyEndpoint(base.BaseEndpoint):
raise # unknown error, let it traceback
# Initial workaround for bug #1558096
LOG.info(
_LW("Got EAGAIN while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
LOG.info("Got EAGAIN while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
# retry sending the message
time.sleep(retry_interval)
continue
except dns.exception.Timeout:
LOG.warning(
_LW("Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
"Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
@ -237,14 +234,13 @@ class NotifyEndpoint(base.BaseEndpoint):
continue
except dns_query.BadResponse:
LOG.warning(
_LW("Got BadResponse while trying to send '%(msg)s' "
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
LOG.warning("Got BadResponse while trying to send '%(msg)s' "
"for '%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
break # no retries after BadResponse
# either we have a good response or an error that we don't want to
@ -261,18 +257,17 @@ class NotifyEndpoint(base.BaseEndpoint):
dns.rcode.SERVFAIL)) or \
(response.rcode() == dns.rcode.NOERROR and
not bool(response.answer)):
LOG.info(_LI("%(zone)s not found on %(server)s:%(port)d") %
LOG.info("%(zone)s not found on %(server)s:%(port)d",
{'zone': zone.name, 'server': host, 'port': port})
elif not (response.flags & dns.flags.AA) or dns.rcode.from_flags(
response.flags, response.ednsflags) != dns.rcode.NOERROR:
LOG.warning(
_LW("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.\nResponse message:\n%(resp)s\n") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'resp': str(response)})
LOG.warning("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.\nResponse message:\n%(resp)s\n",
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'resp': str(response)})
response = None
return response, retry
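
A hedged sketch of how a NOTIFY like those sent above can be built with dnspython; the target server is illustrative:

import dns.flags
import dns.message
import dns.opcode
import dns.query
import dns.rcode

notify = dns.message.make_query('example.org.', 'SOA')
notify.set_opcode(dns.opcode.NOTIFY)  # promote the SOA query to a NOTIFY
notify.flags |= dns.flags.AA
response = dns.query.udp(notify, '192.0.2.1', timeout=10, port=53)
print(dns.rcode.to_text(response.rcode()))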

View File

@ -16,7 +16,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate import rpc
from designate.loggingutils import rpc_logging
@ -82,10 +81,15 @@ class MdnsAPI(object):
LOG.debug('Letting worker send NOTIFYs instead')
return True
LOG.info(_LI("notify_zone_changed: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' to nameserver '%(host)s:%(port)s'"),
{'zone': zone.name, 'serial': zone.serial,
'host': host, 'port': port})
LOG.info(
"notify_zone_changed: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' to nameserver '%(host)s:%(port)s'",
{
'zone': zone.name,
'serial': zone.serial,
'host': host,
'port': port
})
# The notify_zone_changed method is a cast rather than a call since the
# caller need not wait for the notify to complete.
return self.notify_client.cast(
@ -97,10 +101,14 @@ class MdnsAPI(object):
def poll_for_serial_number(self, context, zone, nameserver, timeout,
retry_interval, max_retries, delay):
LOG.info(
_LI("poll_for_serial_number: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' on nameserver '%(host)s:%(port)s'"),
{'zone': zone.name, 'serial': zone.serial,
'host': nameserver.host, 'port': nameserver.port})
"poll_for_serial_number: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' on nameserver '%(host)s:%(port)s'",
{
'zone': zone.name,
'serial': zone.serial,
'host': nameserver.host,
'port': nameserver.port
})
# The poll_for_serial_number method is a cast rather than a call since
# the caller need not wait for the poll to complete. Mdns informs pool
# manager of the return value using update_status
@ -113,10 +121,14 @@ class MdnsAPI(object):
def get_serial_number(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
LOG.info(
_LI("get_serial_number: Calling mdns for zone '%(zone)s', serial "
"%(serial)s' on nameserver '%(host)s:%(port)s'"),
{'zone': zone.name, 'serial': zone.serial,
'host': host, 'port': port})
"get_serial_number: Calling mdns for zone '%(zone)s', serial "
"%(serial)s' on nameserver '%(host)s:%(port)s'",
{
'zone': zone.name,
'serial': zone.serial,
'host': host,
'port': port
})
cctxt = self.notify_client.prepare()
return cctxt.call(
context, 'get_serial_number', zone=zone,
@ -125,6 +137,6 @@ class MdnsAPI(object):
delay=delay)
def perform_zone_xfr(self, context, zone):
LOG.info(_LI("perform_zone_xfr: Calling mdns for zone %(zone)s"),
LOG.info("perform_zone_xfr: Calling mdns for zone %(zone)s",
{"zone": zone.name})
return self.xfr_client.cast(context, 'perform_zone_xfr', zone=zone)
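
The comments above call out why these methods are casts: the caller must not block. A hedged sketch of the cast/call split in oslo.messaging, with an illustrative topic and method names taken from this file:

from oslo_config import cfg
import oslo_messaging as messaging

transport = messaging.get_rpc_transport(cfg.CONF)
target = messaging.Target(topic='mdns', version='2.0')
client = messaging.RPCClient(transport, target)

ctxt = {}
# cast(): fire-and-forget; returns immediately and delivers no result.
client.cast(ctxt, 'perform_zone_xfr', zone='example.org.')
# call(): blocks until the server method returns (or the call times out).
serial = client.call(ctxt, 'get_serial_number', zone='example.org.')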

View File

@ -41,8 +41,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from designate.i18n import _LI
stats_client = importutils.import_any('monascastatsd',
'designate.metrics_client.noop')
@ -88,15 +86,16 @@ class Metrics(object):
"""
conf = cfg.CONF[CFG_GROUP]
if conf.enabled:
LOG.info(_LI("Statsd reports to %(host)s %(port)d") % {
'host': conf.hostname,
'port': conf.port
})
LOG.info("Statsd reports to %(host)s %(port)d",
{
'host': conf.hostname,
'port': conf.port
})
self._client.connection._flush_buffer()
self._client.connection.close_buffer()
self._client.connection.connect(conf.hostname, conf.port)
else:
LOG.info(_LI("Statsd disabled"))
LOG.info("Statsd disabled")
# The client cannot be disabled: mock out report()
self._client.connection.report = lambda *a, **kw: None
# There's no clean way to drain the outgoing buffer

View File

@ -13,9 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
#
from designate.i18n import _LE
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@ -32,7 +29,7 @@ class NoopConnection(object):
pass
def connect(self, *a, **kw):
LOG.error(_LE('Using noop metrics client. Metrics will be ignored.'))
LOG.error('Using noop metrics client. Metrics will be ignored.')
pass
def open_buffer(self):

View File

@ -22,8 +22,6 @@ from oslo_log import log as logging
from oslo_service import threadgroup
from designate import exceptions
from designate.i18n import _LW
from designate.i18n import _LE
from designate.network_api import base
@ -121,13 +119,12 @@ class NeutronNetworkAPI(base.NetworkAPI):
# NOTE: 401 might be that the user doesn't have neutron
# activated in a particular region, we'll just log the failure
# and go on with our lives.
LOG.warning(_LW("Calling Neutron resulted in a 401, "
"please investigate."))
LOG.warning("Calling Neutron resulted in a 401, "
"please investigate.")
LOG.exception(e)
return
except Exception as e:
LOG.error(_LE('Failed calling Neutron '
'%(region)s - %(endpoint)s'),
LOG.error('Failed calling Neutron %(region)s - %(endpoint)s',
{'region': region, 'endpoint': endpoint})
LOG.exception(e)
failed.append((e, endpoint, region))

View File

@ -24,7 +24,6 @@ import re
from designate import exceptions
from designate.central import rpcapi as central_rpcapi
from designate.context import DesignateContext
from designate.i18n import _LW
from designate.objects import Record
from designate.objects import RecordSet
from designate.plugin import ExtensionPlugin
@ -140,10 +139,10 @@ class BaseAddressHandler(NotificationHandler):
:param resource_id: The managed resource ID
"""
if not managed:
LOG.warning(_LW(
LOG.warning(
'Deprecation notice: Unmanaged designate-sink records are '
'being deprecated, please update the call '
'to remove managed=False'))
'to remove managed=False')
LOG.debug('Using Zone ID: %s', zone_id)
zone = self.get_zone(zone_id)
LOG.debug('Domain: %r', zone)
@ -204,10 +203,10 @@ class BaseAddressHandler(NotificationHandler):
:param criterion: Criterion to search and destroy records
"""
if not managed:
LOG.warning(_LW(
LOG.warning(
'Deprecation notice: Unmanaged designate-sink records are '
'being deprecated, please update the call '
'to remove managed=False'))
'to remove managed=False')
criterion = criterion or {}
context = DesignateContext().elevated()

View File

@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import six
from designate.i18n import _LW
from designate.plugin import DriverPlugin
from designate import objects
from designate import rpc
@ -52,7 +51,7 @@ def send_api_fault(context, url, status, exception):
def init_notification_plugin():
LOG.debug("Loading notification plugin: %s" % cfg.CONF.notification_plugin)
LOG.debug("Loading notification plugin: %s", cfg.CONF.notification_plugin)
cls = NotificationPlugin.get_driver(cfg.CONF.notification_plugin)
global NOTIFICATION_PLUGIN
@ -163,10 +162,10 @@ class Audit(NotificationPlugin):
(int, float, bool,
six.string_types, type(None)))
for val in (old_value, new_value)):
LOG.warning("Nulling notification values after "
"unexpected values (%s, %s)",
old_value, new_value)
old_value, new_value = None, None
msg = _LW("Nulling notification values after "
"unexpected values %s")
LOG.warning(msg, (old_value, new_value))
if old_value == new_value:
continue

View File

@ -20,7 +20,6 @@ from oslo_versionedobjects import fields
from designate import objects
from designate import utils
from designate import exceptions
from designate.i18n import _LE, _LI
LOG = log.getLogger(__name__)
@ -170,8 +169,8 @@ class DesignateAdapter(object):
@classmethod
def parse(cls, format_, values, output_object, *args, **kwargs):
LOG.debug("Creating %s object with values %r" %
(output_object.obj_name(), values))
LOG.debug("Creating %s object with values %r",
output_object.obj_name(), values)
LOG.debug(output_object)
try:
@ -189,33 +188,45 @@ class DesignateAdapter(object):
values, output_object, *args, **kwargs)
except TypeError as e:
LOG.exception(_LE("TypeError creating %(name)s with values"
" %(values)r") %
{"name": output_object.obj_name(), "values": values})
LOG.exception(
"TypeError creating %(name)s with values %(values)r",
{
"name": output_object.obj_name(),
"values": values
})
error_message = (u'Provided object is not valid. '
u'Got a TypeError with message {}'.format(
six.text_type(e)))
raise exceptions.InvalidObject(error_message)
except AttributeError as e:
LOG.exception(_LE("AttributeError creating %(name)s "
"with values %(values)r") %
{"name": output_object.obj_name(), "values": values})
LOG.exception(
"AttributeError creating %(name)s with values %(values)r",
{
"name": output_object.obj_name(),
"values": values
})
error_message = (u'Provided object is not valid. '
u'Got an AttributeError with message {}'.format(
six.text_type(e)))
raise exceptions.InvalidObject(error_message)
except exceptions.InvalidObject:
LOG.info(_LI("InvalidObject creating %(name)s with "
"values %(values)r"),
{"name": output_object.obj_name(), "values": values})
LOG.info(
"InvalidObject creating %(name)s with values %(values)r",
{
"name": output_object.obj_name(),
"values": values
})
raise
except Exception as e:
LOG.exception(_LE("Exception creating %(name)s with "
"values %(values)r") %
{"name": output_object.obj_name(), "values": values})
LOG.exception(
"Exception creating %(name)s with values %(values)r",
{
"name": output_object.obj_name(),
"values": values
})
error_message = (u'Provided object is not valid. '
u'Got a {} error with message {}'.format(
type(e).__name__, six.text_type(e)))

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from oslo_versionedobjects import exception
from oslo_utils import excutils
from designate.i18n import _
from designate.i18n import _LE
from oslo_versionedobjects import base
from oslo_versionedobjects.base import VersionedObjectDictCompat as DictObjectMixin # noqa
@ -517,7 +516,10 @@ class DesignateRegistry(base.VersionedObjectRegistry):
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
_LE('Error setting %{obj_name}s.%{field_name}s'),
{"obj_name": self.obj_name(), "field_name": name})
'Error setting %(obj_name)s.%(field_name)s',
{
"obj_name": self.obj_name(),
"field_name": name
})
setattr(cls, name, property(getter, setter, attr.fdel))

View File

@ -131,8 +131,8 @@ class RecordSet(base.DesignateObject, base.DictObjectMixin,
changes = record_cls.get_recordset_schema_changes()
old_fields = {}
if changes:
LOG.debug("Record %s is overriding the RecordSet schema with: %s" %
(record_cls.obj_name(), changes))
LOG.debug("Record %s is overriding the RecordSet schema with: %s",
record_cls.obj_name(), changes)
old_fields = deepcopy(self.FIELDS)
self.FIELDS = utils.deep_dict_merge(self.FIELDS, changes)

View File

@ -36,7 +36,7 @@ class Plugin(object):
def __init__(self):
self.name = self.get_canonical_name()
LOG.debug("Loaded plugin %s" % self.name)
LOG.debug("Loaded plugin %s", self.name)
@classmethod
def get_canonical_name(cls):
@ -112,7 +112,7 @@ class DriverPlugin(Plugin):
def get_driver(cls, name):
"""Load a single driver"""
LOG.debug('Looking for driver %s in %s' % (name, cls.__plugin_ns__))
LOG.debug('Looking for driver %s in %s', name, cls.__plugin_ns__)
mgr = driver.DriverManager(cls.__plugin_ns__, name)

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from oslo_policy import policy
from oslo_policy import opts
from designate.i18n import _
from designate import exceptions
from designate.common import policies
@ -83,10 +82,10 @@ def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
extra = {'policy': {'rule': rule, 'target': target}}
if result:
LOG.trace(_("Policy check succeeded for rule '%(rule)s' "
"on target %(target)s"),
LOG.trace("Policy check succeeded for rule '%(rule)s' "
"on target %(target)s",
{'rule': rule, 'target': repr(target)}, extra=extra)
else:
LOG.info(_("Policy check failed for rule '%(rule)s' "
"on target %(target)s"),
LOG.info("Policy check failed for rule '%(rule)s' "
"on target %(target)s",
{'rule': rule, 'target': repr(target)}, extra=extra)

View File

@ -18,11 +18,9 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate import rpc
from designate.loggingutils import rpc_logging
LOG = logging.getLogger(__name__)
MNGR_API = None
@ -71,9 +69,12 @@ class PoolManagerAPI(object):
return MNGR_API
def target_sync(self, context, pool_id, target_id, timestamp):
LOG.info(_LI("target_sync: Syncing target %(target) since "
"%(timestamp)d."),
{'target': target_id, 'timestamp': timestamp})
LOG.info(
"target_sync: Syncing target %(target) since %(timestamp)d.",
{
'target': target_id,
'timestamp': timestamp
})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, pool_id)

View File

@ -33,9 +33,6 @@ from designate.pool_manager import rpcapi as pool_manager_rpcapi
from designate.mdns import rpcapi as mdns_api
from designate import service
from designate.context import DesignateContext
from designate.i18n import _LE
from designate.i18n import _LI
from designate.i18n import _LW
from designate.pool_manager import cache
@ -71,7 +68,7 @@ def _constant_retries(num_attempts, sleep_interval):
"""
for cnt in range(num_attempts):
if cnt != 0:
LOG.debug(_LI("Executing retry n. %d"), cnt)
LOG.debug("Executing retry n. %d", cnt)
if cnt < num_attempts - 1:
yield False
time.sleep(sleep_interval)
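
The generator above is truncated by the hunk; a self-contained, hedged sketch of the same retry shape, assuming (as the pattern suggests) a final yield of True marks the last attempt:

import time

def constant_retries(num_attempts, sleep_interval):
    # Illustrative re-creation: yield False between attempts, sleeping in
    # between, and yield True once on the final attempt.
    for cnt in range(num_attempts):
        if cnt < num_attempts - 1:
            yield False
            time.sleep(sleep_interval)
        else:
            yield True

for is_last_attempt in constant_retries(3, 0.1):
    print('attempting, last=%s' % is_last_attempt)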
@ -124,7 +121,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
# and masters
self.target_backends[target.id] = backend.get_backend(target)
LOG.info(_LI('%d targets setup'), len(self.pool.targets))
LOG.info('%d targets setup', len(self.pool.targets))
if not self.target_backends:
raise exceptions.NoPoolTargetsConfigured()
@ -139,7 +136,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
topic = super(Service, self)._rpc_topic
topic = '%s.%s' % (topic, CONF['service:pool_manager'].pool_id)
LOG.info(_LI('Using topic %(topic)s for this pool manager instance.'),
LOG.info('Using topic %(topic)s for this pool manager instance.',
{'topic': topic})
return topic
@ -161,12 +158,12 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if len(self.pool.targets) > 0:
has_targets = True
else:
LOG.error(_LE("No targets for %s found."), self.pool)
LOG.error("No targets for %s found.", self.pool)
time.sleep(5)
# Pool data may not have migrated to the DB yet
except exceptions.PoolNotFound:
LOG.error(_LE("Pool ID %s not found."), pool_id)
LOG.error("Pool ID %s not found.", pool_id)
time.sleep(5)
# designate-central service may not have started yet
except messaging.exceptions.MessagingTimeout:
@ -174,8 +171,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
# designate-central failed in an unknown way, don't allow another
# failing / not started service to cause pool-manager to crash.
except Exception:
LOG.exception(_LE("An unknown exception occurred while "
"fetching pool details"))
LOG.exception("An unknown exception occurred while "
"fetching pool details")
time.sleep(5)
# Create the necessary Backend instances for each target
@ -194,14 +191,14 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if CONF['service:pool_manager'].enable_recovery_timer:
interval = CONF['service:pool_manager'].periodic_recovery_interval
LOG.info(_LI('Starting periodic recovery timer every'
' %(interval)s s') % {'interval': interval})
LOG.info('Starting periodic recovery timer every'
' %(interval)s s', {'interval': interval})
self.tg.add_timer(interval, self.periodic_recovery, interval)
if CONF['service:pool_manager'].enable_sync_timer:
interval = CONF['service:pool_manager'].periodic_sync_interval
LOG.info(_LI('Starting periodic synchronization timer every'
' %(interval)s s') % {'interval': interval})
LOG.info('Starting periodic synchronization timer every'
' %(interval)s s', {'interval': interval})
self.tg.add_timer(interval, self.periodic_sync, interval)
def stop(self):
@ -244,33 +241,33 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return
context = self._get_admin_context_all_tenants()
LOG.info(_LI("Starting Periodic Recovery"))
LOG.info("Starting Periodic Recovery")
try:
# Handle Deletion Failures
zones = self._get_failed_zones(context, DELETE_ACTION)
LOG.info(_LI("periodic_recovery:delete_zone needed on %d zones"),
LOG.info("periodic_recovery:delete_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.delete_zone(context, zone)
# Handle Creation Failures
zones = self._get_failed_zones(context, CREATE_ACTION)
LOG.info(_LI("periodic_recovery:create_zone needed on %d zones"),
LOG.info("periodic_recovery:create_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.create_zone(context, zone)
# Handle Update Failures
zones = self._get_failed_zones(context, UPDATE_ACTION)
LOG.info(_LI("periodic_recovery:update_zone needed on %d zones"),
LOG.info("periodic_recovery:update_zone needed on %d zones",
len(zones))
for zone in zones:
self.pool_manager_api.update_zone(context, zone)
except Exception:
LOG.exception(_LE('An unhandled exception in periodic '
'recovery occurred'))
LOG.exception('An unhandled exception in periodic '
'recovery occurred')
def periodic_sync(self):
"""Periodically sync all the zones that are not in ERROR status
@ -280,7 +277,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if not self._pool_election.is_leader:
return
LOG.info(_LI("Starting Periodic Synchronization"))
LOG.info("Starting Periodic Synchronization")
context = self._get_admin_context_all_tenants()
zones = self._fetch_healthy_zones(context)
zones = set(zones)
@ -300,8 +297,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if not success:
zones_in_error.append(zone)
except Exception:
LOG.exception(_LE('An unhandled exception in periodic '
'synchronization occurred.'))
LOG.exception('An unhandled exception in periodic '
'synchronization occurred.')
zones_in_error.append(zone)
if not zones_in_error:
@ -327,7 +324,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if target is None:
raise exceptions.BadRequest('Please supply a valid target id.')
LOG.info(_LI('Starting Target Sync'))
LOG.info('Starting Target Sync')
criterion = {
'pool_id': pool_id,
@ -405,7 +402,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
:param zone: Zone to be created
:return: None
"""
LOG.info(_LI("Creating new zone %s"), zone.name)
LOG.info("Creating new zone %s", zone.name)
results = []
@ -422,8 +419,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
else:
LOG.warning(_LW('Consensus not reached for creating zone %(zone)s'
' on pool targets') % {'zone': zone.name})
LOG.warning('Consensus not reached for creating zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, zone.serial)
@ -464,9 +461,9 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return True
except Exception:
retries += 1
LOG.exception(_LE(
LOG.exception(
"Failed to create zone %(zone)s on "
"target %(target)s on attempt %(attempt)d"),
"target %(target)s on attempt %(attempt)d",
{
'zone': zone.name,
'target': target.id,
@ -483,7 +480,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
:param zone: Zone to be updated
:return: consensus reached (bool)
"""
LOG.info(_LI("Updating zone %s"), zone.name)
LOG.info("Updating zone %s", zone.name)
# Update the zone on each of the Pool Targets
success_count = 0
@ -493,14 +490,14 @@ class Service(service.RPCService, coordination.CoordinationMixin,
success_count += 1
if not self._exceed_or_meet_threshold(success_count):
LOG.warning(_LW('Consensus not reached for updating zone %(zone)s'
' on pool targets') % {'zone': zone.name})
LOG.warning('Consensus not reached for updating zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(context, zone.id, ERROR_STATUS,
zone.serial)
return False
LOG.debug('Consensus reached for updating zone %(zone)s '
'on pool targets' % {'zone': zone.name})
'on pool targets', {'zone': zone.name})
# The zone status will be updated asynchronously by MiniDNS
@ -541,13 +538,13 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return True
except Exception:
LOG.exception(_LE("Failed to update zone %(zone)s on target "
"%(target)s"),
LOG.exception("Failed to update zone %(zone)s on target "
"%(target)s",
{'zone': zone.name, 'target': target.id})
return False
def _update_zone_on_also_notify(self, context, also_notify, zone):
LOG.info(_LI('Updating zone %(zone)s on also_notify %(server)s.'),
LOG.info('Updating zone %(zone)s on also_notify %(server)s.',
{'zone': zone.name,
'server': self._get_destination(also_notify)})
@ -561,7 +558,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
:param zone: Zone to be deleted
:return: None
"""
LOG.info(_LI("Deleting zone %s"), zone.name)
LOG.info("Deleting zone %s", zone.name)
results = []
@ -572,8 +569,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
if not self._exceed_or_meet_threshold(
results.count(True), MAXIMUM_THRESHOLD):
LOG.warning(_LW('Consensus not reached for deleting zone %(zone)s'
' on pool targets') % {'zone': zone.name})
LOG.warning('Consensus not reached for deleting zone %(zone)s '
'on pool targets', {'zone': zone.name})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, zone.serial)
@ -612,14 +609,14 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return True
except Exception:
retries += 1
LOG.exception(_LE(
LOG.exception(
"Failed to delete zone %(zone)s on "
"target %(target)s on attempt %(attempt)d"),
{
'zone': zone.name,
'target': target.id,
'attempt': retries
})
"target %(target)s on attempt %(attempt)d",
{
'zone': zone.name,
'target': target.id,
'attempt': retries
})
time.sleep(self.retry_interval)
return False
@ -639,8 +636,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
server for the zone.
:return: None
"""
LOG.debug("Calling update_status for %s : %s : %s : %s" %
(zone.name, zone.action, status, actual_serial))
LOG.debug("Calling update_status for %s : %s : %s : %s",
zone.name, zone.action, status, actual_serial)
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
with lockutils.lock('update-status-%s' % zone.id):
@ -654,20 +651,19 @@ class Service(service.RPCService, coordination.CoordinationMixin,
cache_serial = current_status.serial_number
LOG.debug('For zone %s : %s on nameserver %s the cache serial '
'is %s and the actual serial is %s.' %
(zone.name, action,
self._get_destination(nameserver),
cache_serial, actual_serial))
'is %s and the actual serial is %s.',
zone.name, action, self._get_destination(nameserver),
cache_serial, actual_serial)
if actual_serial and cache_serial <= actual_serial:
current_status.status = status
current_status.serial_number = actual_serial
self.cache.store(context, current_status)
LOG.debug('Attempting to get consensus serial for %s' %
LOG.debug('Attempting to get consensus serial for %s',
zone.name)
consensus_serial = self._get_consensus_serial(context, zone)
LOG.debug('Consensus serial for %s is %s' %
(zone.name, consensus_serial))
LOG.debug('Consensus serial for %s is %s',
zone.name, consensus_serial)
# If there is a valid consensus serial we can still send a success
# for that serial.
@ -675,10 +671,12 @@ class Service(service.RPCService, coordination.CoordinationMixin,
# the error serial.
if consensus_serial != 0 and cache_serial <= consensus_serial \
and zone.status != 'ACTIVE':
LOG.info(_LI('For zone %(zone)s '
'the consensus serial is %(consensus_serial)s.'),
{'zone': zone.name,
'consensus_serial': consensus_serial})
LOG.info('For zone %(zone)s the consensus serial is '
'%(consensus_serial)s.',
{
'zone': zone.name,
'consensus_serial': consensus_serial
})
self.central_api.update_status(
context, zone.id, SUCCESS_STATUS, consensus_serial)
@ -686,10 +684,12 @@ class Service(service.RPCService, coordination.CoordinationMixin,
error_serial = self._get_error_serial(
context, zone, consensus_serial)
if error_serial > consensus_serial or error_serial == 0:
LOG.warning(_LW('For zone %(zone)s '
'the error serial is %(error_serial)s.') %
{'zone': zone.name,
'error_serial': error_serial})
LOG.warning('For zone %(zone)s '
'the error serial is %(error_serial)s.',
{
'zone': zone.name,
'error_serial': error_serial
})
self.central_api.update_status(
context, zone.id, ERROR_STATUS, error_serial)
@ -698,8 +698,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
self.central_api.update_status(
context, zone.id, NO_ZONE_STATUS, 0)
else:
LOG.warning(_LW('Zone %(zone)s is not present in some '
'targets') % {'zone': zone.name})
LOG.warning('Zone %(zone)s is not present in some targets',
{'zone': zone.name})
self.central_api.update_status(
context, zone.id, NO_ZONE_STATUS, 0)
@ -745,9 +745,11 @@ class Service(service.RPCService, coordination.CoordinationMixin,
stale_zones = self.central_api.find_zones(context, stale_criterion)
if stale_zones:
LOG.warning(
_LW('Found %(len)d zones PENDING for more than %(sec)d '
'seconds'), {'len': len(stale_zones),
'sec': self.max_prop_time})
'Found %(len)d zones PENDING for more than %(sec)d seconds',
{
'len': len(stale_zones),
'sec': self.max_prop_time
})
error_zones.extend(stale_zones)
return error_zones
@ -897,9 +899,9 @@ class Service(service.RPCService, coordination.CoordinationMixin,
self.delay)
except messaging.MessagingException as msg_ex:
LOG.debug('Could not retrieve status and serial for zone %s on '
'nameserver %s with action %s (%s: %s)' %
(zone.name, self._get_destination(nameserver), action,
type(msg_ex), str(msg_ex)))
'nameserver %s with action %s (%s: %s)',
zone.name, self._get_destination(nameserver), action,
type(msg_ex), str(msg_ex))
return None
pool_manager_status = self._build_status_object(
@ -918,10 +920,10 @@ class Service(service.RPCService, coordination.CoordinationMixin,
pool_manager_status.serial_number = actual_serial or 0
LOG.debug('Retrieved status %s and serial %s for zone %s '
'on nameserver %s with action %s from mdns.' %
(pool_manager_status.status,
pool_manager_status.serial_number,
zone.name, self._get_destination(nameserver), action))
'on nameserver %s with action %s from mdns.',
pool_manager_status.status,
pool_manager_status.serial_number,
zone.name, self._get_destination(nameserver), action)
self.cache.store(context, pool_manager_status)
return pool_manager_status
@ -939,18 +941,18 @@ class Service(service.RPCService, coordination.CoordinationMixin,
context, nameserver.id, zone.id, action)
LOG.debug('Cache hit! Retrieved status %s and serial %s '
'for zone %s on nameserver %s with action %s from '
'the cache.' %
(pool_manager_status.status,
pool_manager_status.serial_number,
zone.name,
self._get_destination(nameserver), action))
'the cache.',
pool_manager_status.status,
pool_manager_status.serial_number,
zone.name,
self._get_destination(nameserver), action)
except exceptions.PoolManagerStatusNotFound:
LOG.debug('Cache miss! Did not retrieve status and serial '
'for zone %s on nameserver %s with action %s from '
'the cache. Getting it from the server.' %
(zone.name,
self._get_destination(nameserver),
action))
'the cache. Getting it from the server.',
zone.name,
self._get_destination(nameserver),
action)
pool_manager_status = self._retrieve_from_mdns(
context, nameserver, zone, action)

View File

@ -17,7 +17,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate import coordination
from designate import quota
from designate import service
@ -93,5 +92,5 @@ class Service(service.RPCService, coordination.CoordinationMixin,
self.tg.add_timer(interval, task)
def _rebalance(self, my_partitions, members, event):
LOG.info(_LI("Received rebalance event %s"), event)
LOG.info("Received rebalance event %s", event)
self.partition_range = my_partitions

View File

@ -21,7 +21,6 @@ from designate import rpc
from designate.central import rpcapi
from designate.worker import rpcapi as worker_rpcapi
from designate.pool_manager import rpcapi as pool_manager_rpcapi
from designate.i18n import _LI
from oslo_config import cfg
from oslo_log import log as logging
@ -143,8 +142,12 @@ class DeletedZonePurgeTask(PeriodicTask):
expiration time and sharding range.
"""
pstart, pend = self._my_range()
msg = _LI("Performing deleted zone purging for %(start)s to %(end)s")
LOG.info(msg, {"start": pstart, "end": pend})
LOG.info(
"Performing deleted zone purging for %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
delta = datetime.timedelta(seconds=self.options.time_threshold)
time_threshold = timeutils.utcnow() - delta
@ -187,8 +190,12 @@ class PeriodicExistsTask(PeriodicTask):
def __call__(self):
pstart, pend = self._my_range()
msg = _LI("Emitting zone exist events for shards %(start)s to %(end)s")
LOG.info(msg, {"start": pstart, "end": pend})
LOG.info(
"Emitting zone exist events for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
@ -211,9 +218,14 @@ class PeriodicExistsTask(PeriodicTask):
self.notifier.info(ctxt, 'dns.domain.exists', zone_data)
self.notifier.info(ctxt, 'dns.zone.exists', zone_data)
LOG.info(_LI("Finished emitting %(counter)d events for shards "
"%(start)s to %(end)s"),
{"start": pstart, "end": pend, "counter": counter})
LOG.info(
"Finished emitting %(counter)d events for shards "
"%(start)s to %(end)s",
{
"start": pstart,
"end": pend,
"counter": counter
})
class PeriodicSecondaryRefreshTask(PeriodicTask):
@ -228,8 +240,12 @@ class PeriodicSecondaryRefreshTask(PeriodicTask):
def __call__(self):
pstart, pend = self._my_range()
msg = _LI("Refreshing zones for shards %(start)s to %(end)s")
LOG.info(msg, {"start": pstart, "end": pend})
LOG.info(
"Refreshing zones for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
@ -307,8 +323,13 @@ class PeriodicGenerateDelayedNotifyTask(PeriodicTask):
sort_dir='asc',
)
msg = _LI("Performing delayed NOTIFY for %(start)s to %(end)s: %(n)d")
LOG.debug(msg % dict(start=pstart, end=pend, n=len(zones)))
LOG.debug(
"Performing delayed NOTIFY for %(start)s to %(end)s: %(n)d",
{
'start': pstart,
'end': pend,
'n': len(zones)
})
for z in zones:
self.zone_api.update_zone(ctxt, z)
@ -338,8 +359,12 @@ class WorkerPeriodicRecovery(PeriodicTask):
return
pstart, pend = self._my_range()
msg = _LI("Recovering zones for shards %(start)s to %(end)s")
LOG.info(msg, {"start": pstart, "end": pend})
LOG.info(
"Recovering zones for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True

View File

@ -42,7 +42,7 @@ cfg.CONF.register_opts(quota_opts)
def get_quota():
quota_driver = cfg.CONF.quota_driver
LOG.debug("Loading quota driver: %s" % quota_driver)
LOG.debug("Loading quota driver: %s", quota_driver)
cls = base.Quota.get_driver(quota_driver)

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from stevedore import named
from designate import exceptions
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -45,7 +44,7 @@ class Scheduler(object):
self.filters = [x.plugin(storage=self.storage) for x in filters]
for filter in self.filters:
LOG.info(_LI("Loaded Scheduler Filter: %s") % filter.name)
LOG.info("Loaded Scheduler Filter: %s", filter.name)
else:
raise exceptions.NoFiltersConfigured('There are no scheduling '

View File

@ -33,9 +33,6 @@ from oslo_service import sslutils
from oslo_utils import netutils
from designate.i18n import _
from designate.i18n import _LE
from designate.i18n import _LI
from designate.i18n import _LW
from designate.metrics import metrics
from designate import policy
from designate import rpc
@ -91,12 +88,14 @@ class Service(service.Service):
def start(self):
super(Service, self).start()
LOG.info(_('Starting %(name)s service (version: %(version)s)'),
{'name': self.service_name,
'version': version.version_info.version_string()})
LOG.info('Starting %(name)s service (version: %(version)s)',
{
'name': self.service_name,
'version': version.version_info.version_string()
})
def stop(self):
LOG.info(_('Stopping %(name)s service'), {'name': self.service_name})
LOG.info('Stopping %(name)s service', {'name': self.service_name})
super(Service, self).stop()
@ -116,8 +115,8 @@ class Service(service.Service):
port = self._service_config.port
if host or port is not None:
LOG.warning(_LW("host and port config options used, the 'listen' "
"option has been ignored"))
LOG.warning("host and port config options used, the 'listen' "
"option has been ignored")
host = host or "0.0.0.0"
# "port" might be 0 to pick a free port, usually during testing
@ -140,7 +139,7 @@ class RPCService(object):
def __init__(self, *args, **kwargs):
super(RPCService, self).__init__(*args, **kwargs)
LOG.debug("Creating RPC Server on topic '%s'" % self._rpc_topic)
LOG.debug("Creating RPC Server on topic '%s'", self._rpc_topic)
self._rpc_server = rpc.get_server(
messaging.Target(topic=self._rpc_topic, server=self._host),
self._rpc_endpoints)
@ -169,7 +168,7 @@ class RPCService(object):
def start(self):
super(RPCService, self).start()
LOG.debug("Starting RPC server on topic '%s'" % self._rpc_topic)
LOG.debug("Starting RPC server on topic '%s'", self._rpc_topic)
self._rpc_server.start()
# TODO(kiall): This probably belongs somewhere else, maybe the base
@ -183,7 +182,7 @@ class RPCService(object):
self.heartbeat_emitter.start()
def stop(self):
LOG.debug("Stopping RPC server on topic '%s'" % self._rpc_topic)
LOG.debug("Stopping RPC server on topic '%s'", self._rpc_topic)
self.heartbeat_emitter.stop()
for e in self._rpc_endpoints:
@ -309,7 +308,7 @@ class DNSService(object):
sock_udp.close()
def _dns_handle_tcp(self, sock_tcp):
LOG.info(_LI("_handle_tcp thread started"))
LOG.info("_handle_tcp thread started")
while True:
try:
@ -319,10 +318,10 @@ class DNSService(object):
if self._service_config.tcp_recv_timeout:
client.settimeout(self._service_config.tcp_recv_timeout)
LOG.debug("Handling TCP Request from: %(host)s:%(port)d" %
LOG.debug("Handling TCP Request from: %(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})
if len(addr) == 4:
LOG.debug("Flow info: %(host)s scope: %(port)d" %
LOG.debug("Flow info: %(host)s scope: %(port)d",
{'host': addr[2], 'port': addr[3]})
# Dispatch a thread to handle the connection
@ -333,20 +332,19 @@ class DNSService(object):
# ensure no exceptions are generated from within.
except socket.timeout:
client.close()
LOG.warning(_LW("TCP Timeout from: %(host)s:%(port)d") %
LOG.warning("TCP Timeout from: %(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})
except socket.error as e:
client.close()
errname = errno.errorcode[e.args[0]]
LOG.warning(
_LW("Socket error %(err)s from: %(host)s:%(port)d") %
{'host': addr[0], 'port': addr[1], 'err': errname})
LOG.warning("Socket error %(err)s from: %(host)s:%(port)d",
{'host': addr[0], 'port': addr[1], 'err': errname})
except Exception:
client.close()
LOG.exception(_LE("Unknown exception handling TCP request "
"from: %(host)s:%(port)d") %
LOG.exception("Unknown exception handling TCP request from: "
"%(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})
def _dns_handle_tcp_conn(self, addr, client):
@ -403,22 +401,21 @@ class DNSService(object):
client.sendall(tcp_response)
except socket.timeout:
LOG.info(_LI("TCP Timeout from: %(host)s:%(port)d"),
LOG.info("TCP Timeout from: %(host)s:%(port)d",
{'host': host, 'port': port})
except socket.error as e:
errname = errno.errorcode[e.args[0]]
LOG.warning(_LW("Socket error %(err)s from: %(host)s:%(port)d"),
LOG.warning("Socket error %(err)s from: %(host)s:%(port)d",
{'host': host, 'port': port, 'err': errname})
except struct.error:
LOG.warning(_LW("Invalid packet from: %(host)s:%(port)d"),
LOG.warning("Invalid packet from: %(host)s:%(port)d",
{'host': host, 'port': port})
except Exception:
LOG.exception(_LE("Unknown exception handling TCP request "
"from: %(host)s:%(port)d"),
{'host': host, 'port': port})
LOG.exception("Unknown exception handling TCP request from: "
"%(host)s:%(port)d", {'host': host, 'port': port})
finally:
client.close()
@ -429,7 +426,7 @@ class DNSService(object):
:type sock_udp: socket
:raises: None
"""
LOG.info(_LI("_handle_udp thread started"))
LOG.info("_handle_udp thread started")
while True:
try:
@ -437,7 +434,7 @@ class DNSService(object):
# UDP recvfrom.
payload, addr = sock_udp.recvfrom(8192)
LOG.debug("Handling UDP Request from: %(host)s:%(port)d" %
LOG.debug("Handling UDP Request from: %(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})
# Dispatch a thread to handle the query
@ -446,13 +443,12 @@ class DNSService(object):
except socket.error as e:
errname = errno.errorcode[e.args[0]]
LOG.warning(
_LW("Socket error %(err)s from: %(host)s:%(port)d") %
{'host': addr[0], 'port': addr[1], 'err': errname})
LOG.warning("Socket error %(err)s from: %(host)s:%(port)d",
{'host': addr[0], 'port': addr[1], 'err': errname})
except Exception:
LOG.exception(_LE("Unknown exception handling UDP request "
"from: %(host)s:%(port)d") %
LOG.exception("Unknown exception handling UDP request from: "
"%(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})
def _dns_handle_udp_query(self, sock, addr, payload):
@ -477,8 +473,8 @@ class DNSService(object):
sock.sendto(response, addr)
except Exception:
LOG.exception(_LE("Unhandled exception while processing request "
"from %(host)s:%(port)d") %
LOG.exception("Unhandled exception while processing request from "
"%(host)s:%(port)d",
{'host': addr[0], 'port': addr[1]})

View File

@ -18,7 +18,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LW
from designate import notification_handler
from designate import rpc
from designate import service
@ -49,7 +48,7 @@ class Service(service.Service):
enabled_notification_handlers)
if len(notification_handlers) == 0:
LOG.warning(_LW('No designate-sink handlers enabled or loaded'))
LOG.warning('No designate-sink handlers enabled or loaded')
return notification_handlers
@ -112,5 +111,5 @@ class Service(service.Service):
if event_type in self._get_handler_event_types():
for handler in self.handlers:
if event_type in handler.get_event_types():
LOG.debug('Found handler for: %s' % event_type)
LOG.debug('Found handler for: %s', event_type)
handler.process_notification(context, event_type, payload)

View File

@ -26,7 +26,6 @@ from oslo_db.sqlalchemy.migration_cli import manager
from oslo_log import log
from designate.i18n import _
from designate.i18n import _LW
from designate import exceptions
@ -97,7 +96,7 @@ def sort_query(query, table, sort_keys, sort_dir=None, sort_dirs=None):
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
LOG.warning('Id not in sort_keys; is sort_keys unique?')
assert(not (sort_dir and sort_dirs))

View File

@ -24,8 +24,6 @@ from oslo_db import exception as db_exception
from oslo_utils import excutils
from designate.storage import base
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
RETRY_STATE = threading.local()
@ -43,7 +41,7 @@ def _retry_on_deadlock(exc):
# TODO(kiall): This is a total leak of the SQLA Driver, we'll need a better
# way to handle this.
if isinstance(exc, db_exception.DBDeadlock):
LOG.warning(_LW("Deadlock detected. Retrying..."))
LOG.warning("Deadlock detected. Retrying...")
return True
return False

View File

@ -24,7 +24,6 @@ from sqlalchemy.sql.expression import or_
from designate import exceptions
from designate import objects
from designate.i18n import _LI
from designate.sqlalchemy import base as sqlalchemy_base
from designate.storage import base as storage_base
from designate.storage.impl_sqlalchemy import tables
@ -476,10 +475,10 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
limit=limit,
)
if not zones:
LOG.info(_LI("No zones to be purged"))
LOG.info("No zones to be purged")
return
LOG.debug(_LI("Purging %d zones"), len(zones))
LOG.debug("Purging %d zones", len(zones))
zones_by_id = {z.id: z for z in zones}
@ -492,11 +491,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
values(parent_zone_id=surviving_parent_id)
resultproxy = self.session.execute(query)
LOG.debug(_LI("%d child zones updated"), resultproxy.rowcount)
LOG.debug("%d child zones updated", resultproxy.rowcount)
self.purge_zone(context, zone)
LOG.info(_LI("Purged %d zones"), len(zones))
LOG.info("Purged %d zones", len(zones))
return len(zones)
def count_zones(self, context, criterion=None):

View File

@ -27,13 +27,13 @@ LOG = logging.getLogger(__name__)
def create_tables(tables):
for table in tables:
LOG.debug("Creating table %s" % table)
LOG.debug("Creating table %s", table)
table.create()
def drop_tables(tables):
for table in tables:
LOG.debug("Dropping table %s" % table)
LOG.debug("Dropping table %s", table)
table.drop()

View File

@ -21,14 +21,12 @@ from oslo_log import log as logging
from sqlalchemy import Boolean
from sqlalchemy.schema import Column, MetaData, Table, Index
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
LOG.info(_LI("Adding boolean column delayed_notify to table 'zones'"))
LOG.info("Adding boolean column delayed_notify to table 'zones'")
meta.bind = migrate_engine
zones_table = Table('zones', meta, autoload=True)
col = Column('delayed_notify', Boolean(), default=False)

View File

@ -1,6 +1,10 @@
# Copyright 2012 Managed I.T.
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -16,6 +20,7 @@
# under the License.
from __future__ import absolute_import
import logging as std_logging
import os
import random
import shutil
@ -36,7 +41,7 @@ from designate.sqlalchemy import utils as sqlalchemy_utils
"""Test fixtures
"""
_TRUE_VALUES = ('True', 'true', '1', 'yes')
LOG = logging.getLogger(__name__)
@ -124,10 +129,10 @@ class DatabaseFixture(fixtures.Fixture):
tmpfs_path = "/dev/shm"
if os.path.isdir(tmpfs_path):
tmp_dir = tmpfs_path
LOG.debug("Using tmpfs on %s as database tmp dir" % tmp_dir)
LOG.debug("Using tmpfs on %s as database tmp dir", tmp_dir)
else:
tmp_dir = "/tmp"
LOG.warning("Using %s as database tmp dir. Tests might be slow" %
LOG.warning("Using %s as database tmp dir. Tests might be slow",
tmp_dir)
_, path = tempfile.mkstemp(prefix='designate-', suffix='.sqlite',
@ -157,7 +162,7 @@ class DatabaseFixture(fixtures.Fixture):
def tearDown(self):
# This is currently unused
super(DatabaseFixture, self).tearDown()
LOG.debug("Deleting %s" % self.working_copy)
LOG.debug("Deleting %s", self.working_copy)
os.unlink(self.working_copy)
@ -180,6 +185,87 @@ class ZoneManagerTaskFixture(fixtures.Fixture):
self.task.on_partition_change(range(0, 4095), None, None)
# Logging handlers imported from Nova.
class NullHandler(std_logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO, and a Null Logger at DEBUG which lets us
execute log messages at DEBUG but not keep the output.
To support local debugging, OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There is also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = std_logging.getLogger()
root.setLevel(std_logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = std_logging.DEBUG
else:
level = std_logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > std_logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(std_logging.DEBUG)
# Don't log every single DB migration step
std_logging.getLogger(
'migrate.versioning.api').setLevel(std_logging.WARNING)
# At times we end up calling back into main() functions in
# testing. This has the possibility of calling logging.setup
# again, which completely unwinds the logging capture we've
# created here. Once we've setup the logging the way we want,
# disable the ability for the test to change this.
def fake_logging_setup(*args):
pass
self.useFixture(
fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
@contextmanager
def random_seed(seed):
"""Context manager to set random.seed() temporarily

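The StandardLogging and NullHandler fixtures above mirror the pattern the
test changes in this commit rely on: instead of mocking LOG objects, tests
capture log output and assert on it. A minimal sketch of a test consuming
the fixture (the test class name and the asserted message are illustrative
only):

    from designate.tests import fixtures
    from designate.tests import TestCase


    class ExampleLoggingTest(TestCase):
        def setUp(self):
            super(ExampleLoggingTest, self).setUp()
            # Redirect and capture all log output for this test.
            self.stdlog = fixtures.StandardLogging()
            self.useFixture(self.stdlog)

        def test_logs_are_captured(self):
            # Run code that logs, then assert on the captured output
            # rather than mocking the LOG object directly.
            self.assertIn('expected message', self.stdlog.logger.output)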
View File

@ -19,6 +19,7 @@ import monascastatsd
from designate.metrics import Metrics
from designate.metrics_client import noop
from designate.tests import fixtures
from designate.tests import TestCase
from oslo_config import cfg
@ -29,16 +30,17 @@ class TestNoopMetrics(TestCase):
def setUp(self):
super(TestCase, self).setUp()
self.stdlog = fixtures.StandardLogging()
self.useFixture(self.stdlog)
self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
self.metrics = Metrics()
self.metrics._client = noop.Client()
def test_noop_metrics_enabled(self):
self.CONF.set_override('enabled', True, 'monasca:statsd')
with mock.patch('designate.metrics_client.noop.LOG') as log_mock:
self.metrics.init()
log_mock.error.assert_called_once_with(
"Using noop metrics client. Metrics will be ignored.")
self.metrics.init()
self.assertIn("Using noop metrics client. Metrics will be ignored.",
self.stdlog.logger.output)
def test_noop_metrics_disabled(self):
with mock.patch('designate.metrics_client.noop.LOG') as log_mock:
@ -67,21 +69,22 @@ class TestMonascaMetrics(TestCase):
def setUp(self):
super(TestCase, self).setUp()
self.stdlog = fixtures.StandardLogging()
self.useFixture(self.stdlog)
self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
self.metrics = Metrics()
def test_monasca_metrics_enabled(self):
self.CONF.set_override('enabled', True, 'monasca:statsd')
with mock.patch('designate.metrics.LOG') as log_mock:
self.metrics.init()
log_mock.info.assert_called_once_with(
"Statsd reports to 127.0.0.1 8125")
self.metrics.init()
self.assertIn("Statsd reports to 127.0.0.1 8125",
self.stdlog.logger.output)
def test_monasca_metrics_disabled(self):
with mock.patch('designate.metrics.LOG') as log_mock:
self.metrics.init()
log_mock.info.assert_called_once_with(
"Statsd disabled")
self.metrics.init()
self.assertIn(
"Statsd disabled",
self.stdlog.logger.output)
@mock.patch('socket.socket.connect')
@mock.patch('socket.socket.send')

View File

@ -33,8 +33,6 @@ from oslo_utils.netutils import is_valid_ipv6
from designate.common import config
from designate import exceptions
from designate.i18n import _
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
@ -481,7 +479,7 @@ def bind_tcp(host, port, tcp_backlog, tcp_keepidle=None):
:type tcp_keepidle: int
:returns: socket
"""
LOG.info(_LI('Opening TCP Listening Socket on %(host)s:%(port)d'),
LOG.info('Opening TCP Listening Socket on %(host)s:%(port)d',
{'host': host, 'port': port})
family = socket.AF_INET6 if is_valid_ipv6(host) else socket.AF_INET
sock_tcp = socket.socket(family, socket.SOCK_STREAM)
@ -492,7 +490,7 @@ def bind_tcp(host, port, tcp_backlog, tcp_keepidle=None):
try:
sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
LOG.info(_LI('SO_REUSEPORT not available, ignoring.'))
LOG.info('SO_REUSEPORT not available, ignoring.')
# This option isn't available in the OS X version of eventlet
if tcp_keepidle and hasattr(socket, 'TCP_KEEPIDLE'):
@ -504,7 +502,7 @@ def bind_tcp(host, port, tcp_backlog, tcp_keepidle=None):
sock_tcp.bind((host, port))
if port == 0:
newport = sock_tcp.getsockname()[1]
LOG.info(_LI('Listening on TCP port %(port)d'), {'port': newport})
LOG.info('Listening on TCP port %(port)d', {'port': newport})
sock_tcp.listen(tcp_backlog)
@ -521,7 +519,7 @@ def bind_udp(host, port):
:type port: int
:returns: socket
"""
LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d'),
LOG.info('Opening UDP Listening Socket on %(host)s:%(port)d',
{'host': host, 'port': port})
family = socket.AF_INET6 if is_valid_ipv6(host) else socket.AF_INET
sock_udp = socket.socket(family, socket.SOCK_DGRAM)
@ -531,13 +529,13 @@ def bind_udp(host, port):
try:
sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
LOG.info(_LI('SO_REUSEPORT not available, ignoring.'))
LOG.info('SO_REUSEPORT not available, ignoring.')
sock_udp.setblocking(True)
sock_udp.bind((host, port))
if port == 0:
newport = sock_udp.getsockname()[1]
LOG.info(_LI('Listening on UDP port %(port)d'), {'port': newport})
LOG.info('Listening on UDP port %(port)d', {'port': newport})
return sock_udp

View File

@ -19,8 +19,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate.i18n import _LE
from designate import backend
from designate import exceptions
from designate import service
@ -51,7 +49,7 @@ class Service(service.RPCService, service.Service):
# Fetch an instance of the Backend class
target.backend = backend.get_backend(target)
LOG.info(_LI('%d targets setup'), len(pool.targets))
LOG.info('%d targets setup', len(pool.targets))
if len(pool.targets) == 0:
raise exceptions.NoPoolTargetsConfigured()
@ -72,12 +70,12 @@ class Service(service.RPCService, service.Service):
if len(pool.targets) > 0:
has_targets = True
else:
LOG.error(_LE("No targets for %s found."), pool)
LOG.error("No targets for %s found.", pool)
time.sleep(5)
# Pool data may not have migrated to the DB yet
except exceptions.PoolNotFound:
LOG.error(_LE("Pool ID %s not found."), pool_id)
LOG.error("Pool ID %s not found.", pool_id)
time.sleep(5)
# designate-central service may not have started yet
except messaging.exceptions.MessagingTimeout:
@ -111,13 +109,13 @@ class Service(service.RPCService, service.Service):
def get_pool(self, pool_id):
if pool_id not in self.pools_map:
LOG.info(_LI("Lazily loading pool %s"), pool_id)
LOG.info("Lazily loading pool %s", pool_id)
self.pools_map[pool_id] = self.load_pool(pool_id)
return self.pools_map[pool_id]
def start(self):
super(Service, self).start()
LOG.info(_LI('Started worker'))
LOG.info('Started worker')
def _do_zone_action(self, context, zone):
pool = self.get_pool(zone.pool_id)

View File

@ -20,8 +20,6 @@ import dns
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI
from designate.i18n import _LW
from designate.worker import utils as wutils
from designate.worker.tasks import base
from designate import exceptions
@ -69,8 +67,11 @@ class ZoneActionOnTarget(base.Task):
def __call__(self):
LOG.debug("Attempting %(action)s zone %(zone)s on %(target)s",
{'action': self.action, 'zone': self.zone.name,
'target': self.target})
{
'action': self.action,
'zone': self.zone.name,
'target': self.target
})
for retry in range(0, self.max_retries):
try:
@ -87,11 +88,16 @@ class ZoneActionOnTarget(base.Task):
self.action, self.zone.name, self.target)
return True
except Exception as e:
LOG.info(_LI('Failed to %(action)s zone %(zone)s on '
'target %(target)s on attempt %(attempt)d, '
'Error: %(error)s.'), {'action': self.action,
'zone': self.zone.name, 'target': self.target.id,
'attempt': retry + 1, 'error': str(e)})
LOG.info('Failed to %(action)s zone %(zone)s on '
'target %(target)s on attempt %(attempt)d, '
'Error: %(error)s.',
{
'action': self.action,
'zone': self.zone.name,
'target': self.target.id,
'attempt': retry + 1,
'error': str(e)
})
time.sleep(self.retry_interval)
return False
@ -119,14 +125,20 @@ class SendNotify(base.Task):
try:
wutils.notify(self.zone.name, host, port=port)
LOG.debug('Sent NOTIFY to %(host)s:%(port)s for zone '
'%(zone)s', {'host': host,
'port': port, 'zone': self.zone.name})
LOG.debug('Sent NOTIFY to %(host)s:%(port)s for zone %(zone)s',
{
'host': host,
'port': port,
'zone': self.zone.name
})
return True
except dns.exception.Timeout as e:
LOG.info(_LI('Timeout on NOTIFY to %(host)s:%(port)s for zone '
'%(zone)s'), {'host': host,
'port': port, 'zone': self.zone.name})
LOG.info('Timeout on NOTIFY to %(host)s:%(port)s for zone '
'%(zone)s', {
'host': host,
'port': port,
'zone': self.zone.name
})
raise e
return False
@ -169,9 +181,12 @@ class ZoneActor(base.Task, ThresholdMixin):
results.count(True), len(results))
if not met_action_threshold:
LOG.info(_LI('Could not %(action)s %(zone)s on enough targets. '
'Updating status to ERROR'),
{'action': self.zone.action, 'zone': self.zone.name})
LOG.info('Could not %(action)s %(zone)s on enough targets. '
'Updating status to ERROR',
{
'action': self.zone.action,
'zone': self.zone.name
})
self.zone.status = 'ERROR'
self._update_status()
return False
@ -216,8 +231,8 @@ class ZoneAction(base.Task):
return poller()
def __call__(self):
LOG.info(_LI('Attempting %(action)s on zone %(name)s'),
{'action': self.action, 'name': self.zone.name})
LOG.info('Attempting %(action)s on zone %(name)s',
{'action': self.action, 'name': self.zone.name})
if not self._zone_action_on_targets():
return False
@ -304,25 +319,39 @@ class PollForZone(base.Task):
def __call__(self):
LOG.debug('Polling for zone %(zone)s serial %(serial)s on %(ns)s',
{'zone': self.zone.name, 'serial': self.zone.serial,
'ns': self.ns})
{
'zone': self.zone.name,
'serial': self.zone.serial,
'ns': self.ns
})
try:
serial = self._get_serial()
LOG.debug('Found serial %(serial)d on %(host)s for zone '
'%(zone)s', {'serial': serial, 'host': self.ns.host,
'zone': self.zone.name})
LOG.debug('Found serial %(serial)d on %(host)s for zone %(zone)s',
{
'serial': serial,
'host': self.ns.host,
'zone': self.zone.name
})
return serial
# TODO(timsim): cache if it's higher than cache
except dns.exception.Timeout:
LOG.info(_LI('Timeout polling for serial %(serial)d '
'%(host)s for zone %(zone)s'), {'serial': self.zone.serial,
'host': self.ns.host, 'zone': self.zone.name})
LOG.info('Timeout polling for serial %(serial)d '
'%(host)s for zone %(zone)s',
{
'serial': self.zone.serial,
'host': self.ns.host,
'zone': self.zone.name
})
except Exception as e:
LOG.warning(_LW('Unexpected failure polling for serial %(serial)d '
'%(host)s for zone %(zone)s. Error: %(error)s'),
{'serial': self.zone.serial, 'host': self.ns.host,
'zone': self.zone.name, 'error': str(e)})
LOG.warning('Unexpected failure polling for serial %(serial)d '
'%(host)s for zone %(zone)s. Error: %(error)s',
{
'serial': self.zone.serial,
'host': self.ns.host,
'zone': self.zone.name,
'error': str(e)
})
return None
@ -378,9 +407,12 @@ class ZonePoller(base.Task, ThresholdMixin):
return query_result
def _on_failure(self, error_status):
LOG.info(_LI('Could not find %(serial)s for %(zone)s on enough '
'nameservers.'),
{'serial': self.zone.serial, 'zone': self.zone.name})
LOG.info('Could not find %(serial)s for %(zone)s on enough '
'nameservers.',
{
'serial': self.zone.serial,
'zone': self.zone.name
})
self.zone.status = error_status
@ -522,9 +554,11 @@ class RecoverShard(base.Task):
stale_zones = self.storage.find_zones(self.context, stale_criterion)
if stale_zones:
LOG.warn(_LW('Found %(len)d zones PENDING for more than %(sec)d '
'seconds'), {'len': len(stale_zones),
'sec': self.max_prop_time})
LOG.warn('Found %(len)d zones PENDING for more than %(sec)d '
'seconds', {
'len': len(stale_zones),
'sec': self.max_prop_time
})
error_zones.extend(stale_zones)
return error_zones

View File

@ -203,14 +203,13 @@ See https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html
.. code-block:: python
# Do not use "%" string formatting
# No localization for debug
# No localization for log messages
LOG.debug("... %s", variable)
LOG.info(_LI("... %s..."), variable)
# Use named interpolation when more than one replacement is done
LOG.info(_LI("... %(key)s ..."), {'key': 'value', ...})
LOG.warn(_LW("... %(key)s"), {'key': 'value'})
LOG.error(_LE("... %(key)s"), {'key': 'value'})
LOG.critical(_LC("... %(key)s"), {'key': 'value'})
LOG.info("... %(key)s ...", {'key': 'value', ...})
LOG.warn("... %(key)s", {'key': 'value'})
LOG.error("... %(key)s", {'key': 'value'})
LOG.critical("... %(key)s", {'key': 'value'})
.. _Gerrit workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow
.. _blueprint: https://blueprints.launchpad.net/designate