Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: Ieec8028305099422e1b0f8fc84bc90c9ca6c694f
This commit is contained in:
liyi 2017-03-21 12:17:51 +08:00
parent 34df3cd915
commit 8f10215ffd
94 changed files with 559 additions and 705 deletions

View File

@ -20,13 +20,12 @@ An OpenStack REST API to Heat.
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api` script is deprecated. Please use the '
'system level heat binaries installed to start '
'any of the heat services.'))
LOG.warning('DEPRECATED: `heat-api` script is deprecated. Please use the '
'system level heat binaries installed to start '
'any of the heat services.')
import os
import sys

View File

@ -22,13 +22,12 @@ AMQP RPC to implement them.
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api-cfn` script is deprecated. Please use '
'the system level heat binaries installed to start '
'any of the heat services.'))
LOG.warning('DEPRECATED: `heat-api-cfn` script is deprecated. Please use '
'the system level heat binaries installed to start '
'any of the heat services.')
import os
import sys

View File

@ -22,13 +22,12 @@ implement them.
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api-cloudwatch` script is deprecated. '
'Please use the system level heat binaries installed to '
'start any of the heat services.'))
LOG.warning('DEPRECATED: `heat-api-cloudwatch` script is deprecated. '
'Please use the system level heat binaries installed to '
'start any of the heat services.')
import os
import sys

View File

@ -22,13 +22,12 @@ engine.
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-engine` script is deprecated. '
'Please use the system level heat binaries installed to '
'start any of the heat services.'))
LOG.warning('DEPRECATED: `heat-engine` script is deprecated. '
'Please use the system level heat binaries installed to '
'start any of the heat services.')
import os
import sys

View File

@ -15,12 +15,11 @@
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-manage` script is deprecated. Please use '
'the system level heat-manage binary.'))
LOG.warning('DEPRECATED: `heat-manage` script is deprecated. Please use '
'the system level heat-manage binary.')
import os
import sys

View File

@ -21,7 +21,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -554,8 +553,8 @@ def available_resource_mapping():
if DOCKER_INSTALLED:
return resource_mapping()
else:
LOG.warning(_LW("Docker plug-in loaded, but docker lib "
"not installed."))
LOG.warning("Docker plug-in loaded, but docker lib "
"not installed.")
return {}

View File

@ -21,12 +21,9 @@ from oslo_log import log as logging
from oslo_utils import importutils
from heat.common import exception
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
LOG = logging.getLogger('heat.common.keystoneclient')
LOG.info(_LI("Keystone V2 loaded"))
LOG.info("Keystone V2 loaded")
class KeystoneClientV2(object):
@ -100,8 +97,8 @@ class KeystoneClientV2(object):
kwargs['tenant_name'] = self.context.project_name
kwargs['tenant_id'] = self.context.tenant_id
else:
LOG.error(_LE("Keystone v2 API connection failed, no password "
"or auth_token!"))
LOG.error("Keystone v2 API connection failed, no password "
"or auth_token!")
raise exception.AuthorizationFailure()
kwargs['cacert'] = self._get_client_option('ca_file')
kwargs['insecure'] = self._get_client_option('insecure')
@ -115,7 +112,7 @@ class KeystoneClientV2(object):
if auth_kwargs:
# Sanity check
if not client.auth_ref.trust_scoped:
LOG.error(_LE("v2 trust token re-scoping failed!"))
LOG.error("v2 trust token re-scoping failed!")
raise exception.AuthorizationFailure()
# All OK so update the context with the token
self.context.auth_token = client.auth_ref.auth_token
@ -123,8 +120,8 @@ class KeystoneClientV2(object):
# Ensure the v2 API we're using is not impacted by keystone
# bug #1239303, otherwise we can't trust the user_id
if self.context.trustor_user_id != client.auth_ref.user_id:
LOG.error(_LE("Trust impersonation failed, bug #1239303 "
"suspected, you may need a newer keystone"))
LOG.error("Trust impersonation failed, bug #1239303 "
"suspected, you may need a newer keystone")
raise exception.AuthorizationFailure()
return client
@ -164,8 +161,8 @@ class KeystoneClientV2(object):
Returns the keystone ID of the resulting user
"""
if len(username) > 64:
LOG.warning(_LW("Truncating the username %s to the last 64 "
"characters."), username)
LOG.warning("Truncating the username %s to the last 64 "
"characters.", username)
# get the last 64 characters of the username
username = username[-64:]
user = self.client.users.create(username,
@ -188,8 +185,8 @@ class KeystoneClientV2(object):
self.client.roles.add_user_role(user.id, role_id,
self.context.tenant_id)
else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
"check role exists!"),
LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!",
{'user': username,
'role': cfg.CONF.heat_stack_user_role})

View File

@ -25,8 +25,6 @@ from swiftclient import utils as swiftclient_utils
from troveclient import client as tc
from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
@ -57,7 +55,7 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
"""Create an authenticated client context."""
self.pyrax = pyrax.create_context("rackspace")
self.pyrax.auth_endpoint = self.context.auth_url
LOG.info(_LI("Authenticating username: %s"),
LOG.info("Authenticating username: %s",
self.context.username)
tenant = self.context.tenant_id
tenant_name = self.context.tenant
@ -65,9 +63,9 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
tenant_id=tenant,
tenant_name=tenant_name)
if not self.pyrax.authenticated:
LOG.warning(_LW("Pyrax Authentication Failed."))
LOG.warning("Pyrax Authentication Failed.")
raise exception.AuthorizationFailure()
LOG.info(_LI("User %s authenticated successfully."),
LOG.info("User %s authenticated successfully.",
self.context.username)

View File

@ -19,7 +19,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import function
@ -1184,7 +1183,7 @@ class CloudLoadBalancer(resource.Resource):
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
function = attribute_function[key]
LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'),
LOG.info('%(name)s.GetAtt(%(key)s) == %(function)s',
{'name': self.name, 'key': key, 'function': function})
return function

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.nova import server
@ -187,7 +186,7 @@ class CloudServer(server.Server):
reason = server.metadata.get('rackconnect_unprocessable_reason',
None)
if reason is not None:
LOG.warning(_LW("RackConnect unprocessable reason: %s"),
LOG.warning("RackConnect unprocessable reason: %s",
reason)
msg = _("RackConnect automation has completed")

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
import six
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -108,8 +107,8 @@ class CloudNetwork(resource.Resource):
try:
self._network = self.cloud_networks().get(self.resource_id)
except NotFound:
LOG.warning(_LW("Could not find network %s but resource id is"
" set."), self.resource_id)
LOG.warning("Could not find network %s but resource id is"
" set.", self.resource_id)
return self._network
def cloud_networks(self):
@ -139,7 +138,7 @@ class CloudNetwork(resource.Resource):
try:
network.delete()
except NetworkInUse:
LOG.warning(_LW("Network '%s' still in use."), network.id)
LOG.warning("Network '%s' still in use.", network.id)
else:
self._delete_issued = True
return False

View File

@ -22,8 +22,6 @@ import webob
from heat.api.aws import exception
from heat.common import endpoint_utils
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common import wsgi
LOG = logging.getLogger(__name__)
@ -161,14 +159,14 @@ class EC2Token(wsgi.Middleware):
# here so that we can use both authentication methods.
# Returning here just means the user didn't supply AWS
# authentication and we'll let the app try native keystone next.
LOG.info(_LI("Checking AWS credentials.."))
LOG.info("Checking AWS credentials..")
signature = self._get_signature(req)
if not signature:
if 'X-Auth-User' in req.headers:
return self.application
else:
LOG.info(_LI("No AWS Signature found."))
LOG.info("No AWS Signature found.")
raise exception.HeatIncompleteSignatureError()
access = self._get_access(req)
@ -176,14 +174,14 @@ class EC2Token(wsgi.Middleware):
if 'X-Auth-User' in req.headers:
return self.application
else:
LOG.info(_LI("No AWSAccessKeyId/Authorization Credential"))
LOG.info("No AWSAccessKeyId/Authorization Credential")
raise exception.HeatMissingAuthenticationTokenError()
LOG.info(_LI("AWS credentials found, checking against keystone."))
LOG.info("AWS credentials found, checking against keystone.")
if not auth_uri:
LOG.error(_LE("Ec2Token authorization failed, no auth_uri "
"specified in config file"))
LOG.error("Ec2Token authorization failed, no auth_uri "
"specified in config file")
raise exception.HeatInternalFailureError(_('Service '
'misconfigured'))
# Make a copy of args for authentication and signature verification.
@ -207,7 +205,7 @@ class EC2Token(wsgi.Middleware):
headers = {'Content-Type': 'application/json'}
keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri)
LOG.info(_LI('Authenticating with %s'), keystone_ec2_uri)
LOG.info('Authenticating with %s', keystone_ec2_uri)
response = requests.post(keystone_ec2_uri, data=creds_json,
headers=headers,
verify=self.ssl_options['verify'],
@ -220,7 +218,7 @@ class EC2Token(wsgi.Middleware):
roles = [role['name']
for role in result['token'].get('roles', [])]
except (AttributeError, KeyError):
LOG.info(_LI("AWS authentication failure."))
LOG.info("AWS authentication failure.")
# Try to extract the reason for failure so we can return the
# appropriate AWS error via raising an exception
try:
@ -235,7 +233,7 @@ class EC2Token(wsgi.Middleware):
else:
raise exception.HeatAccessDeniedError()
else:
LOG.info(_LI("AWS authentication successful."))
LOG.info("AWS authentication successful.")
# Authenticated!
ec2_creds = {'ec2Credentials': {'access': access,

View File

@ -19,7 +19,6 @@ import re
from oslo_log import log as logging
from heat.api.aws import exception
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__)
@ -98,7 +97,7 @@ def get_param_value(params, key):
try:
return params[key]
except KeyError:
LOG.error(_LE("Request does not contain %s parameter!"), key)
LOG.error("Request does not contain %s parameter!", key)
raise exception.HeatMissingParameterError(key)

View File

@ -22,7 +22,6 @@ from heat.api.aws import exception
from heat.api.aws import utils as api_utils
from heat.common import exception as heat_exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
@ -426,7 +425,7 @@ class StackController(object):
msg = _("The Template must be a JSON or YAML document.")
return exception.HeatInvalidParameterValueError(detail=msg)
LOG.info(_LI('validate_template'))
LOG.info('validate_template')
def format_validate_parameter(key, value):
"""Reformat engine output into AWS "ValidateTemplate" format."""

View File

@ -21,8 +21,6 @@ from heat.api.aws import exception
from heat.api.aws import utils as api_utils
from heat.common import exception as heat_exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LW
from heat.common import policy
from heat.common import wsgi
from heat.rpc import api as rpc_api
@ -199,7 +197,7 @@ class WatchController(object):
# Filter criteria not met, return None
return
except KeyError:
LOG.warning(_LW("Invalid filter key %s, ignoring"), f)
LOG.warning("Invalid filter key %s, ignoring", f)
return result
@ -250,8 +248,8 @@ class WatchController(object):
# need to process (each dict) for dimensions
metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
if not len(metric_data):
LOG.error(_LE("Request does not contain required MetricData"))
return exception.HeatMissingParameterError("MetricData list")
LOG.error("Request does not contain required MetricData")
return exception.HeatMissingParameterError(_("MetricData list"))
watch_name = None
dimensions = []

View File

@ -24,7 +24,6 @@ from heat.api.openstack.v1.views import stacks_view
from heat.common import context
from heat.common import environment_format
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import param_utils
from heat.common import serializers
@ -324,7 +323,7 @@ class StackController(object):
not_tags=not_tags,
not_tags_any=not_tags_any)
except AttributeError as ex:
LOG.warning(_LW("Old Engine Version: %s"), ex)
LOG.warning("Old Engine Version: %s", ex)
return stacks_view.collection(req, stacks=stacks,
count=count,

View File

@ -30,7 +30,6 @@ from oslo_service import systemd
import six
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat.common import wsgi
@ -55,7 +54,7 @@ def launch_api(setup_logging=True):
port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'),
LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port})
profiler.setup('heat-api', host)
gmr.TextGuruMeditation.setup_autorun(version)

View File

@ -32,7 +32,6 @@ from oslo_service import systemd
import six
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat.common import wsgi
@ -59,7 +58,7 @@ def launch_cfn_api(setup_logging=True):
port = cfg.CONF.heat_api_cfn.bind_port
host = cfg.CONF.heat_api_cfn.bind_host
LOG.info(_LI('Starting Heat API on %(host)s:%(port)s'),
LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port})
profiler.setup('heat-api-cfn', host)
gmr.TextGuruMeditation.setup_autorun(version)

View File

@ -32,7 +32,6 @@ from oslo_service import systemd
import six
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat.common import wsgi
@ -59,7 +58,7 @@ def launch_cloudwatch_api(setup_logging=True):
port = cfg.CONF.heat_api_cloudwatch.bind_port
host = cfg.CONF.heat_api_cloudwatch.bind_host
LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
{'host': host, 'port': port})
profiler.setup('heat-api-cloudwatch', host)
gmr.TextGuruMeditation.setup_autorun(version)

View File

@ -32,7 +32,6 @@ from oslo_reports import guru_meditation_report as gmr
from oslo_service import service
from heat.common import config
from heat.common.i18n import _LC
from heat.common import messaging
from heat.common import profiler
from heat.engine import template
@ -60,7 +59,7 @@ def launch_engine(setup_logging=True):
try:
mgr = template._get_template_extension_manager()
except template.TemplatePluginNotRegistered as ex:
LOG.critical(_LC("%s"), ex)
LOG.critical("%s", ex)
if not mgr or not mgr.names():
sys.exit("ERROR: No template format plugins registered")

View File

@ -22,7 +22,6 @@ from osprofiler import opts as profiler
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import wsgi
@ -375,8 +374,8 @@ def startup_sanity_check():
not cfg.CONF.stack_user_domain_name):
# FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration
LOG.warning(_LW('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default'))
LOG.warning('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default')
else:
domain_admin_user = cfg.CONF.stack_domain_admin
domain_admin_password = cfg.CONF.stack_domain_admin_password
@ -389,7 +388,7 @@ def startup_sanity_check():
auth_key_len = len(cfg.CONF.auth_encryption_key)
if auth_key_len in (16, 24):
LOG.warning(
_LW('Please update auth_encryption_key to be 32 characters.'))
'Please update auth_encryption_key to be 32 characters.')
elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
'must be 32 characters'))

View File

@ -28,7 +28,6 @@ import six
from heat.common import config
from heat.common import endpoint_utils
from heat.common import exception
from heat.common.i18n import _LE
from heat.common import policy
from heat.common import wsgi
from heat.db.sqlalchemy import api as db_api
@ -235,9 +234,9 @@ class RequestContext(context.RequestContext):
if auth_uri:
return auth_uri
else:
LOG.error(_LE('Keystone API endpoint not provided. Set '
'auth_uri in section [clients_keystone] '
'of the configuration file.'))
LOG.error('Keystone API endpoint not provided. Set '
'auth_uri in section [clients_keystone] '
'of the configuration file.')
raise exception.AuthorizationFailure()
@property
@ -247,8 +246,8 @@ class RequestContext(context.RequestContext):
cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id)
if not self._trusts_auth_plugin:
LOG.error(_LE('Please add the trustee credentials you need '
'to the %s section of your heat.conf file.'),
LOG.error('Please add the trustee credentials you need '
'to the %s section of your heat.conf file.',
TRUSTEE_CONF_GROUP)
raise exception.AuthorizationFailure()
@ -276,8 +275,8 @@ class RequestContext(context.RequestContext):
user_domain_id=self.user_domain,
auth_url=self.keystone_v3_endpoint)
LOG.error(_LE("Keystone API connection failed, no password "
"trust or auth_token!"))
LOG.error("Keystone API connection failed, no password "
"trust or auth_token!")
raise exception.AuthorizationFailure()
def reload_auth_plugin(self):

View File

@ -24,7 +24,6 @@ from oslo_utils import excutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LE
_FATAL_EXCEPTION_FORMAT_ERRORS = False
@ -69,9 +68,9 @@ class HeatException(Exception):
reraise=_FATAL_EXCEPTION_FORMAT_ERRORS):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
LOG.exception('Exception in string format operation')
for name, value in six.iteritems(kwargs):
LOG.error(_LE("%(name)s: %(value)s"),
LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value}) # noqa
if self.error_code:

View File

@ -27,16 +27,6 @@ _translators = i18n.TranslatorFactory(domain='heat')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def repr_wrapper(klass):
"""A decorator that defines __repr__ method under Python 2.

View File

@ -16,8 +16,6 @@
from oslo_log import log as logging
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import resources
LOG = logging.getLogger(__name__)
@ -42,19 +40,19 @@ def get_plug_point_class_instances():
slps = resources.global_env().get_stack_lifecycle_plugins()
pp_classes = [cls for name, cls in slps]
except Exception:
LOG.exception(_LE("failed to get lifecycle plug point classes"))
LOG.exception("failed to get lifecycle plug point classes")
for ppc in pp_classes:
try:
pp_class_instances.append(ppc())
except Exception:
LOG.exception(
_LE("failed to instantiate stack lifecycle class %s"), ppc)
"failed to instantiate stack lifecycle class %s", ppc)
try:
pp_class_instances = sorted(pp_class_instances,
key=lambda ppci: ppci.get_ordinal())
except Exception:
LOG.exception(_LE("failed to sort lifecycle plug point classes"))
LOG.exception("failed to sort lifecycle plug point classes")
return pp_class_instances
@ -108,13 +106,13 @@ def _do_ops(cinstances, opname, cnxt, stack, current_stack=None, action=None,
op(cnxt, stack, current_stack, action)
success_count += 1
except Exception as ex:
LOG.exception(_LE(
"%(opname)s %(ci)s failed for %(a)s on %(sid)s"),
LOG.exception(
"%(opname)s %(ci)s failed for %(a)s on %(sid)s",
{'opname': opname, 'ci': type(ci),
'a': action, 'sid': stack.id})
failure = True
failure_exception_message = ex.args[0] if ex.args else str(ex)
break
LOG.info(_LI("done with class=%(c)s, stackid=%(sid)s, action=%(a)s"),
LOG.info("done with class=%(c)s, stackid=%(sid)s, action=%(a)s",
{'c': type(ci), 'sid': stack.id, 'a': action})
return (failure, failure_exception_message, success_count)

View File

@ -26,7 +26,6 @@ import types
from oslo_log import log as logging
import six
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__)
@ -97,7 +96,7 @@ def load_modules(package, ignore_error=False):
try:
module = _import_module(importer, module_name, package)
except ImportError:
LOG.error(_LE('Failed to import module %s'), module_name)
LOG.error('Failed to import module %s', module_name)
if not ignore_error:
raise
else:

View File

@ -13,14 +13,13 @@
from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning(_LW('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.'),
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
'message': exception.message,
'name': entrypoint.name})

View File

@ -16,7 +16,6 @@ from oslo_log import log as logging
import osprofiler.initializer
from heat.common import context
from heat.common.i18n import _LW
cfg.CONF.import_opt('enabled', 'heat.common.config', group='profiler')
@ -31,14 +30,14 @@ def setup(binary, host):
project="heat",
service=binary,
host=host)
LOG.warning(_LW("OSProfiler is enabled.\nIt means that person who "
"knows any of hmac_keys that are specified in "
"/etc/heat/heat.conf can trace his requests. \n"
"In real life only operator can read this file so "
"there is no security issue. Note that even if person "
"can trigger profiler, only admin user can retrieve "
"trace information.\n"
"To disable OSprofiler set in heat.conf:\n"
"[profiler]\nenabled=false"))
LOG.warning("OSProfiler is enabled.\nIt means that person who "
"knows any of hmac_keys that are specified in "
"/etc/heat/heat.conf can trace his requests. \n"
"In real life only operator can read this file so "
"there is no security issue. Note that even if person "
"can trigger profiler, only admin user can retrieve "
"trace information.\n"
"To disable OSprofiler set in heat.conf:\n"
"[profiler]\nenabled=false")
else:
osprofiler.web.disable()

View File

@ -21,7 +21,6 @@ from six.moves import urllib
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
cfg.CONF.import_opt('max_template_size', 'heat.common.config')
@ -40,7 +39,7 @@ def get(url, allowed_schemes=('http', 'https')):
the allowed_schemes argument.
Raise an IOError if getting the data fails.
"""
LOG.info(_LI('Fetching data from %s'), url)
LOG.info('Fetching data from %s', url)
components = urllib.parse.urlparse(url)
@ -70,10 +69,11 @@ def get(url, allowed_schemes=('http', 'https')):
for chunk in reader:
result += chunk
if len(result) > cfg.CONF.max_template_size:
raise URLFetchError("Template exceeds maximum allowed size (%s"
" bytes)" % cfg.CONF.max_template_size)
raise URLFetchError(_("Template exceeds maximum allowed size "
"(%s bytes)") %
cfg.CONF.max_template_size)
return result
except exceptions.RequestException as ex:
LOG.info(_LI('Failed to retrieve template: %s') % ex)
LOG.info('Failed to retrieve template: %s', ex)
raise URLFetchError(_('Failed to retrieve template from %s') % url)

View File

@ -48,9 +48,6 @@ import webob.exc
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import serializers
@ -275,7 +272,7 @@ class Server(object):
def kill_children(self, *args):
"""Kills the entire process group."""
LOG.error(_LE('SIGTERM received'))
LOG.error('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False
@ -283,7 +280,7 @@ class Server(object):
def hup(self, *args):
"""Reloads configuration files with zero down time."""
LOG.error(_LE('SIGHUP received'))
LOG.error('SIGHUP received')
signal.signal(signal.SIGHUP, signal.SIG_IGN)
raise exception.SIGHUPInterrupt
@ -315,7 +312,7 @@ class Server(object):
else:
childs_num = workers
LOG.info(_LI("Starting %d workers"), workers)
LOG.info("Starting %d workers", workers)
signal.signal(signal.SIGTERM, self.kill_children)
signal.signal(signal.SIGINT, self.kill_children)
signal.signal(signal.SIGHUP, self.hup)
@ -333,7 +330,7 @@ class Server(object):
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
LOG.info('Caught keyboard interrupt. Exiting.')
os.killpg(0, signal.SIGTERM)
break
except exception.SIGHUPInterrupt:
@ -417,22 +414,22 @@ class Server(object):
def _remove_children(self, pid):
if pid in self.children:
self.children.remove(pid)
LOG.info(_LI('Removed dead child %s'), pid)
LOG.info('Removed dead child %s', pid)
elif pid in self.stale_children:
self.stale_children.remove(pid)
LOG.info(_LI('Removed stale child %s'), pid)
LOG.info('Removed stale child %s', pid)
else:
LOG.warning(_LW('Unrecognised child %s'), pid)
LOG.warning('Unrecognised child %s', pid)
def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0:
LOG.debug('No stale children')
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
LOG.error(_LE('Not respawning child %d, cannot '
'recover from termination'), pid)
LOG.error('Not respawning child %d, cannot '
'recover from termination', pid)
if not self.children and not self.stale_children:
LOG.info(
_LI('All workers have terminated. Exiting'))
'All workers have terminated. Exiting')
self.running = False
else:
if len(self.children) < self.conf.workers:
@ -509,12 +506,12 @@ class Server(object):
# exit on sighup
self._sock = None
self.run_server()
LOG.info(_LI('Child %d exiting normally'), os.getpid())
LOG.info('Child %d exiting normally', os.getpid())
# self.pool.waitall() is now called in wsgi's server so
# it's safe to exit here
sys.exit(0)
else:
LOG.info(_LI('Started child %s'), pid)
LOG.info('Started child %s', pid)
self.children.add(pid)
def run_server(self):
@ -541,7 +538,7 @@ class Server(object):
def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread."""
LOG.info(_LI("Starting single process server"))
LOG.info("Starting single process server")
eventlet.wsgi.server(sock, application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
@ -838,7 +835,7 @@ class Resource(object):
action_result = self.dispatch(self.controller, action,
request, **action_args)
except TypeError as err:
LOG.error(_LE('Exception handling resource: %s'), err)
LOG.error('Exception handling resource: %s', err)
msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg)
@ -860,7 +857,7 @@ class Resource(object):
raise
if isinstance(err, webob.exc.HTTPServerError):
LOG.error(
_LE("Returning %(code)s to user: %(explanation)s"),
"Returning %(code)s to user: %(explanation)s",
{'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
@ -899,8 +896,7 @@ class Resource(object):
err_body = action_result.get_unserialized_body()
serializer.default(action_result, err_body)
except Exception:
LOG.warning(_LW("Unable to serialize exception "
"response"))
LOG.warning("Unable to serialize exception response")
return action_result
@ -934,7 +930,7 @@ class Resource(object):
def log_exception(err, exc_info):
args = {'exc_info': exc_info} if cfg.CONF.debug else {}
LOG.error(_LE("Unexpected error occurred serving API: %s"), err,
LOG.error("Unexpected error occurred serving API: %s", err,
**args)

View File

@ -35,9 +35,6 @@ from sqlalchemy.orm import aliased as orm_aliased
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.db.sqlalchemy import filters as db_filters
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
@ -1294,7 +1291,7 @@ def _purge_stacks(stack_infos, engine, meta):
syncpoint = sqlalchemy.Table('sync_point', meta, autoload=True)
stack_info_str = ','.join([str(i) for i in stack_infos])
LOG.info("Purging stacks %s" % stack_info_str)
LOG.info("Purging stacks %s", stack_info_str)
# TODO(cwolfe): find a way to make this re-entrant with
# reasonably sized transactions (good luck), or add
@ -1475,8 +1472,8 @@ def _db_encrypt_or_decrypt_template_params(
for raw_template in next_batch:
try:
if verbose:
LOG.info(_LI("Processing raw_template %(id)d..."),
{'id': raw_template.id})
LOG.info("Processing raw_template %s...",
raw_template.id)
env = raw_template.environment
needs_update = False
@ -1524,16 +1521,16 @@ def _db_encrypt_or_decrypt_template_params(
raw_template_update(ctxt, raw_template.id,
{'environment': newenv})
except Exception as exc:
LOG.exception(_LE('Failed to %(crypt_action)s parameters '
'of raw template %(id)d'),
LOG.exception('Failed to %(crypt_action)s parameters '
'of raw template %(id)d',
{'id': raw_template.id,
'crypt_action': _crypt_action(encrypt)})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished %(crypt_action)s processing of "
"raw_template %(id)d."),
LOG.info("Finished %(crypt_action)s processing of "
"raw_template %(id)d.",
{'id': raw_template.id,
'crypt_action': _crypt_action(encrypt)})
next_batch = list(itertools.islice(template_batches, batch_size))
@ -1560,8 +1557,8 @@ def _db_encrypt_or_decrypt_resource_prop_data_legacy(
continue
try:
if verbose:
LOG.info(_LI("Processing resource %(id)d..."),
{'id': resource.id})
LOG.info("Processing resource %s...",
resource.id)
if encrypt:
result = crypt.encrypted_dict(resource.properties_data,
encryption_key)
@ -1573,16 +1570,16 @@ def _db_encrypt_or_decrypt_resource_prop_data_legacy(
'properties_data_encrypted': encrypt},
resource.atomic_key)
except Exception as exc:
LOG.exception(_LE('Failed to %(crypt_action)s '
'properties_data of resource %(id)d') %
LOG.exception('Failed to %(crypt_action)s '
'properties_data of resource %(id)d' %
{'id': resource.id,
'crypt_action': _crypt_action(encrypt)})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished processing resource "
"%(id)d."), {'id': resource.id})
LOG.info("Finished processing resource %s.",
resource.id)
next_batch = list(itertools.islice(resource_batches, batch_size))
return excs
@ -1607,8 +1604,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
continue
try:
if verbose:
LOG.info(_LI("Processing resource_properties_data "
"%(id)d..."), {'id': rpd.id})
LOG.info("Processing resource_properties_data "
"%s...", rpd.id)
if encrypt:
result = crypt.encrypted_dict(rpd.data,
encryption_key)
@ -1619,8 +1616,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
'encrypted': encrypt})
except Exception as exc:
LOG.exception(
_LE("Failed to %(crypt_action)s "
"data of resource_properties_data %(id)d") %
"Failed to %(crypt_action)s "
"data of resource_properties_data %(id)d" %
{'id': rpd.id,
'crypt_action': _crypt_action(encrypt)})
excs.append(exc)
@ -1628,8 +1625,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
finally:
if verbose:
LOG.info(
_LI("Finished processing resource_properties_data"
" %(id)d."), {'id': rpd.id})
"Finished processing resource_properties_data"
" %s.", rpd.id)
next_batch = list(itertools.islice(rpd_batches, batch_size))
return excs
@ -1706,10 +1703,10 @@ def db_properties_data_migrate(ctxt, batch_size=50):
encrypted = resource.properties_data_encrypted
if encrypted is None:
LOG.warning(
_LW('Unexpected: resource.encrypted is None for '
'resource id %(id)d for legacy '
'resource.properties_data, assuming False.'),
{'id': resource.id})
'Unexpected: resource.encrypted is None for '
'resource id %s for legacy '
'resource.properties_data, assuming False.',
resource.id)
encrypted = False
rsrc_prop_data = resource_prop_data_create(
ctxt, {'encrypted': encrypted,
@ -1720,8 +1717,8 @@ def db_properties_data_migrate(ctxt, batch_size=50):
'rsrc_prop_data_id': rsrc_prop_data.id},
resource.atomic_key)
except Exception:
LOG.exception(_LE('Failed to migrate properties_data for '
'resource %(id)d'), {'id': resource.id})
LOG.exception('Failed to migrate properties_data for '
'resource %d', resource.id)
continue
next_batch = list(itertools.islice(resource_batches, batch_size))
@ -1743,8 +1740,8 @@ def db_properties_data_migrate(ctxt, batch_size=50):
event.update({'resource_properties': None,
'rsrc_prop_data_id': rsrc_prop_data.id})
except Exception:
LOG.exception(_LE('Failed to migrate resource_properties '
'for event %(id)d'), {'id': event.id})
LOG.exception('Failed to migrate resource_properties '
'for event %d', event.id)
continue
next_batch = list(itertools.islice(event_batches, batch_size))

View File

@ -18,7 +18,6 @@ from oslo_utils import timeutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common import param_utils
from heat.common import template_format
from heat.common import timeutils as heat_timeutils
@ -41,7 +40,7 @@ def extract_args(params):
try:
timeout = int(timeout_mins)
except (ValueError, TypeError):
LOG.exception(_LE('Timeout conversion failed'))
LOG.exception('Timeout conversion failed')
else:
if timeout > 0:
kwargs[rpc_api.PARAM_TIMEOUT] = timeout
@ -481,7 +480,7 @@ def format_watch_data(wd, rule_names):
if len(metric) == 1:
metric_name, metric_data = metric[0]
else:
LOG.error(_LE("Unexpected number of keys in watch_data.data!"))
LOG.error("Unexpected number of keys in watch_data.data!")
return
result = {

View File

@ -17,7 +17,6 @@ from oslo_utils import strutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common.i18n import repr_wrapper
from heat.engine import constraints as constr
from heat.engine import support
@ -182,35 +181,35 @@ class Attributes(collections.Mapping):
def _validate_type(self, attrib, value):
if attrib.schema.type == attrib.schema.STRING:
if not isinstance(value, six.string_types):
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.STRING})
elif attrib.schema.type == attrib.schema.LIST:
if (not isinstance(value, collections.Sequence)
or isinstance(value, six.string_types)):
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.LIST})
elif attrib.schema.type == attrib.schema.MAP:
if not isinstance(value, collections.Mapping):
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.MAP})
elif attrib.schema.type == attrib.schema.INTEGER:
if not isinstance(value, int):
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.INTEGER})
elif attrib.schema.type == attrib.schema.BOOLEAN:
try:
strutils.bool_from_string(value, strict=True)
except ValueError:
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.BOOLEAN})

View File

@ -21,8 +21,6 @@ import functools
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
@ -69,7 +67,7 @@ class CheckResource(object):
return False
def _trigger_rollback(self, stack):
LOG.info(_LI("Triggering rollback of %(stack_name)s %(action)s "),
LOG.info("Triggering rollback of %(stack_name)s %(action)s ",
{'action': stack.action, 'stack_name': stack.name})
stack.rollback()
@ -113,7 +111,7 @@ class CheckResource(object):
stack, self.msg_queue)
except resource.UpdateReplace:
new_res_id = rsrc.make_replacement(tmpl.id)
LOG.info(_LI("Replacing resource with new id %s"),
LOG.info("Replacing resource with new id %s",
new_res_id)
rpc_data = sync_point.serialize_input_data(self.input_data)
self._rpc_client.check_resource(cnxt,
@ -177,7 +175,7 @@ class CheckResource(object):
if (resource_id, True) in graph:
# not is_update evaluates to True below, which means update
key = (resource_id, not is_update)
LOG.info(_LI('Re-trigger resource: (%(key1)s, %(key2)s)'),
LOG.info('Re-trigger resource: (%(key1)s, %(key2)s)',
{'key1': key[0], 'key2': key[1]})
predecessors = set(graph[key])
@ -340,7 +338,7 @@ def _check_for_message(msg_queue):
if message == rpc_api.THREAD_CANCEL:
raise CancelOperation
LOG.error(_LE('Unknown message "%s" received'), message)
LOG.error('Unknown message "%s" received', message)
def check_resource_update(rsrc, template_id, resource_data, engine_id,

View File

@ -21,7 +21,6 @@ from stevedore import enabled
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import pluginutils
LOG = logging.getLogger(__name__)
@ -83,7 +82,7 @@ class OpenStackClients(object):
client = getattr(self, method_name)()
self._clients[name] = client
return client
LOG.warning(_LW('Requested client "%s" not found'), name)
LOG.warning('Requested client "%s" not found', name)
class ClientBackend(object):

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
@ -137,16 +136,16 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
return True
if vol.status in ('in-use', 'detaching'):
LOG.debug('%s - volume still in use' % vol_id)
LOG.debug('%s - volume still in use', vol_id)
return False
LOG.debug('Volume %(id)s - status: %(status)s' % {
LOG.debug('Volume %(id)s - status: %(status)s', {
'id': vol.id, 'status': vol.status})
if vol.status not in ('available', 'deleting'):
LOG.debug("Detachment failed - volume %(vol)s "
"is in %(status)s status" % {"vol": vol.id,
"status": vol.status})
"is in %(status)s status",
{"vol": vol.id, "status": vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume detachment failed'))
@ -157,19 +156,19 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
vol = self.client().volumes.get(vol_id)
if vol.status in ('available', 'attaching'):
LOG.debug("Volume %(id)s is being attached - "
"volume status: %(status)s" % {'id': vol_id,
'status': vol.status})
"volume status: %(status)s",
{'id': vol_id, 'status': vol.status})
return False
if vol.status != 'in-use':
LOG.debug("Attachment failed - volume %(vol)s is "
"in %(status)s status" % {"vol": vol_id,
"status": vol.status})
"in %(status)s status",
{"vol": vol_id, "status": vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume attachment failed'))
LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
LOG.info('Attaching volume %(id)s complete', {'id': vol_id})
return True

View File

@ -29,8 +29,6 @@ from oslo_utils import importutils
from heat.common import context
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LW
LOG = logging.getLogger('heat.engine.clients.keystoneclient')
@ -96,7 +94,7 @@ class KsClientWrapper(object):
self.domain_admin_user = cfg.CONF.stack_domain_admin
self.domain_admin_password = cfg.CONF.stack_domain_admin_password
LOG.debug('Using stack domain %s' % self.stack_domain)
LOG.debug('Using stack domain %s', self.stack_domain)
@property
def context(self):
@ -140,7 +138,7 @@ class KsClientWrapper(object):
try:
auth.get_token(self.session)
except ks_exception.Unauthorized:
LOG.error(_LE("Domain admin client authentication failed"))
LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure()
self._domain_admin_auth = auth
@ -167,17 +165,17 @@ class KsClientWrapper(object):
try:
auth_ref = self.context.auth_plugin.get_access(self.session)
except ks_exception.Unauthorized:
LOG.error(_LE("Keystone client authentication failed"))
LOG.error("Keystone client authentication failed")
raise exception.AuthorizationFailure()
if self.context.trust_id:
# Sanity check
if not auth_ref.trust_scoped:
LOG.error(_LE("trust token re-scoping failed!"))
LOG.error("trust token re-scoping failed!")
raise exception.AuthorizationFailure()
# Sanity check that impersonation is effective
if self.context.trustor_user_id != auth_ref.user_id:
LOG.error(_LE("Trust impersonation failed"))
LOG.error("Trust impersonation failed")
raise exception.AuthorizationFailure()
return client
@ -202,7 +200,7 @@ class KsClientWrapper(object):
trustee_user_id = self.context.trusts_auth_plugin.get_user_id(
self.session)
except ks_exception.Unauthorized:
LOG.error(_LE("Domain admin client authentication failed"))
LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure()
trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
@ -241,8 +239,8 @@ class KsClientWrapper(object):
def _get_username(self, username):
if(len(username) > 64):
LOG.warning(_LW("Truncating the username %s to the last 64 "
"characters."), username)
LOG.warning("Truncating the username %s to the last 64 "
"characters.", username)
# get the last 64 characters of the username
return username[-64:]
@ -268,15 +266,15 @@ class KsClientWrapper(object):
name=self._get_username(username), password=password,
default_project=self.context.tenant_id)
# Add user to heat_stack_user_role
LOG.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id})
LOG.debug("Adding user %(user)s to role %(role)s",
{'user': user.id, 'role': role_id})
self.client.roles.grant(role=role_id, user=user.id,
project=self.context.tenant_id)
else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
"check role exists!"), {
'user': username,
'role': cfg.CONF.heat_stack_user_role})
LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!",
{'user': username,
'role': cfg.CONF.heat_stack_user_role})
raise exception.Error(_("Can't find role %s")
% cfg.CONF.heat_stack_user_role)
@ -331,13 +329,13 @@ class KsClientWrapper(object):
name=self._get_username(username), password=password,
default_project=project_id, domain=self.stack_domain_id)
# Add to stack user role
LOG.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id})
LOG.debug("Adding user %(user)s to role %(role)s",
{'user': user.id, 'role': role_id})
self.domain_admin_client.roles.grant(role=role_id, user=user.id,
project=project_id)
else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
"check role exists!"),
LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!",
{'user': username,
'role': cfg.CONF.heat_stack_user_role})
raise exception.Error(_("Can't find role %s")
@ -351,7 +349,7 @@ class KsClientWrapper(object):
try:
access = self.domain_admin_auth.get_access(self.session)
except ks_exception.Unauthorized:
LOG.error(_LE("Keystone client authentication failed"))
LOG.error("Keystone client authentication failed")
raise exception.AuthorizationFailure()
self._stack_domain_id = access.domain_id
@ -417,12 +415,12 @@ class KsClientWrapper(object):
except ks_exception.NotFound:
return
except ks_exception.Forbidden:
LOG.warning(_LW('Unable to get details for project %s, '
'not deleting'), project_id)
LOG.warning('Unable to get details for project %s, '
'not deleting', project_id)
return
if project.domain_id != self.stack_domain_id:
LOG.warning(_LW('Not deleting non heat-domain project'))
LOG.warning('Not deleting non heat-domain project')
return
try:

View File

@ -31,8 +31,6 @@ import tenacity
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
@ -144,15 +142,15 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try:
server = self.client().servers.get(server_id)
except exceptions.OverLimit as exc:
LOG.warning(_LW("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s"),
LOG.warning("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s",
{'id': server_id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warning(_LW("Received the following exception when "
"fetching server (%(id)s) : %(exception)s"),
LOG.warning("Received the following exception when "
"fetching server (%(id)s) : %(exception)s",
{'id': server_id,
'exception': exc})
else:
@ -167,17 +165,17 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try:
server.get()
except exceptions.OverLimit as exc:
LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s"),
LOG.warning("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s",
{'name': server.name,
'id': server.id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warning(_LW('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): '
'%(exception)s'),
LOG.warning('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): '
'%(exception)s',
{'name': server.name,
'id': server.id,
'exception': exc})
@ -568,7 +566,7 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
try:
server = self.client().servers.get(server)
except exceptions.NotFound as ex:
LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
LOG.warning('Instance (%(server)s) not found: %(ex)s',
{'server': server, 'ex': ex})
else:
for n in sorted(server.networks, reverse=True):
@ -691,12 +689,12 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
self.client().volumes.get_server_volume(server_id, attach_id)
except Exception as ex:
self.ignore_not_found(ex)
LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
LOG.info("Volume %(vol)s is detached from server %(srv)s",
{'vol': attach_id, 'srv': server_id})
return True
else:
LOG.debug("Server %(srv)s still has attachment %(att)s." % {
'att': attach_id, 'srv': server_id})
LOG.debug("Server %(srv)s still has attachment %(att)s.",
{'att': attach_id, 'srv': server_id})
return False
def interface_detach(self, server_id, port_id):

View File

@ -18,7 +18,6 @@ from zaqarclient.queues.v2 import client as zaqarclient
from zaqarclient.transport import errors as zaqar_errors
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.engine.clients import client_plugin
from heat.engine import constraints
@ -42,7 +41,7 @@ class ZaqarClientPlugin(client_plugin.ClientPlugin):
def create_for_tenant(self, tenant_id, token):
con = self.context
if token is None:
LOG.error(_LE("Zaqar connection failed, no auth_token!"))
LOG.error("Zaqar connection failed, no auth_token!")
return None
opts = {

View File

@ -25,7 +25,6 @@ import six
from heat.common import cache
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resources
# decorator that allows to cache the value
@ -101,8 +100,8 @@ class Schema(collections.Mapping):
message=_('Invalid type (%s)') % self.type)
if required and default is not None:
LOG.warning(_LW("Option 'required=True' should not be used with "
"any 'default' value (%s)") % default)
LOG.warning("Option 'required=True' should not be used with "
"any 'default' value (%s)", default)
self.description = description
self.required = required

View File

@ -26,9 +26,6 @@ import six
from heat.common import environment_format as env_fmt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import policy
from heat.engine import support
@ -321,13 +318,13 @@ class ResourceRegistry(object):
for res_name, reg_info in list(registry.items()):
if (isinstance(reg_info, ResourceInfo) and
res_name.startswith(name[:-1])):
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
LOG.warning('Removing %(item)s from %(path)s', {
'item': res_name,
'path': descriptive_path})
del registry[res_name]
else:
# delete this entry.
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
LOG.warning('Removing %(item)s from %(path)s', {
'item': name,
'path': descriptive_path})
registry.pop(name, None)
@ -340,8 +337,7 @@ class ResourceRegistry(object):
'path': descriptive_path,
'was': str(registry[name].value),
'now': str(info.value)}
LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
details)
LOG.warning('Changing %(path)s from %(was)s to %(now)s', details)
if isinstance(info, ClassResourceInfo):
if info.value.support_status.status != support.SUPPORTED:
@ -353,7 +349,7 @@ class ResourceRegistry(object):
'message': six.text_type(
info.value.support_status.message)
}
LOG.warning(_LW('%(name)s is %(status)s. %(message)s'),
LOG.warning('%(name)s is %(status)s. %(message)s',
details)
info.user_resource = (self.global_registry is not None)
@ -366,7 +362,7 @@ class ResourceRegistry(object):
if name == 'resources':
continue
if show_all or isinstance(registry[name], TemplateResourceInfo):
msg = (_LI('%(p)sRegistered: %(t)s') %
msg = ('%(p)sRegistered: %(t)s' %
{'p': prefix,
't': six.text_type(registry[name])})
LOG.info(msg)
@ -842,17 +838,17 @@ def read_global_environment(env, env_dir=None):
try:
env_files = glob.glob(os.path.join(env_dir, '*'))
except OSError:
LOG.exception(_LE('Failed to read %s'), env_dir)
LOG.exception('Failed to read %s', env_dir)
return
for file_path in env_files:
try:
with open(file_path) as env_fd:
LOG.info(_LI('Loading %s'), file_path)
LOG.info('Loading %s', file_path)
env_body = env_fmt.parse(env_fd.read())
env_fmt.default_for_missing(env_body)
env.load(env_body)
except ValueError:
LOG.exception(_LE('Failed to parse %s'), file_path)
LOG.exception('Failed to parse %s', file_path)
except IOError:
LOG.exception(_LE('Failed to read %s'), file_path)
LOG.exception('Failed to read %s', file_path)

View File

@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log
import six
from heat.common.i18n import _LE
from heat.common import plugin_loader
LOG = log.getLogger(__name__)
@ -92,15 +91,15 @@ class PluginMapping(object):
try:
mapping_dict = mapping_func(*self.args, **self.kwargs)
except Exception:
LOG.error(_LE('Failed to load %(mapping_name)s '
'from %(module)s'), fmt_data)
LOG.error('Failed to load %(mapping_name)s '
'from %(module)s', fmt_data)
raise
else:
if isinstance(mapping_dict, collections.Mapping):
return mapping_dict
elif mapping_dict is not None:
LOG.error(_LE('Invalid type for %(mapping_name)s '
'from %(module)s'), fmt_data)
LOG.error('Invalid type for %(mapping_name)s '
'from %(module)s', fmt_data)
return {}

View File

@ -24,9 +24,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import short_id
from heat.common import timeutils
@ -188,7 +185,7 @@ class Resource(object):
try:
(svc_available, reason) = cls.is_service_available(context)
except Exception as exc:
LOG.exception(_LE("Resource type %s unavailable"),
LOG.exception("Resource type %s unavailable",
resource_type)
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
@ -485,7 +482,7 @@ class Resource(object):
self._add_event(self.action, self.status,
_("%(a)s paused until Hook %(h)s is cleared")
% {'a': action, 'h': hook})
LOG.info(_LI('Reached hook on %s'), self)
LOG.info('Reached hook on %s', self)
while self.has_hook(hook):
try:
@ -584,8 +581,8 @@ class Resource(object):
# stacks (see bug 1543685). The error should be harmless
# because we're on the before properties, which have presumably
# already been validated.
LOG.warning(_LW('Ignoring error in old property value '
'%(prop_name)s: %(msg)s'),
LOG.warning('Ignoring error in old property value '
'%(prop_name)s: %(msg)s',
{'prop_name': key, 'msg': six.text_type(exc)})
return True
@ -768,7 +765,7 @@ class Resource(object):
self.state_set(action, self.COMPLETE, six.text_type(ex))
LOG.debug('%s', six.text_type(ex))
except Exception as ex:
LOG.info(_LI('%(action)s: %(info)s'),
LOG.info('%(action)s: %(info)s',
{"action": action,
"info": six.text_type(self)},
exc_info=True)
@ -784,7 +781,7 @@ class Resource(object):
msg += ' (%s)' % reason
self.state_set(action, self.FAILED, msg)
except Exception:
LOG.exception(_LE('Error marking resource as failed'))
LOG.exception('Error marking resource as failed')
else:
self.state_set(action, self.COMPLETE)
@ -837,7 +834,7 @@ class Resource(object):
canceller(handler_data)
except Exception:
LOG.exception(
_LE('Error cancelling resource %s'),
'Error cancelling resource %s',
action
)
@ -957,7 +954,7 @@ class Resource(object):
yield self._break_if_required(
self.CREATE, environment.HOOK_PRE_CREATE)
LOG.info(_LI('creating %s'), self)
LOG.info('creating %s', self)
# Re-resolve the template, since if the resource Ref's
# the StackId pseudo parameter, it will change after
@ -1284,9 +1281,9 @@ class Resource(object):
except exception.EntityNotFound:
raise UpdateReplace(self)
except Exception as ex:
LOG.warning(_LW("Resource cannot be updated with it's "
"live state in case of next "
"error: %s"), ex)
LOG.warning("Resource cannot be updated with it's "
"live state in case of next "
"error: %s", ex)
return after_props, before_props
def _prepare_update_replace(self, action):
@ -1367,7 +1364,7 @@ class Resource(object):
exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('updating %s'), self)
LOG.info('updating %s', self)
self.updated_time = datetime.utcnow()
@ -1429,7 +1426,7 @@ class Resource(object):
original state with the added message that check was not performed.
"""
action = self.CHECK
LOG.info(_LI('Checking %s'), self)
LOG.info('Checking %s', self)
if hasattr(self, 'handle_%s' % action.lower()):
if self.state == (self.INIT, self.COMPLETE):
@ -1477,7 +1474,7 @@ class Resource(object):
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('suspending %s'), self)
LOG.info('suspending %s', self)
return self._do_action(action)
def resume(self):
@ -1496,12 +1493,12 @@ class Resource(object):
exc = exception.Error(_('State %s invalid for resume')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('resuming %s'), self)
LOG.info('resuming %s', self)
return self._do_action(action)
def snapshot(self):
"""Snapshot the resource and return the created data, if any."""
LOG.info(_LI('snapshotting %s'), self)
LOG.info('snapshotting %s', self)
return self._do_action(self.SNAPSHOT)
@scheduler.wrappertask
@ -1551,7 +1548,7 @@ class Resource(object):
This may be overridden by resource plugins to add extra
validation logic specific to the resource implementation.
"""
LOG.info(_LI('Validating %s'), self)
LOG.info('Validating %s', self)
return self.validate_template()
def validate_template(self):
@ -1608,8 +1605,8 @@ class Resource(object):
db_res = resource_objects.Resource.get_obj(
self.context, self.replaced_by)
except exception.NotFound:
LOG.info(_LI("Could not find replacement of resource %(name)s "
"with id %(id)s while updating needed_by."),
LOG.info("Could not find replacement of resource %(name)s "
"with id %(id)s while updating needed_by.",
{'name': self.name, 'id': self.replaced_by})
return
@ -1698,7 +1695,7 @@ class Resource(object):
yield self._break_if_required(
self.DELETE, environment.HOOK_PRE_DELETE)
LOG.info(_LI('deleting %s'), self)
LOG.info('deleting %s', self)
if self._stored_properties_data is not None:
# On delete we can't rely on re-resolving the properties
@ -1723,7 +1720,7 @@ class Resource(object):
while True:
count += 1
LOG.info(_LI('delete %(name)s attempt %(attempt)d') %
LOG.info('delete %(name)s attempt %(attempt)d' %
{'name': six.text_type(self), 'attempt': count+1})
if count:
delay = timeutils.retry_backoff_delay(count,
@ -1765,7 +1762,7 @@ class Resource(object):
self.id,
{'physical_resource_id': self.resource_id})
except Exception as ex:
LOG.warning(_LW('db error %s'), ex)
LOG.warning('db error %s', ex)
def store(self, set_metadata=False):
"""Create the resource in the database.
@ -1836,11 +1833,11 @@ class Resource(object):
atomic_key=rs.atomic_key,
expected_engine_id=None)
except Exception as ex:
LOG.error(_LE('DB error %s'), ex)
LOG.error('DB error %s', ex)
raise
if not updated_ok:
LOG.info(_LI('Resource %s is locked for update; deferring'),
LOG.info('Resource %s is locked for update; deferring',
six.text_type(self))
LOG.debug(('Resource id:%(resource_id)s with '
'atomic_key:%(atomic_key)s, locked '
@ -1872,7 +1869,7 @@ class Resource(object):
atomic_key=atomic_key)
if not updated_ok:
LOG.warning(_LW('Failed to unlock resource %s'), self.name)
LOG.warning('Failed to unlock resource %s', self.name)
def _resolve_all_attributes(self, attr):
"""Method for resolving all attributes.
@ -1920,7 +1917,7 @@ class Resource(object):
else:
return resource.to_dict()
except AttributeError as ex:
LOG.warning(_LW("Resolving 'show' attribute has failed : %s"),
LOG.warning("Resolving 'show' attribute has failed : %s",
ex)
return None
@ -2138,7 +2135,7 @@ class Resource(object):
# `handle_signal` callbacks:
hook = details['unset_hook']
self.clear_hook(hook)
LOG.info(_LI('Clearing %(hook)s hook on %(resource)s'),
LOG.info('Clearing %(hook)s hook on %(resource)s',
{'hook': hook, 'resource': six.text_type(self)})
self._add_event(self.action, self.status,
"Hook %s is cleared" % hook)
@ -2175,7 +2172,7 @@ class Resource(object):
# Don't log an event as it just spams the user.
pass
except Exception as ex:
LOG.info(_LI('signal %(name)s : %(msg)s'),
LOG.info('signal %(name)s : %(msg)s',
{'name': six.text_type(self),
'msg': six.text_type(ex)},
exc_info=True)
@ -2207,7 +2204,7 @@ class Resource(object):
def metadata_update(self, new_metadata=None):
"""No-op for resources which don't explicitly override this method."""
if new_metadata:
LOG.warning(_LW("Resource %s does not implement metadata update"),
LOG.warning("Resource %s does not implement metadata update",
self.name)
@classmethod

View File

@ -18,8 +18,6 @@ import six
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import function
@ -282,8 +280,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
min_adjustment_step=None):
"""Adjust the size of the scaling group if the cooldown permits."""
if self.status != self.COMPLETE:
LOG.info(_LI("%s NOT performing scaling adjustment, "
"when status is not COMPLETE") % self.name)
LOG.info("%s NOT performing scaling adjustment, "
"when status is not COMPLETE", self.name)
raise resource.NoActionRequired
capacity = grouputils.get_size(self)
@ -291,8 +289,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
adjustment_type,
min_adjustment_step)
if new_capacity == capacity:
LOG.info(_LI("%s NOT performing scaling adjustment, "
"as there is no change in capacity.") % self.name)
LOG.info("%s NOT performing scaling adjustment, "
"as there is no change in capacity.", self.name)
raise resource.NoActionRequired
self._check_scaling_allowed()
@ -322,7 +320,7 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
})
notification.send(**notif)
except Exception:
LOG.exception(_LE('Failed sending error notification'))
LOG.exception('Failed sending error notification')
else:
size_changed = True
notif.update({
@ -333,8 +331,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
})
notification.send(**notif)
except Exception:
LOG.error(_LE("Error in performing scaling adjustment for "
"group %s.") % self.name)
LOG.error("Error in performing scaling adjustment for "
"group %s.", self.name)
raise
finally:
self._finished_scaling("%s : %s" % (adjustment_type, adjustment),

View File

@ -17,9 +17,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -106,21 +103,20 @@ class ElasticIp(resource.Resource):
'floatingip': props})['floatingip']
self.ipaddress = ips['floating_ip_address']
self.resource_id_set(ips['id'])
LOG.info(_LI('ElasticIp create %s'), str(ips))
LOG.info('ElasticIp create %s', str(ips))
else:
try:
ips = self.client().floating_ips.create()
except Exception as e:
with excutils.save_and_reraise_exception():
if self.client_plugin('nova').is_not_found(e):
LOG.error(_LE("No default floating IP pool configured."
" Set 'default_floating_pool' in "
"nova.conf."))
LOG.error("No default floating IP pool configured. "
"Set 'default_floating_pool' in nova.conf.")
if ips:
self.ipaddress = ips.ip
self.resource_id_set(ips.id)
LOG.info(_LI('ElasticIp create %s'), str(ips))
LOG.info('ElasticIp create %s', str(ips))
instance_id = self.properties[self.INSTANCE_ID]
if instance_id:
@ -330,7 +326,7 @@ class ElasticIpAssociation(resource.Resource):
self._floatingIp_detach(nova_ignore_not_found=True)
port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc:
LOG.error(_LE('Port not specified.'))
LOG.error('Port not specified.')
raise exception.NotFound(_('Failed to update, can not found '
'port info.'))
@ -353,7 +349,7 @@ class ElasticIpAssociation(resource.Resource):
port_id, port_rsrc = self._get_port_info(ni_id_update,
instance_id_update)
if not port_id or not port_rsrc:
LOG.error(_LE('Port not specified.'))
LOG.error('Port not specified.')
raise exception.NotFound(_('Failed to update, can not found '
'port info.'))
@ -377,8 +373,7 @@ class ElasticIpAssociation(resource.Resource):
instance_id = self.properties[self.INSTANCE_ID]
port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc:
LOG.warning(_LW('Skipping association, resource not '
'specified'))
LOG.warning('Skipping association, resource not specified')
return
float_id = self.properties[self.ALLOCATION_ID]

View File

@ -21,8 +21,6 @@ cfg.CONF.import_opt('max_server_name_length', 'heat.common.config')
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
@ -396,7 +394,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
elif name in self.ATTRIBUTES[1:]:
res = self._ipaddress()
LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
LOG.info('%(name)s._resolve_attribute(%(attname)s) == %(res)s',
{'name': self.name, 'attname': name, 'res': res})
return six.text_type(res) if res else None
@ -679,9 +677,9 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# keep the behavior as creation
elif (old_network_ifaces and
(self.NETWORK_INTERFACES not in prop_diff)):
LOG.warning(_LW('There is no change of "%(net_interfaces)s" '
'for instance %(server)s, do nothing '
'when updating.'),
LOG.warning('There is no change of "%(net_interfaces)s" '
'for instance %(server)s, do nothing '
'when updating.',
{'net_interfaces': self.NETWORK_INTERFACES,
'server': self.resource_id})
# if the interfaces not come from property 'NetworkInterfaces',
@ -806,10 +804,10 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if network_interfaces and subnet_id:
# consider the old templates, we only to log to warn user
# NetworkInterfaces has higher priority than SubnetId
LOG.warning(_LW('"%(subnet)s" will be ignored if specified '
'"%(net_interfaces)s". So if you specified the '
'"%(net_interfaces)s" property, '
'do not specify "%(subnet)s" property.'),
LOG.warning('"%(subnet)s" will be ignored if specified '
'"%(net_interfaces)s". So if you specified the '
'"%(net_interfaces)s" property, '
'do not specify "%(subnet)s" property.',
{'subnet': self.SUBNET_ID,
'net_interfaces': self.NETWORK_INTERFACES})
@ -854,7 +852,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# if the instance has been suspended successful,
# no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug("suspending instance %s" % self.resource_id)
LOG.debug("suspending instance %s", self.resource_id)
server.suspend()
return server.id
@ -864,8 +862,8 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
@ -897,7 +895,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# if the instance has been resumed successful,
# no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug("resuming instance %s" % self.resource_id)
LOG.debug("resuming instance %s", self.resource_id)
server.resume()
return server.id

View File

@ -16,7 +16,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -83,20 +82,19 @@ class User(stack_user.StackUser):
# and we don't want to break templates which previously worked
if not isinstance(policy, six.string_types):
LOG.debug("Ignoring policy %s, must be string "
"resource name" % policy)
"resource name", policy)
continue
try:
policy_rsrc = self.stack[policy]
except KeyError:
LOG.debug("Policy %(policy)s does not exist in stack "
"%(stack)s"
% {'policy': policy, 'stack': self.stack.name})
"%(stack)s",
{'policy': policy, 'stack': self.stack.name})
return False
if not callable(getattr(policy_rsrc, 'access_allowed', None)):
LOG.debug("Policy %s is not an AccessPolicy resource"
% policy)
LOG.debug("Policy %s is not an AccessPolicy resource", policy)
return False
return True
@ -122,7 +120,7 @@ class User(stack_user.StackUser):
for policy in policies:
if not isinstance(policy, six.string_types):
LOG.debug("Ignoring policy %s, must be string "
"resource name" % policy)
"resource name", policy)
continue
policy_rsrc = self.stack[policy]
if not policy_rsrc.access_allowed(resource_name):
@ -221,7 +219,7 @@ class AccessKey(resource.Resource):
user = self._get_user()
if user is None:
LOG.debug('Error deleting %s - user not found' % str(self))
LOG.debug('Error deleting %s - user not found', str(self))
return
user._delete_keypair()
@ -232,8 +230,8 @@ class AccessKey(resource.Resource):
"""
if self._secret is None:
if not self.resource_id:
LOG.info(_LI('could not get secret for %(username)s '
'Error:%(msg)s'),
LOG.info('could not get secret for %(username)s '
'Error:%(msg)s',
{'username': self.properties[self.USER_NAME],
'msg': "resource_id not yet set"})
else:
@ -252,10 +250,10 @@ class AccessKey(resource.Resource):
# And the ID of the v3 credential
self.data_set('credential_id', kp.id, redact=True)
except Exception as ex:
LOG.info(_LI('could not get secret for %(username)s '
'Error:%(msg)s'), {
'username': self.properties[self.USER_NAME],
'msg': ex})
LOG.info('could not get secret for %(username)s '
'Error:%(msg)s',
{'username': self.properties[self.USER_NAME],
'msg': ex})
return self._secret or '000-000-000'

View File

@ -18,7 +18,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common import template_format
from heat.engine import attributes
from heat.engine import constraints
@ -505,7 +504,7 @@ backend servers
nova_cp = self.client_plugin('nova')
for i in instances or []:
ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
LOG.debug('haproxy server:%s' % ip)
LOG.debug('haproxy server:%s', ip)
servers.append('%sserver server%d %s:%s%s' % (spaces, n,
ip, inst_port,
check))
@ -526,7 +525,7 @@ backend servers
def get_parsed_template(self):
if cfg.CONF.loadbalancer_template:
with open(cfg.CONF.loadbalancer_template) as templ_fd:
LOG.info(_LI('Using custom loadbalancer template %s'),
LOG.info('Using custom loadbalancer template %s',
cfg.CONF.loadbalancer_template)
contents = templ_fd.read()
else:

View File

@ -17,7 +17,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
@ -345,18 +344,18 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _check_extend_volume_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'extending':
LOG.debug("Volume %s is being extended" % vol.id)
LOG.debug("Volume %s is being extended", vol.id)
return False
if vol.status != 'available':
LOG.info(_LI("Resize failed: Volume %(vol)s "
"is in %(status)s state."),
LOG.info("Resize failed: Volume %(vol)s "
"is in %(status)s state.",
{'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume resize failed'))
LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
LOG.info('Volume %(id)s resize complete', {'id': vol.id})
return True
def _backup_restore(self, vol_id, backup_id):
@ -376,17 +375,17 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _check_backup_restore_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'restoring-backup':
LOG.debug("Volume %s is being restoring from backup" % vol.id)
LOG.debug("Volume %s is being restoring from backup", vol.id)
return False
if vol.status != 'available':
LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s "
"state."), {'vol': vol.id, 'status': vol.status})
LOG.info("Restore failed: Volume %(vol)s is in %(status)s "
"state.", {'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume backup restore failed'))
LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
LOG.info('Volume %s backup restore complete', vol.id)
return True
def needs_replace_failed(self):

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -81,7 +80,7 @@ class Restarter(signal_responder.SignalResponder):
else:
alarm_state = details.get('state', 'alarm').lower()
LOG.info(_LI('%(name)s Alarm, new state %(state)s'),
LOG.info('%(name)s Alarm, new state %(state)s',
{'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm':
@ -90,13 +89,13 @@ class Restarter(signal_responder.SignalResponder):
target_id = self.properties[self.INSTANCE_ID]
victim = self.stack.resource_by_refid(target_id)
if victim is None:
LOG.info(_LI('%(name)s Alarm, can not find instance '
'%(instance)s'),
LOG.info('%(name)s Alarm, can not find instance '
'%(instance)s',
{'name': self.name,
'instance': target_id})
return
LOG.info(_LI('%(name)s Alarm, restarting resource: %(victim)s'),
LOG.info('%(name)s Alarm, restarting resource: %(victim)s',
{'name': self.name, 'victim': victim.name})
self.stack.restart_resource(victim.name)

View File

@ -16,8 +16,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -162,7 +160,7 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
alarm_state = details.get('current',
details.get('state', 'alarm')).lower()
LOG.info(_LI('Alarm %(name)s, new state %(state)s'),
LOG.info('Alarm %(name)s, new state %(state)s',
{'name': self.name, 'state': alarm_state})
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
@ -176,11 +174,11 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
self._check_scaling_allowed()
LOG.info(_LI('%(name)s alarm, adjusting group %(group)s with id '
'%(asgn_id)s by %(filter)s') % {
'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
LOG.info('%(name)s alarm, adjusting group %(group)s with id '
'%(asgn_id)s by %(filter)s',
{'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
size_changed = False
try:
@ -192,10 +190,9 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
except resource.NoActionRequired:
raise
except Exception:
LOG.error(_LE("Error in performing scaling adjustment with "
"%(name)s alarm for group %(group)s.") % {
'name': self.name,
'group': group.name})
LOG.error("Error in performing scaling adjustment with "
"%(name)s alarm for group %(group)s.",
{'name': self.name, 'group': group.name})
raise
finally:
self._finished_scaling("%s : %s" % (

View File

@ -19,7 +19,6 @@ from six.moves.urllib import parse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients.os import swift
from heat.engine import constraints
@ -245,12 +244,12 @@ class SwiftSignal(resource.Resource):
container = self.client().get_container(self.stack.id)
except Exception as exc:
self.client_plugin().ignore_not_found(exc)
LOG.debug("Swift container %s was not found" % self.stack.id)
LOG.debug("Swift container %s was not found", self.stack.id)
return []
index = container[1]
if not index:
LOG.debug("Swift objects in container %s were not found" %
LOG.debug("Swift objects in container %s were not found",
self.stack.id)
return []
@ -332,14 +331,14 @@ class SwiftSignal(resource.Resource):
for status in statuses:
if status == self.STATUS_FAILURE:
failure = SwiftSignalFailure(self)
LOG.info(_LI('%(name)s Failed (%(failure)s)'),
LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)})
raise failure
elif status != self.STATUS_SUCCESS:
raise exception.Error(_("Unknown status: %s") % status)
if len(statuses) >= self.properties[self.COUNT]:
LOG.info(_LI("%s Succeeded"), str(self))
LOG.info("%s Succeeded", str(self))
return True
return False

View File

@ -16,7 +16,7 @@ import eventlet
from oslo_utils import timeutils
import six
from heat.common.i18n import _, _LI
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -161,7 +161,7 @@ class TestResource(resource.Resource):
secs = self.properties[self.ACTION_WAIT_SECS][self.action.lower()]
if secs is None:
secs = self.properties[self.WAIT_SECS]
LOG.info(_LI('%(name)s wait_secs:%(wait)s, action:%(action)s'),
LOG.info('%(name)s wait_secs:%(wait)s, action:%(action)s',
{'name': self.name,
'wait': secs,
'action': self.action.lower()})
@ -228,7 +228,7 @@ class TestResource(resource.Resource):
started_at = timeutils.normalize_time(started_at)
waited = timeutils.utcnow() - started_at
LOG.info(_LI("Resource %(name)s waited %(waited)s/%(sec)s seconds"),
LOG.info("Resource %(name)s waited %(waited)s/%(sec)s seconds",
{'name': self.name,
'waited': waited,
'sec': wait_secs})

View File

@ -17,7 +17,6 @@ from oslo_utils import timeutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -100,7 +99,7 @@ class HeatWaitCondition(resource.Resource):
def _wait(self, handle, started_at, timeout_in):
if timeutils.is_older_than(started_at, timeout_in):
exc = wc_base.WaitConditionTimeout(self, handle)
LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
LOG.info('%(name)s Timed out (%(timeout)s)',
{'name': str(self), 'timeout': str(exc)})
raise exc
@ -108,12 +107,12 @@ class HeatWaitCondition(resource.Resource):
if any(s != handle.STATUS_SUCCESS for s in handle_status):
failure = wc_base.WaitConditionFailure(self, handle)
LOG.info(_LI('%(name)s Failed (%(failure)s)'),
LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)})
raise failure
if len(handle_status) >= self.properties[self.COUNT]:
LOG.info(_LI("%s Succeeded"), str(self))
LOG.info("%s Succeeded", str(self))
return True
return False

View File

@ -17,7 +17,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -234,7 +233,7 @@ class ManilaShare(resource.Resource):
if share_status == self.STATUS_CREATING:
return False
elif share_status == self.STATUS_AVAILABLE:
LOG.info(_LI('Applying access rules to created Share.'))
LOG.info('Applying access rules to created Share.')
# apply access rules to created share. please note that it is not
# possible to define rules for share with share_status = creating
access_rules = self.properties.get(self.ACCESS_RULES)

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -152,9 +151,9 @@ class CronTrigger(resource.Resource):
# changed after
# https://blueprints.launchpad.net/mistral/+spec/mistral-cron-trigger-life-cycle
# will be merged.
LOG.warning(_LW("get_live_state isn't implemented for this type of "
"resource due to specific behaviour of cron trigger "
"in mistral."))
LOG.warning("get_live_state isn't implemented for this type of "
"resource due to specific behaviour of cron trigger "
"in mistral.")
return {}

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resource
LOG = logging.getLogger(__name__)
@ -113,8 +112,7 @@ class NeutronResource(resource.Resource):
key = self.res_info_key if self.res_info_key else self.entity
return res_info[key]
except AttributeError as ex:
LOG.warning(_LW("Resolving 'show' attribute has failed : %s"),
ex)
LOG.warning("Resolving 'show' attribute has failed : %s", ex)
def _resolve_attribute(self, name):
if self.resource_id is None:

View File

@ -16,7 +16,6 @@ from oslo_serialization import jsonutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -503,7 +502,7 @@ class Port(neutron.NeutronResource):
subnets.append(self.client().show_subnet(
subnet_id)['subnet'])
except Exception as ex:
LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex)
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
return subnets
return super(Port, self)._resolve_attribute(name)
@ -520,7 +519,7 @@ class Port(neutron.NeutronResource):
prop_diff['qos_policy_id'] = self.client_plugin(
).get_qos_policy_id(qos_policy) if qos_policy else None
self._prepare_port_properties(prop_diff, prepare_for_update=True)
LOG.debug('updating port with %s' % prop_diff)
LOG.debug('updating port with %s', prop_diff)
self.client().update_port(self.resource_id, {'port': prop_diff})
def check_update_complete(self, *args):

View File

@ -14,7 +14,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -170,8 +169,8 @@ class NovaFlavor(resource.Resource):
if not self.IS_PUBLIC:
if not tenants:
LOG.info(_LI('Tenant property is recommended if IS_PUBLIC'
'is false.'))
LOG.info('Tenant property is recommended if IS_PUBLIC '
'is false.')
tenant = self.stack.context.tenant_id
self.client().flavor_access.add_tenant_access(flavor, tenant)
else:

View File

@ -16,7 +16,6 @@ from oslo_utils import excutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -90,9 +89,9 @@ class NovaFloatingIp(resource.Resource):
with excutils.save_and_reraise_exception():
if self.client_plugin().is_not_found(e):
if pool is None:
LOG.error(_LE('Could not allocate floating IP. '
'Probably there is no default floating'
' IP pool is configured.'))
LOG.error('Could not allocate floating IP. '
'Probably there is no default floating '
'IP pool is configured.')
self.resource_id_set(floating_ip.id)
self._floating_ip = floating_ip

View File

@ -1509,7 +1509,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
# if the server has been suspended successful,
# no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id)
LOG.debug('suspending server %s', self.resource_id)
server.suspend()
return server.id
@ -1519,8 +1519,8 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
@ -1552,7 +1552,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
# if the server has been resumed successful,
# no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id)
LOG.debug('resuming server %s', self.resource_id)
server.resume()
return server.id

View File

@ -21,7 +21,6 @@ import tenacity
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource
from heat.engine.resources.openstack.neutron import port as neutron_port
@ -434,10 +433,10 @@ class ServerNetworkMixin(object):
try:
if self.client_plugin().check_interface_detach(
existing_server_id, port['id']):
LOG.info(_LI('Detach interface %(port)s successful from '
'server %(server)s.')
% {'port': port['id'],
'server': existing_server_id})
LOG.info('Detach interface %(port)s successful from '
'server %(server)s.',
{'port': port['id'],
'server': existing_server_id})
except tenacity.RetryError:
raise exception.InterfaceDetachFailed(
port=port['id'], server=existing_server_id)
@ -451,10 +450,10 @@ class ServerNetworkMixin(object):
try:
if self.client_plugin().check_interface_attach(
prev_server_id, port['id']):
LOG.info(_LI('Attach interface %(port)s successful to '
'server %(server)s')
% {'port': port['id'],
'server': prev_server_id})
LOG.info('Attach interface %(port)s successful to '
'server %(server)s',
{'port': port['id'],
'server': prev_server_id})
except tenacity.RetryError:
raise exception.InterfaceAttachFailed(
port=port['id'], server=prev_server_id)

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -249,7 +248,7 @@ class SaharaCluster(resource.Resource):
net_id=net_id,
use_autoconfig=use_autoconfig,
shares=shares)
LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
LOG.info('Cluster "%s" is being started.', cluster.name)
self.resource_id_set(cluster.id)
return self.resource_id
@ -261,7 +260,7 @@ class SaharaCluster(resource.Resource):
if cluster.status != self.CLUSTER_ACTIVE:
return False
LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
LOG.info("Cluster '%s' has been created", cluster.name)
return True
def check_delete_complete(self, resource_id):
@ -272,7 +271,7 @@ class SaharaCluster(resource.Resource):
cluster = self.client().clusters.get(resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
LOG.info(_LI("Cluster '%s' has been deleted"),
LOG.info("Cluster '%s' has been deleted",
self._cluster_name())
return True
else:

View File

@ -21,7 +21,6 @@ from oslo_utils import encodeutils
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
@ -286,7 +285,7 @@ class SaharaNodeGroupTemplate(resource.Resource):
props = dict((k, v) for k, v in six.iteritems(self.properties))
args = self._prepare_properties(props)
node_group_template = self.client().node_group_templates.create(**args)
LOG.info(_LI("Node Group Template '%s' has been created"),
LOG.info("Node Group Template '%s' has been created",
node_group_template.name)
self.resource_id_set(node_group_template.id)
return self.resource_id
@ -573,7 +572,7 @@ class SaharaClusterTemplate(resource.Resource):
props = dict((k, v) for k, v in six.iteritems(self.properties))
args = self._prepare_properties(props)
cluster_template = self.client().cluster_templates.create(**args)
LOG.info(_LI("Cluster Template '%s' has been created"),
LOG.info("Cluster Template '%s' has been created",
cluster_template.name)
self.resource_id_set(cluster_template.id)
return self.resource_id

View File

@ -14,7 +14,6 @@
from oslo_log import log as logging
from heat.common.i18n import _LW
from heat.engine import resource
from heat.engine import support
@ -35,7 +34,7 @@ class BaseSenlinResource(resource.Resource):
res_info = client_method(self.resource_id)
return res_info.to_dict()
except AttributeError as ex:
LOG.warning(_LW("No method to get the resource: %s"), ex)
LOG.warning("No method to get the resource: %s", ex)
def _resolve_attribute(self, name):
if self.resource_id is None:

View File

@ -17,7 +17,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
@ -154,10 +153,10 @@ class SwiftContainer(resource.Resource):
LOG.debug('SwiftContainer create container %(container)s with '
'container headers %(container_headers)s and '
'account headers %(account_headers)s'
% {'container': container,
'account_headers': account_headers,
'container_headers': container_headers})
'account headers %(account_headers)s',
{'container': container,
'account_headers': account_headers,
'container_headers': container_headers})
self.client().put_container(container, container_headers)
@ -242,7 +241,7 @@ class SwiftContainer(resource.Resource):
headers = self.client().head_container(self.resource_id)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
LOG.warning(_LW("Head container failed: %s"), ex)
LOG.warning("Head container failed: %s", ex)
return None
raise
else:

View File

@ -15,8 +15,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -176,9 +174,9 @@ class TroveCluster(resource.Resource):
return cluster
except Exception as exc:
if self.client_plugin().is_over_limit(exc):
LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during clusters.get():"
" %(exception)s"),
LOG.warning("Stack %(name)s (%(id)s) received an "
"OverLimit response during clusters.get():"
" %(exception)s",
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
@ -202,7 +200,7 @@ class TroveCluster(resource.Resource):
if instance['status'] != self.ACTIVE:
return False
LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
LOG.info("Cluster '%s' has been created", cluster.name)
return True
def cluster_delete(self, cluster_id):

View File

@ -16,8 +16,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -382,9 +380,9 @@ class Instance(resource.Resource):
return instance
except Exception as exc:
if self.client_plugin().is_over_limit(exc):
LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():"
" %(exception)s"),
LOG.warning("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():"
" %(exception)s",
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
@ -405,10 +403,10 @@ class Instance(resource.Resource):
if instance.status != self.ACTIVE:
return False
LOG.info(_LI("Database instance %(database)s created "
"(flavor:%(flavor)s, volume:%(volume)s, "
"datastore:%(datastore_type)s, "
"datastore_version:%(datastore_version)s)"),
LOG.info("Database instance %(database)s created "
"(flavor:%(flavor)s, volume:%(volume)s, "
"datastore:%(datastore_type)s, "
"datastore_version:%(datastore_version)s)",
{'database': self._dbinstance_name(),
'flavor': self.flavor,
'volume': self.volume,
@ -493,12 +491,12 @@ class Instance(resource.Resource):
# the instance could have updated between the time
# we retrieve it and try to update it so check again
if self.client_plugin().is_over_limit(exc):
LOG.debug("API rate limit: %(ex)s. Retrying." %
LOG.debug("API rate limit: %(ex)s. Retrying.",
{'ex': six.text_type(exc)})
return False
if "No change was requested" in six.text_type(exc):
LOG.warning(_LW("Unexpected instance state change "
"during update. Retrying."))
LOG.warning("Unexpected instance state change "
"during update. Retrying.")
return False
raise
return True

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from heat.common import exception
from heat.common.i18n import _LE
from heat.engine.clients import progress
from heat.engine.resources import stack_user
@ -244,7 +243,7 @@ class BaseServer(stack_user.StackUser):
# transport will continue to work, and the new transport may work
# despite exceptions in the above block.
LOG.exception(
_LE('Error while updating software config transport')
'Error while updating software config transport'
)
def metadata_update(self, new_metadata=None):

View File

@ -21,7 +21,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine.clients.os import swift
from heat.engine.resources import stack_user
@ -139,8 +138,8 @@ class SignalResponder(stack_user.StackUser):
secret_key = self.data().get('secret_key')
if not access_key or not secret_key:
LOG.warning(_LW('Cannot generate signed url, '
'unable to create keypair'))
LOG.warning('Cannot generate signed url, '
'unable to create keypair')
return
config_url = cfg.CONF.heat_waitcondition_server_url
@ -342,12 +341,12 @@ class SignalResponder(stack_user.StackUser):
container = swift_client.get_container(self.stack.id)
except Exception as exc:
self.client_plugin('swift').ignore_not_found(exc)
LOG.debug("Swift container %s was not found" % self.stack.id)
LOG.debug("Swift container %s was not found", self.stack.id)
return
index = container[1]
if not index: # Swift objects were deleted by user
LOG.debug("Swift objects in container %s were not found" %
LOG.debug("Swift objects in container %s were not found",
self.stack.id)
return

View File

@ -23,8 +23,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import template_format
from heat.engine import attributes
@ -169,7 +167,7 @@ class StackResource(resource.Resource):
params = self.child_params()
except NotImplementedError:
class_name = reflection.get_class_name(self, fully_qualified=False)
LOG.warning(_LW("Preview of '%s' not yet implemented"), class_name)
LOG.warning("Preview of '%s' not yet implemented", class_name)
return self
name = "%s-%s" % (self.stack.name, self.name)
@ -446,7 +444,7 @@ class StackResource(resource.Resource):
if self.stack.action == self.stack.ROLLBACK:
if self._try_rollback():
LOG.info(_LI('Triggered nested stack %s rollback'),
LOG.info('Triggered nested stack %s rollback',
self.physical_resource_name())
return {'target_action': self.stack.ROLLBACK}

View File

@ -16,7 +16,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resource
LOG = logging.getLogger(__name__)
@ -93,7 +92,7 @@ class StackUser(resource.Resource):
# compatibility with resources created before the migration
# to stack_user.StackUser domain users. After an appropriate
# transitional period, this should be removed.
LOG.warning(_LW('Reverting to legacy user delete path'))
LOG.warning('Reverting to legacy user delete path')
try:
self.keystone().delete_stack_user(user_id)
except kc_exception.NotFound:

View File

@ -18,7 +18,6 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.resources import signal_responder
LOG = logging.getLogger(__name__)
@ -67,7 +66,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
latest_rsrc_metadata)
if not self._metadata_format_ok(signal_data):
LOG.info(_LI("Metadata failed validation for %s"), self.name)
LOG.info("Metadata failed validation for %s", self.name)
raise ValueError(_("Metadata format invalid"))
new_entry = signal_data.copy()
@ -75,7 +74,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
new_rsrc_metadata = latest_rsrc_metadata.copy()
if unique_id in new_rsrc_metadata:
LOG.info(_LI("Overwriting Metadata item for id %s!"),
LOG.info("Overwriting Metadata item for id %s!",
unique_id)
new_rsrc_metadata.update({unique_id: new_entry})

View File

@ -22,7 +22,6 @@ from oslo_utils import excutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import repr_wrapper
from heat.common import timeutils
@ -153,7 +152,7 @@ class TaskRunner(object):
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
LOG.debug('%s sleeping' % six.text_type(self))
LOG.debug('%s sleeping', six.text_type(self))
eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None, progress_callback=None):
@ -180,7 +179,7 @@ class TaskRunner(object):
assert self._runner is None, "Task already started"
assert not self._done, "Task already cancelled"
LOG.debug('%s starting' % six.text_type(self))
LOG.debug('%s starting', six.text_type(self))
if timeout is not None:
self._timeout = Timeout(self, timeout)
@ -192,7 +191,7 @@ class TaskRunner(object):
else:
self._runner = False
self._done = True
LOG.debug('%s done (not resumable)' % six.text_type(self))
LOG.debug('%s done (not resumable)', six.text_type(self))
def step(self):
"""Run another step of the task.
@ -207,18 +206,18 @@ class TaskRunner(object):
return False
if self._timeout is not None and self._timeout.expired():
LOG.info(_LI('%s timed out'), self)
LOG.info('%s timed out', self)
self._done = True
self._timeout.trigger(self._runner)
else:
LOG.debug('%s running' % six.text_type(self))
LOG.debug('%s running', six.text_type(self))
try:
poll_period = next(self._runner)
except StopIteration:
self._done = True
LOG.debug('%s complete' % six.text_type(self))
LOG.debug('%s complete', six.text_type(self))
else:
if isinstance(poll_period, six.integer_types):
self._poll_period = max(poll_period, 1)
@ -276,7 +275,7 @@ class TaskRunner(object):
return
if not self.started() or grace_period is None:
LOG.debug('%s cancelled' % six.text_type(self))
LOG.debug('%s cancelled', six.text_type(self))
self._done = True
if self.started():
self._runner.close()
@ -477,13 +476,13 @@ class DependencyTaskGroup(object):
try:
r.cancel(grace_period=gp)
except Exception as ex:
LOG.debug('Exception cancelling task: %s' % six.text_type(ex))
LOG.debug('Exception cancelling task: %s', six.text_type(ex))
def _cancel_recursively(self, key, runner):
try:
runner.cancel()
except Exception as ex:
LOG.debug('Exception cancelling task: %s' % six.text_type(ex))
LOG.debug('Exception cancelling task: %s', six.text_type(ex))
node = self._graph[key]
for dependent_node in node.required_by():
node_runner = self._runners[dependent_node]

View File

@ -37,9 +37,6 @@ from heat.common import environment_format as env_fmt
from heat.common import environment_util as env_util
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import messaging as rpc_messaging
from heat.common import policy
@ -133,7 +130,7 @@ class ThreadGroupManager(object):
try:
gt.wait()
except Exception:
LOG.exception(_LE('Unhandled error in asynchronous task'))
LOG.exception('Unhandled error in asynchronous task')
except BaseException:
pass
@ -278,9 +275,9 @@ class EngineListener(object):
try:
self._server.stop()
self._server.wait()
LOG.info(_LI("Engine listener is stopped successfully"))
LOG.info("Engine listener is stopped successfully")
except Exception as e:
LOG.error(_LE("Failed to stop engine listener, %s"), e)
LOG.error("Failed to stop engine listener, %s", e)
def listening(self, ctxt):
"""Respond to a watchdog request.
@ -336,14 +333,14 @@ class EngineService(service.ServiceBase):
self.resource_enforcer = policy.ResourceEnforcer()
if cfg.CONF.trusts_delegated_roles:
LOG.warning(_LW('The default value of "trusts_delegated_roles" '
'option in heat.conf is changed to [] in Kilo '
'and heat will delegate all roles of trustor. '
'Please keep the same if you do not want to '
'delegate subset roles when upgrading.'))
LOG.warning('The default value of "trusts_delegated_roles" '
'option in heat.conf is changed to [] in Kilo '
'and heat will delegate all roles of trustor. '
'Please keep the same if you do not want to '
'delegate subset roles when upgrading.')
def create_periodic_tasks(self):
LOG.debug("Starting periodic watch tasks pid=%s" % os.getpid())
LOG.debug("Starting periodic watch tasks pid=%s", os.getpid())
# Note with multiple workers, the parent process hasn't called start()
# so we need to create a ThreadGroupManager here for the periodic tasks
if self.thread_group_mgr is None:
@ -361,10 +358,10 @@ class EngineService(service.ServiceBase):
show_hidden=True)
for s in stacks:
self.stack_watch.start_watch_task(s.id, admin_context)
LOG.info(_LI("Watch tasks created"))
LOG.info("Watch tasks created")
return
except Exception as e:
LOG.error(_LE("Watch task creation attempt failed, %s"), e)
LOG.error("Watch task creation attempt failed, %s", e)
eventlet.sleep(5)
if self.manage_thread_grp is None:
@ -377,7 +374,7 @@ class EngineService(service.ServiceBase):
self.thread_group_mgr = ThreadGroupManager()
self.listener = EngineListener(self.host, self.engine_id,
self.thread_group_mgr)
LOG.debug("Starting listener for engine %s" % self.engine_id)
LOG.debug("Starting listener for engine %s", self.engine_id)
self.listener.start()
if cfg.CONF.convergence_engine:
@ -427,9 +424,9 @@ class EngineService(service.ServiceBase):
try:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info(_LI("Engine service is stopped successfully"))
LOG.info("Engine service is stopped successfully")
except Exception as e:
LOG.error(_LE("Failed to stop engine service, %s"), e)
LOG.error("Failed to stop engine service, %s", e)
def stop(self):
self._stop_rpc_server()
@ -446,19 +443,19 @@ class EngineService(service.ServiceBase):
# Ignore dummy service task
if stack_id == cfg.CONF.periodic_interval:
continue
LOG.info(_LI("Waiting stack %s processing to be finished"),
LOG.info("Waiting stack %s processing to be finished",
stack_id)
# Stop threads gracefully
self.thread_group_mgr.stop(stack_id, True)
LOG.info(_LI("Stack %s processing was finished"), stack_id)
LOG.info("Stack %s processing was finished", stack_id)
if self.manage_thread_grp:
self.manage_thread_grp.stop()
ctxt = context.get_admin_context()
service_objects.Service.delete(ctxt, self.service_id)
LOG.info(_LI('Service %s is deleted'), self.service_id)
LOG.info('Service %s is deleted', self.service_id)
# Terminate the engine process
LOG.info(_LI("All threads were gone, terminating engine"))
LOG.info("All threads were gone, terminating engine")
def wait(self):
pass
@ -739,7 +736,7 @@ class EngineService(service.ServiceBase):
:type environment_files: list or None
"""
LOG.info(_LI('previewing stack %s'), stack_name)
LOG.info('previewing stack %s', stack_name)
conv_eng = cfg.CONF.convergence_engine
stack = self._parse_template_and_validate_stack(cnxt,
@ -783,7 +780,7 @@ class EngineService(service.ServiceBase):
:param parent_resource_name: the parent resource name
:param template_id: the ID of a pre-stored template in the DB
"""
LOG.info(_LI('Creating stack %s'), stack_name)
LOG.info('Creating stack %s', stack_name)
def _create_stack_user(stack):
if not stack.stack_user_project_id:
@ -806,7 +803,7 @@ class EngineService(service.ServiceBase):
# Schedule a periodic watcher task for this stack
self.stack_watch.start_watch_task(stack.id, cnxt)
else:
LOG.info(_LI("Stack create failed, status %s"), stack.status)
LOG.info("Stack create failed, status %s", stack.status)
convergence = cfg.CONF.convergence_engine
@ -883,9 +880,9 @@ class EngineService(service.ServiceBase):
else:
# Nothing we can do, the failed update happened before
# we started storing prev_raw_template_id
LOG.error(_LE('PATCH update to FAILED stack only '
'possible if convergence enabled or '
'previous template stored'))
LOG.error('PATCH update to FAILED stack only '
'possible if convergence enabled or '
'previous template stored')
msg = _('PATCH update to non-COMPLETE stack')
raise exception.NotSupported(feature=msg)
@ -970,7 +967,7 @@ class EngineService(service.ServiceBase):
"""
# Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity)
LOG.info(_LI('Updating stack %s'), db_stack.name)
LOG.info('Updating stack %s', db_stack.name)
if cfg.CONF.reauthentication_auth_method == 'trusts':
current_stack = parser.Stack.load(
cnxt, stack=db_stack, use_stored_context=True)
@ -1025,7 +1022,7 @@ class EngineService(service.ServiceBase):
"""
# Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity)
LOG.info(_LI('Previewing update of stack %s'), db_stack.name)
LOG.info('Previewing update of stack %s', db_stack.name)
current_stack = parser.Stack.load(cnxt, stack=db_stack)
@ -1137,7 +1134,7 @@ class EngineService(service.ServiceBase):
state = '_'.join(current_stack.state)
msg = _("Cancelling update when stack is %s") % str(state)
raise exception.NotSupported(feature=msg)
LOG.info(_LI('Starting cancel of updating stack %s'), db_stack.name)
LOG.info('Starting cancel of updating stack %s', db_stack.name)
if current_stack.convergence:
if cancel_with_rollback:
@ -1206,7 +1203,7 @@ class EngineService(service.ServiceBase):
:param ignorable_errors: List of error_code to be ignored as part of
validation
"""
LOG.info(_LI('validate_template'))
LOG.info('validate_template')
if template is None:
msg = _("No Template provided.")
return webob.exc.HTTPBadRequest(explanation=msg)
@ -1383,7 +1380,7 @@ class EngineService(service.ServiceBase):
st.action == parser.Stack.DELETE):
raise exception.EntityNotFound(entity='Stack', name=st.name)
LOG.info(_LI('Deleting stack %s'), st.name)
LOG.info('Deleting stack %s', st.name)
stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack)
@ -1440,7 +1437,7 @@ class EngineService(service.ServiceBase):
watch.start()
while not watch.expired():
LOG.debug('Waiting for stack cancel to complete: %s' %
LOG.debug('Waiting for stack cancel to complete: %s',
stack.name)
with lock.try_thread_lock() as acquire_result:
@ -1463,7 +1460,7 @@ class EngineService(service.ServiceBase):
stack_identity=stack_identity)
if stop_result is None:
LOG.debug("Successfully stopped remote task "
"on engine %s" % acquire_result)
"on engine %s", acquire_result)
else:
raise exception.StopActionFailed(
stack_name=stack.name, engine_id=acquire_result)
@ -1507,13 +1504,13 @@ class EngineService(service.ServiceBase):
# Get stack details before deleting it.
stack_info = stack.prepare_abandon()
if abandon:
LOG.info(_LI('abandoning stack %s'), st.name)
LOG.info('abandoning stack %s', st.name)
self.thread_group_mgr.start_with_acquired_lock(stack,
lock,
stack.delete,
abandon=True)
else:
LOG.info(_LI('exporting stack %s'), st.name)
LOG.info('exporting stack %s', st.name)
return stack_info
def list_resource_types(self,
@ -1615,8 +1612,8 @@ class EngineService(service.ServiceBase):
try:
resource_class = resources.global_env().get_class(type_name)
except exception.NotFound:
LOG.exception(_LE('Error loading resource type %s '
'from global environment.'),
LOG.exception('Error loading resource type %s '
'from global environment.',
type_name)
raise exception.InvalidGlobalResource(type_name=type_name)
@ -1676,8 +1673,8 @@ class EngineService(service.ServiceBase):
try:
resource_class = resources.global_env().get_class(type_name)
except exception.NotFound:
LOG.exception(_LE('Error loading resource type %s '
'from global environment.'),
LOG.exception('Error loading resource type %s '
'from global environment.',
type_name)
raise exception.InvalidGlobalResource(type_name=type_name)
else:
@ -1809,7 +1806,7 @@ class EngineService(service.ServiceBase):
if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name):
LOG.warning(_LW("Access denied to resource %s"), resource_name)
LOG.warning("Access denied to resource %s", resource_name)
raise exception.Forbidden()
resource = stack.resource_get(resource_name)
@ -2012,7 +2009,7 @@ class EngineService(service.ServiceBase):
def stack_suspend(self, cnxt, stack_identity):
"""Handle request to perform suspend action on a stack."""
def _stack_suspend(stack):
LOG.debug("suspending stack %s" % stack.name)
LOG.debug("suspending stack %s", stack.name)
stack.suspend()
s = self._get_stack(cnxt, stack_identity)
@ -2026,7 +2023,7 @@ class EngineService(service.ServiceBase):
def stack_resume(self, cnxt, stack_identity):
"""Handle request to perform a resume action on a stack."""
def _stack_resume(stack):
LOG.debug("resuming stack %s" % stack.name)
LOG.debug("resuming stack %s", stack.name)
stack.resume()
s = self._get_stack(cnxt, stack_identity)
@ -2049,17 +2046,17 @@ class EngineService(service.ServiceBase):
{'data': data, 'status': status,
'status_reason': reason})
LOG.debug("Snapshotting stack %s" % stack.name)
LOG.debug("Snapshotting stack %s", stack.name)
stack.snapshot(save_snapshot_func=save_snapshot)
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
if stack.status == stack.IN_PROGRESS:
LOG.info(_LI('%(stack)s is in state %(action)s_IN_PROGRESS, '
'snapshot is not permitted.'), {
'stack': six.text_type(stack),
'action': stack.action})
LOG.info('%(stack)s is in state %(action)s_IN_PROGRESS, '
'snapshot is not permitted.', {
'stack': six.text_type(stack),
'action': stack.action})
raise exception.ActionInProgress(stack_name=stack.name,
action=stack.action)
@ -2104,7 +2101,7 @@ class EngineService(service.ServiceBase):
"""Handle request to perform a check action on a stack."""
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
LOG.info(_LI("Checking stack %s"), stack.name)
LOG.info("Checking stack %s", stack.name)
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
stack.check)
@ -2112,7 +2109,7 @@ class EngineService(service.ServiceBase):
@context.request_context
def stack_restore(self, cnxt, stack_identity, snapshot_id):
def _stack_restore(stack, snapshot):
LOG.debug("restoring stack %s" % stack.name)
LOG.debug("restoring stack %s", stack.name)
stack.restore(snapshot)
s = self._get_stack(cnxt, stack_identity)
@ -2174,7 +2171,7 @@ class EngineService(service.ServiceBase):
try:
wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)]
except Exception as ex:
LOG.warning(_LW('show_watch (all) db error %s'), ex)
LOG.warning('show_watch (all) db error %s', ex)
return
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@ -2196,7 +2193,7 @@ class EngineService(service.ServiceBase):
# namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally
if metric_namespace is not None or metric_name is not None:
LOG.error(_LE("Filtering by namespace/metric not yet supported"))
LOG.error("Filtering by namespace/metric not yet supported")
return
try:
@ -2205,7 +2202,7 @@ class EngineService(service.ServiceBase):
r.id: r.name for r in watch_rule.WatchRule.get_all(cnxt)
}
except Exception as ex:
LOG.warning(_LW('show_metric (all) db error %s'), ex)
LOG.warning('show_metric (all) db error %s', ex)
return
result = [api.format_watch_data(w, rule_names) for w in wds]
@ -2339,7 +2336,7 @@ class EngineService(service.ServiceBase):
stack_id=stack_id,
show_deleted=False)
if parent_stack.convergence:
LOG.info(_LI("Convergence was already enabled for stack %s"),
LOG.info("Convergence was already enabled for stack %s",
stack_id)
return
db_stacks = stack_object.Stack.get_all_by_root_owner_id(
@ -2382,17 +2379,17 @@ class EngineService(service.ServiceBase):
report_interval=cfg.CONF.periodic_interval)
)
self.service_id = service_ref['id']
LOG.debug('Service %s is started' % self.service_id)
LOG.debug('Service %s is started', self.service_id)
try:
service_objects.Service.update_by_id(
cnxt,
self.service_id,
dict(deleted_at=None))
LOG.debug('Service %s is updated' % self.service_id)
LOG.debug('Service %s is updated', self.service_id)
except Exception as ex:
LOG.error(_LE('Service %(service_id)s update '
'failed: %(error)s'),
LOG.error('Service %(service_id)s update '
'failed: %(error)s',
{'service_id': self.service_id, 'error': ex})
def service_manage_cleanup(self):
@ -2410,7 +2407,7 @@ class EngineService(service.ServiceBase):
continue
if service_ref['updated_at'] < time_line:
# hasn't been updated, assuming it's died.
LOG.debug('Service %s was aborted' % service_ref['id'])
LOG.debug('Service %s was aborted', service_ref['id'])
service_objects.Service.delete(cnxt, service_ref['id'])
def reset_stack_status(self):
@ -2440,8 +2437,8 @@ class EngineService(service.ServiceBase):
stk = parser.Stack.load(cnxt, stack=s,
service_check_defer=True,
resource_validate=False)
LOG.info(_LI('Engine %(engine)s went down when stack '
'%(stack_id)s was in action %(action)s'),
LOG.info('Engine %(engine)s went down when stack '
'%(stack_id)s was in action %(action)s',
{'engine': engine_id, 'action': stk.action,
'stack_id': stk.id})
@ -2457,6 +2454,5 @@ class EngineService(service.ServiceBase):
except exception.ActionInProgress:
continue
except Exception:
LOG.exception(_LE('Error while resetting stack: %s')
% stack_id)
LOG.exception('Error while resetting stack: %s', stack_id)
continue

View File

@ -23,7 +23,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db.sqlalchemy import api as db_api
from heat.engine import api
from heat.engine import scheduler
@ -138,7 +137,7 @@ class SoftwareConfigService(object):
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
LOG.info('Signal object not found: %(c)s %(o)s', {
'c': container, 'o': object_name})
return sd
raise
@ -160,8 +159,8 @@ class SoftwareConfigService(object):
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
LOG.info(
'Signal object not found: %(c)s %(o)s', {
'c': container, 'o': object_name})
return sd
raise

View File

@ -16,8 +16,6 @@ from oslo_utils import timeutils
import six
from heat.common import context
from heat.common.i18n import _LE
from heat.common.i18n import _LW
from heat.engine import stack
from heat.engine import watchrule
from heat.objects import stack as stack_object
@ -64,13 +62,12 @@ class StackWatch(object):
def check_stack_watches(self, sid):
# Use admin_context for stack_get to defeat tenant
# scoping otherwise we fail to retrieve the stack
LOG.debug("Periodic watcher task for stack %s" % sid)
LOG.debug("Periodic watcher task for stack %s", sid)
admin_context = context.get_admin_context()
db_stack = stack_object.Stack.get_by_id(admin_context,
sid)
if not db_stack:
LOG.error(_LE("Unable to retrieve stack %s for periodic task"),
sid)
LOG.error("Unable to retrieve stack %s for periodic task", sid)
return
stk = stack.Stack.load(admin_context, stack=db_stack,
use_stored_context=True)
@ -85,8 +82,7 @@ class StackWatch(object):
wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
sid)
except Exception as ex:
LOG.warning(_LW('periodic_task db error watch rule'
' removed? %(ex)s'),
LOG.warning('periodic_task db error watch rule removed? %(ex)s',
ex)
return

View File

@ -33,9 +33,6 @@ from heat.common import context as common_context
from heat.common import environment_format as env_fmt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import lifecycle_plugin_utils
from heat.common import timeutils
@ -81,13 +78,13 @@ def reset_state_on_error(func):
except Exception as exc:
with excutils.save_and_reraise_exception():
errmsg = six.text_type(exc)
LOG.error(_LE('Unexpected exception in %(func)s: %(msg)s'),
LOG.error('Unexpected exception in %(func)s: %(msg)s',
{'func': func.__name__, 'msg': errmsg})
except BaseException as exc:
with excutils.save_and_reraise_exception():
exc_type = type(exc).__name__
errmsg = '%s(%s)' % (exc_type, six.text_type(exc))
LOG.info(_LI('Stopped due to %(msg)s in %(func)s'),
LOG.info('Stopped due to %(msg)s in %(func)s',
{'func': func.__name__, 'msg': errmsg})
finally:
if stack.status == stack.IN_PROGRESS:
@ -458,7 +455,7 @@ class Stack(collections.Mapping):
parameter.
"""
if not self.parameters.set_stack_id(self.identifier()):
LOG.warning(_LW("Unable to set parameters StackId identifier"))
LOG.warning("Unable to set parameters StackId identifier")
@staticmethod
def get_dep_attrs(resources, resource_name):
@ -483,8 +480,8 @@ class Stack(collections.Mapping):
if not ignore_errors:
raise
else:
LOG.warning(_LW('Ignoring error adding implicit '
'dependencies for %(res)s: %(err)s') %
LOG.warning('Ignoring error adding implicit '
'dependencies for %(res)s: %(err)s',
{'res': six.text_type(res),
'err': six.text_type(exc)})
@ -856,7 +853,7 @@ class Stack(collections.Mapping):
except AssertionError:
raise
except Exception as ex:
LOG.info(_LI("Exception in stack validation"),
LOG.info("Exception in stack validation",
exc_info=True)
raise exception.StackValidationFailed(
message=encodeutils.safe_decode(six.text_type(ex)))
@ -897,7 +894,7 @@ class Stack(collections.Mapping):
for sink in sinks:
sink.consume(ctx, ev)
except Exception as e:
LOG.debug('Got error sending events %s' % e)
LOG.debug('Got error sending events %s', e)
if self.thread_group_mgr is not None:
self.thread_group_mgr.start(self.id, _dispatch,
self.context,
@ -926,9 +923,9 @@ class Stack(collections.Mapping):
updated = self._persist_state()
if not updated:
# Possibly failed concurrent update
LOG.warning(_LW("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s"),
LOG.warning("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s",
{'name': self.name,
'trvsl_id': self.current_traversal,
'action': action, 'status': status})
@ -942,8 +939,7 @@ class Stack(collections.Mapping):
self._persist_state()
def _log_status(self):
LOG.info(_LI('Stack %(action)s %(status)s (%(name)s): '
'%(reason)s'),
LOG.info('Stack %(action)s %(status)s (%(name)s): %(reason)s',
{'action': self.action,
'status': self.status,
'name': self.name,
@ -1254,8 +1250,8 @@ class Stack(collections.Mapping):
# we expect to update the stack having previous traversal ID
stack_id = self.store(exp_trvsl=previous_traversal)
if stack_id is None:
LOG.warning(_LW("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"),
LOG.warning("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s",
{'name': self.name, 'trvsl_id': previous_traversal,
'action': self.action})
return
@ -1280,13 +1276,13 @@ class Stack(collections.Mapping):
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"),
LOG.warning("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s",
{'name': self.name, 'trvsl_id': self.current_traversal,
'action': self.action})
return
LOG.info(_LI('convergence_dependencies: %s'),
LOG.info('convergence_dependencies: %s',
self.convergence_dependencies)
# Delete all the snapshots before starting delete operation
@ -1312,9 +1308,9 @@ class Stack(collections.Mapping):
else:
for rsrc_id, is_update in self.convergence_dependencies.leaves():
if is_update:
LOG.info(_LI("Triggering resource %s for update"), rsrc_id)
LOG.info("Triggering resource %s for update", rsrc_id)
else:
LOG.info(_LI("Triggering resource %s for cleanup"),
LOG.info("Triggering resource %s for cleanup",
rsrc_id)
input_data = sync_point.serialize_input_data({})
self.worker_client.check_resource(self.context, rsrc_id,
@ -1335,8 +1331,8 @@ class Stack(collections.Mapping):
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal"
" ID %(trvsl_id)s, not triggering rollback."),
LOG.warning("Failed to store stack %(name)s with traversal"
" ID %(trvsl_id)s, not triggering rollback.",
{'name': self.name,
'trvsl_id': self.current_traversal})
return
@ -1446,7 +1442,7 @@ class Stack(collections.Mapping):
@scheduler.wrappertask
def update_task(self, newstack, action=UPDATE, msg_queue=None):
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
LOG.error(_LE("Unexpected action %s passed to update!"), action)
LOG.error("Unexpected action %s passed to update!", action)
self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action)
return
@ -1460,7 +1456,7 @@ class Stack(collections.Mapping):
return
if self.status == self.IN_PROGRESS:
if action == self.ROLLBACK:
LOG.debug("Starting update rollback for %s" % self.name)
LOG.debug("Starting update rollback for %s", self.name)
else:
reason = _('Attempted to %s an IN_PROGRESS '
'stack') % action
@ -1620,7 +1616,7 @@ class Stack(collections.Mapping):
elif message == rpc_api.THREAD_CANCEL_WITH_ROLLBACK:
raise ForcedCancel(with_rollback=True)
LOG.error(_LE('Unknown message "%s" received'), message)
LOG.error('Unknown message "%s" received', message)
def _delete_backup_stack(self, stack):
# Delete resources in the backup stack referred to by 'stack'
@ -1673,7 +1669,7 @@ class Stack(collections.Mapping):
return ucreds_object.UserCreds.get_by_id(self.context,
self.user_creds_id)
except exception.Error:
LOG.exception(_LE("Failed to retrieve user_creds"))
LOG.exception("Failed to retrieve user_creds")
return None
def _delete_credentials(self, stack_status, reason, abandon):
@ -1704,7 +1700,7 @@ class Stack(collections.Mapping):
self.clients.client('keystone').delete_trust(
trust_id)
except Exception as ex:
LOG.exception(_LE("Error deleting trust"))
LOG.exception("Error deleting trust")
stack_status = self.FAILED
reason = ("Error deleting trust: %s" %
six.text_type(ex))
@ -1714,15 +1710,15 @@ class Stack(collections.Mapping):
ucreds_object.UserCreds.delete(self.context,
self.user_creds_id)
except exception.NotFound:
LOG.info(_LI("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)"),
LOG.info("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)",
{'stack': self.id, 'uc': self.user_creds_id})
try:
self.user_creds_id = None
self.store()
except exception.NotFound:
LOG.info(_LI("Tried to store a stack that does not exist %s"),
LOG.info("Tried to store a stack that does not exist %s",
self.id)
# If the stack has a domain project, delete it
@ -1732,7 +1728,7 @@ class Stack(collections.Mapping):
keystone.delete_stack_domain_project(
project_id=self.stack_user_project_id)
except Exception as ex:
LOG.exception(_LE("Error deleting project"))
LOG.exception("Error deleting project")
stack_status = self.FAILED
reason = "Error deleting project: %s" % six.text_type(ex)
@ -1753,7 +1749,7 @@ class Stack(collections.Mapping):
required for those resources, e.g the stack_user_project.
"""
if action not in (self.DELETE, self.ROLLBACK):
LOG.error(_LE("Unexpected action %s passed to delete!"), action)
LOG.error("Unexpected action %s passed to delete!", action)
self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action)
return
@ -1811,8 +1807,8 @@ class Stack(collections.Mapping):
try:
self.state_set(action, stack_status, reason)
except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
LOG.info("Tried to delete stack that does not exist "
"%s ", self.id)
if not backup:
lifecycle_plugin_utils.do_post_ops(self.context, self,
@ -1823,8 +1819,8 @@ class Stack(collections.Mapping):
try:
stack_object.Stack.delete(self.context, self.id)
except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
LOG.info("Tried to delete stack that does not exist "
"%s ", self.id)
self.id = None
@profiler.trace('Stack.suspend', hide_args=False)
@ -1842,7 +1838,7 @@ class Stack(collections.Mapping):
"""
# No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE):
LOG.info(_LI('%s is already suspended'), self)
LOG.info('%s is already suspended', self)
return
self.updated_time = oslo_timeutils.utcnow()
@ -1867,7 +1863,7 @@ class Stack(collections.Mapping):
"""
# No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE):
LOG.info(_LI('%s is already resumed'), self)
LOG.info('%s is already resumed', self)
return
self.updated_time = oslo_timeutils.utcnow()
@ -1949,7 +1945,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex:
failed = True
LOG.info(_LI('Resource %(name)s delete failed: %(ex)s'),
LOG.info('Resource %(name)s delete failed: %(ex)s',
{'name': res.name, 'ex': ex})
for res in deps:
@ -1959,8 +1955,8 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.create)()
except exception.ResourceFailure as ex:
failed = True
LOG.info(_LI('Resource %(name)s create failed: '
'%(ex)s'), {'name': res.name, 'ex': ex})
LOG.info('Resource %(name)s create failed: '
'%(ex)s', {'name': res.name, 'ex': ex})
else:
res.state_set(res.CREATE, res.FAILED,
'Resource restart aborted')
@ -2023,7 +2019,7 @@ class Stack(collections.Mapping):
service.
"""
LOG.info(_LI('[%(name)s(%(id)s)] update traversal %(tid)s complete'),
LOG.info('[%(name)s(%(id)s)] update traversal %(tid)s complete',
{'name': self.name, 'id': self.id,
'tid': self.current_traversal})
@ -2060,8 +2056,8 @@ class Stack(collections.Mapping):
stack_id = self.store(exp_trvsl=exp_trvsl)
if stack_id is None:
# Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal ID "
"%(trvsl_id)s, aborting stack purge"),
LOG.warning("Failed to store stack %(name)s with traversal ID "
"%(trvsl_id)s, aborting stack purge",
{'name': self.name,
'trvsl_id': self.current_traversal})
return

View File

@ -17,8 +17,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import service_utils
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
@ -77,8 +75,8 @@ class StackLock(object):
raise exception.ActionInProgress(stack_name=stack.name,
action=stack.action)
else:
LOG.info(_LI("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock"),
LOG.info("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock",
{'stack': self.stack_id, 'engine': self.engine_id})
result = stack_lock_object.StackLock.steal(self.context,
@ -87,22 +85,22 @@ class StackLock(object):
self.engine_id)
if result is None:
LOG.info(_LI("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s"),
LOG.info("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s",
{'engine': self.engine_id,
'stack': self.stack_id})
return
elif result is True:
if retry:
LOG.info(_LI("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. "
"Trying again"), {'stack': self.stack_id,
'engine': self.engine_id})
LOG.info("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. "
"Trying again", {'stack': self.stack_id,
'engine': self.engine_id})
return self.acquire(retry=False)
else:
new_lock_engine_id = result
LOG.info(_LI("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first"),
LOG.info("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first",
{'stack': self.stack_id,
'engine': new_lock_engine_id})
@ -117,7 +115,7 @@ class StackLock(object):
self.stack_id,
self.engine_id)
if result is True:
LOG.warning(_LW("Lock was already released on stack %s!"),
LOG.warning("Lock was already released on stack %s!",
self.stack_id)
else:
LOG.debug("Engine %(engine)s released lock on stack "

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import repr_wrapper
from heat.engine import dependencies
from heat.engine import resource
@ -82,7 +81,7 @@ class StackUpdate(object):
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s" % prev_res.name)
LOG.debug("Deleting backup resource %s", prev_res.name)
yield prev_res.destroy()
@staticmethod
@ -106,17 +105,17 @@ class StackUpdate(object):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s" % res_name)
LOG.debug("Swapping in backup Resource %s", res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
LOG.debug("Deleting backup Resource %s" % res_name)
LOG.debug("Deleting backup Resource %s", res_name)
yield prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s" % res_name)
LOG.debug("Backing up existing Resource %s", res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
@ -170,8 +169,8 @@ class StackUpdate(object):
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
LOG.info(_LI("Resource %(res_name)s for stack "
"%(stack_name)s updated"),
LOG.info("Resource %(res_name)s for stack "
"%(stack_name)s updated",
{'res_name': res_name,
'stack_name': self.existing_stack.name})
return

View File

@ -19,8 +19,6 @@ from oslo_utils import timeutils
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import stack
from heat.engine import timestamp
from heat.objects import stack as stack_object
@ -85,9 +83,8 @@ class WatchRule(object):
watch = watch_rule_objects.WatchRule.get_by_name(context,
watch_name)
except Exception as ex:
LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error '
'%(ex)s'), {'watch_name': watch_name,
'ex': ex})
LOG.warning('WatchRule.load (%(watch_name)s) db error %(ex)s',
{'watch_name': watch_name, 'ex': ex})
if watch is None:
raise exception.EntityNotFound(entity='Watch Rule',
name=watch_name)
@ -218,7 +215,7 @@ class WatchRule(object):
data = 0
for d in self.watch_data:
if d.created_at < self.now - self.timeperiod:
LOG.debug('ignoring %s' % str(d.data))
LOG.debug('ignoring %s', str(d.data))
continue
data = data + float(d.data[self.rule['MetricName']]['Value'])
@ -255,13 +252,13 @@ class WatchRule(object):
return actions
def rule_actions(self, new_state):
LOG.info(_LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s'), {'stack': self.stack_id,
'watch_name': self.name,
'new_state': new_state})
LOG.info('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s', {'stack': self.stack_id,
'watch_name': self.name,
'new_state': new_state})
actions = []
if self.ACTION_MAP[new_state] not in self.rule:
LOG.info(_LI('no action for new state %s'), new_state)
LOG.info('no action for new state %s', new_state)
else:
s = stack_object.Stack.get_by_id(
self.context,
@ -272,7 +269,7 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stk.resource_by_refid(refid).signal)
else:
LOG.warning(_LW("Could not process watch state %s for stack"),
LOG.warning("Could not process watch state %s for stack",
new_state)
return actions
@ -292,7 +289,7 @@ class WatchRule(object):
dims = dims[0]
sample['resource_metadata'] = dims
sample['resource_id'] = dims.get('InstanceId')
LOG.debug('new sample:%(k)s data:%(sample)s' % {
LOG.debug('new sample:%(k)s data:%(sample)s', {
'k': k, 'sample': sample})
clients.client('ceilometer').samples.create(**sample)
@ -305,8 +302,8 @@ class WatchRule(object):
return
if self.state == self.SUSPENDED:
LOG.debug('Ignoring metric data for %s, SUSPENDED state'
% self.name)
LOG.debug('Ignoring metric data for %s, SUSPENDED state',
self.name)
return []
if self.rule['MetricName'] not in data:
@ -355,9 +352,9 @@ class WatchRule(object):
% {'self_state': self.state, 'name': self.name,
'state': state})
else:
LOG.warning(_LW("Unable to override state %(state)s for "
"watch %(name)s"), {'state': self.state,
'name': self.name})
LOG.warning("Unable to override state %(state)s for "
"watch %(name)s", {'state': self.state,
'name': self.name})
return actions

View File

@ -21,9 +21,6 @@ from oslo_utils import uuidutils
from osprofiler import profiler
from heat.common import context
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import messaging as rpc_messaging
from heat.db.sqlalchemy import api as db_api
from heat.engine import check_resource
@ -72,7 +69,7 @@ class WorkerService(object):
server=self.engine_id,
topic=self.topic)
self.target = target
LOG.info(_LI("Starting %(topic)s (%(version)s) in engine %(engine)s."),
LOG.info("Starting %(topic)s (%(version)s) in engine %(engine)s.",
{'topic': self.topic,
'version': self.RPC_API_VERSION,
'engine': self.engine_id})
@ -84,13 +81,13 @@ class WorkerService(object):
if self._rpc_server is None:
return
# Stop rpc connection at first for preventing new requests
LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."),
LOG.info("Stopping %(topic)s in engine %(engine)s.",
{'topic': self.topic, 'engine': self.engine_id})
try:
self._rpc_server.stop()
self._rpc_server.wait()
except Exception as e:
LOG.error(_LE("%(topic)s is failed to stop, %(exc)s"),
LOG.error("%(topic)s is failed to stop, %(exc)s",
{'topic': self.topic, 'exc': e})
def stop_traversal(self, stack):
@ -122,13 +119,12 @@ class WorkerService(object):
cancelled = _cancel_workers(stack, self.thread_group_mgr,
self.engine_id, self._rpc_client)
if not cancelled:
LOG.error(_LE("Failed to stop all workers of stack %(name)s "
", stack cancel not complete"),
{'name': stack.name})
LOG.error("Failed to stop all workers of stack %s, "
"stack cancel not complete", stack.name)
return False
LOG.info(_LI('[%(name)s(%(id)s)] Stopped all active workers for stack '
'%(action)s'),
LOG.info('[%(name)s(%(id)s)] Stopped all active workers for stack '
'%(action)s',
{'name': stack.name, 'id': stack.id, 'action': stack.action})
return True
@ -199,18 +195,17 @@ def _stop_traversal(stack):
old_trvsl = stack.current_traversal
updated = _update_current_traversal(stack)
if not updated:
LOG.warning(_LW("Failed to update stack %(name)s with new "
"traversal, aborting stack cancel"),
{'name': stack.name})
LOG.warning("Failed to update stack %s with new "
"traversal, aborting stack cancel", stack.name)
return
reason = 'Stack %(action)s cancelled' % {'action': stack.action}
updated = stack.state_set(stack.action, stack.FAILED, reason)
if not updated:
LOG.warning(_LW("Failed to update stack %(name)s status "
"to %(action)s_%(state)s"),
LOG.warning("Failed to update stack %(name)s status "
"to %(action)s_%(state)s",
{'name': stack.name, 'action': stack.action,
'state': stack.FAILED})
'state': stack.FAILED})
return
sync_point.delete_all(stack.context, stack.id, old_trvsl)

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat import version as hversion
@ -44,6 +43,6 @@ def init_application():
port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host
profiler.setup('heat-api', host)
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'),
LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port})
return config.load_paste_app()

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat import version
@ -45,7 +44,7 @@ def init_application():
port = cfg.CONF.heat_api_cfn.bind_port
host = cfg.CONF.heat_api_cfn.bind_host
LOG.info(_LI('Starting Heat API on %(host)s:%(port)s'),
LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port})
profiler.setup('heat-api-cfn', host)

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat import version
@ -45,7 +44,7 @@ def init_application():
port = cfg.CONF.heat_api_cloudwatch.bind_port
host = cfg.CONF.heat_api_cloudwatch.bind_host
LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
{'host': host, 'port': port})
profiler.setup('heat-api-cloudwatch', host)

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from heat.common.i18n import _LI
from heat.common import identifier
from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base
@ -80,7 +79,7 @@ class Event(
@property
def resource_properties(self):
if self._resource_properties is None:
LOG.info(_LI('rsrp_prop_data lazy load'))
LOG.info('rsrp_prop_data lazy load')
rpd_obj = rpd.ResourcePropertiesData.get_by_id(
self._context, self.rsrc_prop_data_id)
self._resource_properties = rpd_obj.data or {}

View File

@ -24,7 +24,6 @@ from oslo_versionedobjects import fields
from heat.common import crypt
from heat.common import environment_format as env_fmt
from heat.common.i18n import _LW
from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base
from heat.objects import fields as heat_fields
@ -71,10 +70,10 @@ class RawTemplate(
value = crypt.decrypt(method, enc_value)
else:
value = parameters[param_name]
LOG.warning(_LW(
LOG.warning(
'Encountered already-decrypted data while attempting '
'to decrypt parameter %s. Please file a Heat bug so '
'this can be fixed.'), param_name)
'to decrypt parameter %s. Please file a Heat bug so '
'this can be fixed.', param_name)
parameters[param_name] = value
tpl.environment[env_fmt.PARAMETERS] = parameters

View File

@ -27,7 +27,6 @@ import tenacity
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base
from heat.objects import fields as heat_fields
@ -120,12 +119,12 @@ class Resource(
resource._properties_data = resource['rsrc_prop_data'].data
if db_resource['properties_data']:
LOG.error(
_LE('Unexpected condition where resource.rsrc_prop_data '
'and resource.properties_data are both not null. '
'rsrc_prop_data.id: %(rsrc_prop_data_id)s ,'
'resource id: %(res_id)s')
% {'rsrc_prop_data_id': resource['rsrc_prop_data'].id,
'res_id': resource['id']})
'Unexpected condition where resource.rsrc_prop_data '
'and resource.properties_data are both not null. '
'rsrc_prop_data.id: %(rsrc_prop_data_id)s, '
'resource id: %(res_id)s',
{'rsrc_prop_data_id': resource['rsrc_prop_data'].id,
'res_id': resource['id']})
elif db_resource['properties_data']: # legacy field
if db_resource['properties_data_encrypted']:
decrypted_data = crypt.decrypted_dict(

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource
from oslo_utils import timeutils
import six
@ -33,8 +32,8 @@ class CooldownMixin(object):
def _check_scaling_allowed(self):
metadata = self.metadata_get()
if metadata.get('scaling_in_progress'):
LOG.info(_LI("Can not perform scaling action: resource %s "
"is already in scaling.") % self.name)
LOG.info("Can not perform scaling action: resource %s "
"is already in scaling.", self.name)
reason = _('due to scaling activity')
raise resource.NoActionRequired(res_name=self.name,
reason=reason)
@ -66,8 +65,8 @@ class CooldownMixin(object):
def _cooldown_check(self, cooldown, last_adjust):
if not timeutils.is_older_than(last_adjust, cooldown):
LOG.info(_LI("Can not perform scaling action: "
"resource %(name)s is in cooldown (%(cooldown)s).") %
LOG.info("Can not perform scaling action: "
"resource %(name)s is in cooldown (%(cooldown)s).",
{'name': self.name,
'cooldown': cooldown})
reason = _('due to cooldown, '

View File

@ -60,7 +60,7 @@ class MessageProcessor(object):
def __call__(self):
message = self.queue.get()
if message is None:
LOG.debug('[%s] No messages' % self.name)
LOG.debug('[%s] No messages', self.name)
return False
try:

View File

@ -14,7 +14,6 @@
import os
from oslo_log import log as logging
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__)
@ -22,7 +21,7 @@ LOG = logging.getLogger(__name__)
def list_all():
scenario_dir = os.path.join(os.path.dirname(__file__), '../scenarios')
if not os.path.isdir(scenario_dir):
LOG.error(_LE('Scenario directory "%s" not found'), scenario_dir)
LOG.error('Scenario directory "%s" not found', scenario_dir)
return
for root, dirs, files in os.walk(scenario_dir):

View File

@ -15,7 +15,6 @@ import collections
from oslo_log import log as logging
import six
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -39,26 +38,26 @@ class GenericResource(resource.Resource):
return (True, None)
def handle_create(self):
LOG.warning(_LW('Creating generic resource (Type "%s")'),
LOG.warning('Creating generic resource (Type "%s")',
self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
LOG.warning(_LW('Updating generic resource (Type "%s")'),
LOG.warning('Updating generic resource (Type "%s")',
self.type())
def handle_delete(self):
LOG.warning(_LW('Deleting generic resource (Type "%s")'),
LOG.warning('Deleting generic resource (Type "%s")',
self.type())
def _resolve_attribute(self, name):
return self.name
def handle_suspend(self):
LOG.warning(_LW('Suspending generic resource (Type "%s")'),
LOG.warning('Suspending generic resource (Type "%s")',
self.type())
def handle_resume(self):
LOG.warning(_LW('Resuming generic resource (Type "%s")'),
LOG.warning('Resuming generic resource (Type "%s")',
self.type())
@ -67,14 +66,14 @@ class CancellableResource(GenericResource):
return True
def handle_create_cancel(self, cookie):
LOG.warning(_LW('Cancelling create generic resource (Type "%s")'),
LOG.warning('Cancelling create generic resource (Type "%s")',
self.type())
def check_update_complete(self, cookie):
return True
def handle_update_cancel(self, cookie):
LOG.warning(_LW('Cancelling update generic resource (Type "%s")'),
LOG.warning('Cancelling update generic resource (Type "%s")',
self.type())
@ -257,7 +256,7 @@ class SignalResource(signal_responder.SignalResponder):
self.resource_id_set(self._get_user_id())
def handle_signal(self, details=None):
LOG.warning(_LW('Signaled resource (Type "%(type)s") %(details)s'),
LOG.warning('Signaled resource (Type "%(type)s") %(details)s',
{'type': self.type(), 'details': details})
def _resolve_attribute(self, name):

View File

@ -118,7 +118,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
try:
linux_client.validate_authentication()
except exceptions.SSHTimeout:
LOG.exception('ssh connection to %s failed' % ip)
LOG.exception('ssh connection to %s failed', ip)
raise
return linux_client
@ -637,7 +637,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
rsrc_events = self.client.events.list(stack_identifier,
resource_name=rsrc_name)
except heat_exceptions.HTTPNotFound:
LOG.debug("No events yet found for %s" % rsrc_name)
LOG.debug("No events yet found for %s", rsrc_name)
else:
matched = [e for e in rsrc_events
if e.resource_status_reason == reason]

View File

@ -19,7 +19,6 @@ from oslo_log import log
import six
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import constraints
from heat.engine import resources
from heat.engine import support
@ -155,8 +154,8 @@ class HeatCustomGuidelines(object):
try:
cls_file = open(cls.__module__.replace('.', '/') + '.py')
except IOError as ex:
LOG.warning(_LW('Cannot perform trailing spaces check on '
'resource module: %s') % six.text_type(ex))
LOG.warning('Cannot perform trailing spaces check on '
'resource module: %s', six.text_type(ex))
continue
lines = [line.strip() for line in cls_file.readlines()]
idx = 0