Merge "Remove log translations"

Jenkins 2017-04-17 15:59:05 +00:00 committed by Gerrit Code Review
commit 6db5f8bc28
26 changed files with 152 additions and 201 deletions
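The change is mechanical across the tree: the _LW log-translation marker is dropped and warning messages are passed to the logger as plain strings, while _LI (info) and the user-facing _() marker remain in place. The pattern, taken from the api/middleware hunk further down and shown here as a small before/after sketch:

    # Before: warning text wrapped in the _LW log-translation marker.
    LOG.warning(_LW("Incorrect path: {path}").format(path=path))

    # After: LOG.warning receives a plain, untranslated string.
    LOG.warning("Incorrect path: {path}".format(path=path))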

View File

@ -30,13 +30,6 @@ Formatting Guidelines
Sahara uses string formatting defined in `PEP 3101`_ for logs.
.. code:: python
LOG.warning(_LW("Incorrect path: {path}").format(path=path))
..
Translation Guidelines
----------------------
@ -46,6 +39,5 @@ CLI tools packaged with sahara contain log translations.
* Debug: no translation
* Info: _LI
* Warning: _LW
.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/
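With the warning entry removed, the remaining guideline is: format log messages with PEP 3101 str.format(), translate info-level messages with _LI, and leave debug and warning messages untranslated. A short sketch of the resulting convention (the message text and variables are illustrative, not taken from sahara):

    from oslo_log import log as logging

    from sahara.i18n import _LI  # info-level log messages are still translated

    LOG = logging.getLogger(__name__)


    def report(path, count):
        # Debug and warning: plain strings, PEP 3101 formatting, no marker.
        LOG.debug("Scanning {path}".format(path=path))
        LOG.warning("Incorrect path: {path}".format(path=path))
        # Info: still wrapped in _LI per the guideline kept above.
        LOG.info(_LI("Processed {count} entries").format(count=count))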

View File

@ -20,7 +20,6 @@ import webob
import webob.exc as ex
from sahara.i18n import _
from sahara.i18n import _LW
LOG = logging.getLogger(__name__)
@ -43,7 +42,7 @@ class AuthValidator(base.Middleware):
"""
token_tenant = req.environ.get("HTTP_X_TENANT_ID")
if not token_tenant:
LOG.warning(_LW("Can't get tenant_id from env"))
LOG.warning("Can't get tenant_id from env")
raise ex.HTTPServiceUnavailable()
path = req.environ['PATH_INFO']
@ -52,7 +51,7 @@ class AuthValidator(base.Middleware):
version, url_tenant, rest = strutils.split_path(path, 3, 3,
True)
except ValueError:
LOG.warning(_LW("Incorrect path: {path}").format(path=path))
LOG.warning("Incorrect path: {path}".format(path=path))
raise ex.HTTPNotFound(_("Incorrect path"))
if token_tenant != url_tenant:
@ -83,7 +82,7 @@ class AuthValidatorV2(base.Middleware):
if path != '/':
token_tenant = req.environ.get("HTTP_X_TENANT_ID")
if not token_tenant:
LOG.warning(_LW("Can't get tenant_id from env"))
LOG.warning("Can't get tenant_id from env")
raise ex.HTTPServiceUnavailable()
try:
@ -94,7 +93,7 @@ class AuthValidatorV2(base.Middleware):
version, requested_tenant, rest = strutils.split_path(
path, 3, 3, True)
except ValueError:
LOG.warning(_LW("Incorrect path: {path}").format(path=path))
LOG.warning("Incorrect path: {path}".format(path=path))
raise ex.HTTPNotFound(_("Incorrect path"))
if token_tenant != requested_tenant:

View File

@ -26,7 +26,6 @@ from oslo_log import log as logging
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service import sessions
@ -52,8 +51,8 @@ class Context(context.RequestContext):
overwrite=True,
**kwargs):
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: '
'{args}').format(args=kwargs))
LOG.warning('Arguments dropped when creating context: '
'{args}'.format(args=kwargs))
super(Context, self).__init__(auth_token=auth_token,
user=user_id,
@ -319,5 +318,5 @@ def get_auth_token():
try:
cur.auth_token = sessions.cache().token_for_auth(cur.auth_plugin)
except Exception as e:
LOG.warning(_LW("Cannot update token, reason: %s"), e)
LOG.warning("Cannot update token, reason: %s", e)
return cur.auth_token

View File

@ -30,7 +30,6 @@ import sqlalchemy as sa
from sahara.db.sqlalchemy import models as m
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service.validations import acl as validate
from sahara.utils import types
@ -263,8 +262,7 @@ def setup_db():
engine = get_engine()
m.Cluster.metadata.create_all(engine)
except sa.exc.OperationalError as e:
LOG.warning(_LW("Database registration exception: {exc}")
.format(exc=e))
LOG.warning("Database registration exception: {exc}".format(exc=e))
return False
return True
@ -274,7 +272,7 @@ def drop_db():
engine = get_engine()
m.Cluster.metadata.drop_all(engine)
except Exception as e:
LOG.warning(_LW("Database shutdown exception: {exc}").format(exc=e))
LOG.warning("Database shutdown exception: {exc}".format(exc=e))
return False
return True

View File

@ -24,7 +24,6 @@ import six
from sahara import conductor
from sahara.db.templates import utils as u
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service.validations import cluster_template_schema as clt
from sahara.service.validations import node_group_template_schema as ngt
from sahara.utils import api_validator
@ -126,17 +125,17 @@ def check_usage_of_existing(ctx, ng_templates, cl_templates):
ng["id"], clusters)
if cluster_users:
LOG.warning(_LW("Node group template {name} "
"in use by clusters {clusters}").format(
LOG.warning("Node group template {name} "
"in use by clusters {clusters}".format(
name=ng["name"], clusters=cluster_users))
if template_users:
LOG.warning(_LW("Node group template {name} "
"in use by cluster templates {templates}").format(
LOG.warning("Node group template {name} "
"in use by cluster templates {templates}".format(
name=ng["name"], templates=template_users))
if cluster_users or template_users:
LOG.warning(_LW("Update of node group template "
"{name} is not allowed").format(name=ng["name"]))
LOG.warning("Update of node group template "
"{name} is not allowed".format(name=ng["name"]))
error = True
for cl_info in cl_templates:
@ -145,12 +144,12 @@ def check_usage_of_existing(ctx, ng_templates, cl_templates):
cluster_users = u.check_cluster_template_usage(cl["id"], clusters)
if cluster_users:
LOG.warning(_LW("Cluster template {name} "
"in use by clusters {clusters}").format(
LOG.warning("Cluster template {name} "
"in use by clusters {clusters}".format(
name=cl["name"], clusters=cluster_users))
LOG.warning(_LW("Update of cluster template "
"{name} is not allowed").format(name=cl["name"]))
LOG.warning("Update of cluster template "
"{name} is not allowed".format(name=cl["name"]))
error = True
return error
@ -159,7 +158,7 @@ def check_usage_of_existing(ctx, ng_templates, cl_templates):
def log_skipping_dir(path, reason=""):
if reason:
reason = ", " + reason
LOG.warning(_LW("Skipping processing for {dir}{reason}").format(
LOG.warning("Skipping processing for {dir}{reason}".format(
dir=path, reason=reason))
@ -180,7 +179,7 @@ def check_cluster_templates_valid(ng_templates, cl_templates):
try:
ct_validator.validate(template)
except jsonschema.ValidationError as e:
LOG.warning(_LW("Validation for {path} failed, {reason}").format(
LOG.warning("Validation for {path} failed, {reason}".format(
path=cl["path"], reason=e))
return True
return False
@ -300,8 +299,8 @@ def process_files(dirname, files):
data = fp.read()
template = json.loads(data)
except ValueError as e:
LOG.warning(_LW("Error processing {path}, "
"{reason}").format(path=fpath, reason=e))
LOG.warning("Error processing {path}, "
"{reason}".format(path=fpath, reason=e))
raise Handled("error processing files")
# If this file doesn't contain basic fields, skip it.
@ -333,9 +332,9 @@ def process_files(dirname, files):
try:
ng_validator.validate(template)
except jsonschema.ValidationError as e:
LOG.warning(_LW("Validation for {path} failed, "
"{reason}").format(path=fpath,
reason=e))
LOG.warning("Validation for {path} failed, "
"{reason}".format(path=fpath,
reason=e))
raise Handled(
"node group template validation failed")
node_groups.append(file_entry)
@ -373,27 +372,27 @@ def delete_node_group_template(ctx, template, rollback=False):
template["id"], clusters, cluster_templates)
if cluster_users:
LOG.warning(_LW("Node group template {info} "
"in use by clusters {clusters}").format(
LOG.warning("Node group template {info} "
"in use by clusters {clusters}".format(
info=u.name_and_id(template),
clusters=cluster_users))
if template_users:
LOG.warning(_LW("Node group template {info} "
"in use by cluster templates {templates}").format(
LOG.warning("Node group template {info} "
"in use by cluster templates {templates}".format(
info=u.name_and_id(template),
templates=template_users))
if cluster_users or template_users:
LOG.warning(_LW("Deletion of node group template "
"{info} failed").format(info=u.name_and_id(template)))
LOG.warning("Deletion of node group template "
"{info} failed".format(info=u.name_and_id(template)))
return
try:
conductor.API.node_group_template_destroy(ctx, template["id"],
ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Deletion of node group template {info} "
"failed{rollback}, {reason}").format(
LOG.warning("Deletion of node group template {info} "
"failed{rollback}, {reason}".format(
info=u.name_and_id(template),
reason=e,
rollback=rollback_msg))
@ -415,8 +414,8 @@ def reverse_node_group_template_updates(ctx, update_info):
template["id"], values,
ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Rollback of update for node group "
"template {info} failed, {reason}").format(
LOG.warning("Rollback of update for node group "
"template {info} failed, {reason}".format(
info=u.name_and_id(template),
reason=e))
else:
@ -459,8 +458,8 @@ def add_node_group_templates(ctx, node_groups):
template = conductor.API.node_group_template_update(
ctx, current['id'], template, ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Update of node group template {info} "
"failed, {reason}").format(
LOG.warning("Update of node group template {info} "
"failed, {reason}".format(
info=u.name_and_id(current),
reason=e))
raise Handled()
@ -482,8 +481,8 @@ def add_node_group_templates(ctx, node_groups):
template = conductor.API.node_group_template_create(
ctx, template)
except Exception as e:
LOG.warning(_LW("Creation of node group template "
"from {path} failed, {reason}").format(
LOG.warning("Creation of node group template "
"from {path} failed, {reason}".format(
path=ng['path'], reason=e))
raise Handled()
@ -499,8 +498,8 @@ def add_node_group_templates(ctx, node_groups):
ng_info, error = do_reversals(ng_info)
except Exception as e:
LOG.warning(_LW("Unhandled exception while processing "
"node group templates, {reason}").format(reason=e))
LOG.warning("Unhandled exception while processing "
"node group templates, {reason}".format(reason=e))
ng_info, error = do_reversals(ng_info)
return ng_info, error
@ -518,23 +517,23 @@ def delete_cluster_template(ctx, template, rollback=False):
clusters)
if cluster_users:
LOG.warning(_LW("Cluster template {info} "
"in use by clusters {clusters}").format(
LOG.warning("Cluster template {info} "
"in use by clusters {clusters}".format(
info=u.name_and_id(template),
clusters=cluster_users))
LOG.warning(_LW("Deletion of cluster template "
"{info} failed").format(info=u.name_and_id(template)))
LOG.warning("Deletion of cluster template "
"{info} failed".format(info=u.name_and_id(template)))
return
try:
conductor.API.cluster_template_destroy(ctx, template["id"],
ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Deletion of cluster template {info} failed{rollback}"
", {reason}").format(info=u.name_and_id(template),
reason=e,
rollback=rollback_msg))
LOG.warning("Deletion of cluster template {info} failed{rollback}"
", {reason}".format(info=u.name_and_id(template),
reason=e,
rollback=rollback_msg))
else:
LOG.info(_LI("Deleted cluster template {info}{rollback}").format(
info=u.name_and_id(template), rollback=rollback_msg))
@ -553,8 +552,8 @@ def reverse_cluster_template_updates(ctx, update_info):
template["id"], values,
ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Rollback of update for cluster "
"template {info} failed, {reason}").format(
LOG.warning("Rollback of update for cluster "
"template {info} failed, {reason}".format(
info=u.name_and_id(template),
reason=e))
else:
@ -622,8 +621,8 @@ def add_cluster_templates(ctx, clusters, ng_dict):
template = conductor.API.cluster_template_update(
ctx, current['id'], template, ignore_prot_on_def=True)
except Exception as e:
LOG.warning(_LW("Update of cluster template {info} "
"failed, {reason}").format(
LOG.warning("Update of cluster template {info} "
"failed, {reason}".format(
info=u.name_and_id(current), reason=e))
raise Handled()
@ -643,8 +642,8 @@ def add_cluster_templates(ctx, clusters, ng_dict):
template = conductor.API.cluster_template_create(ctx,
template)
except Exception as e:
LOG.warning(_LW("Creation of cluster template "
"from {path} failed, {reason}").format(
LOG.warning("Creation of cluster template "
"from {path} failed, {reason}".format(
path=cl['path'],
reason=e))
raise Handled()
@ -658,8 +657,8 @@ def add_cluster_templates(ctx, clusters, ng_dict):
error = do_reversals(created, updated)
except Exception as e:
LOG.warning(_LW("Unhandled exception while processing "
"cluster templates, {reason}").format(reason=e))
LOG.warning("Unhandled exception while processing "
"cluster templates, {reason}".format(reason=e))
error = do_reversals(created, updated)
return error
@ -757,8 +756,8 @@ def do_node_group_template_delete():
if t:
delete_node_group_template(ctx, t)
else:
LOG.warning(_LW("Deletion of node group template {name} failed, "
"no such template").format(name=template_name))
LOG.warning("Deletion of node group template {name} failed, "
"no such template".format(name=template_name))
def do_node_group_template_delete_by_id():
@ -770,12 +769,12 @@ def do_node_group_template_delete_by_id():
if t["is_default"]:
delete_node_group_template(ctx, t)
else:
LOG.warning(_LW("Deletion of node group template {info} skipped, "
"not a default template").format(
LOG.warning("Deletion of node group template {info} skipped, "
"not a default template".format(
info=u.name_and_id(t)))
else:
LOG.warning(_LW("Deletion of node group template {id} failed, "
"no such template").format(id=CONF.command.id))
LOG.warning("Deletion of node group template {id} failed, "
"no such template".format(id=CONF.command.id))
def do_cluster_template_delete():
@ -786,8 +785,8 @@ def do_cluster_template_delete():
if t:
delete_cluster_template(ctx, t)
else:
LOG.warning(_LW("Deletion of cluster template {name} failed, "
"no such template").format(name=template_name))
LOG.warning("Deletion of cluster template {name} failed, "
"no such template".format(name=template_name))
def do_cluster_template_delete_by_id():
@ -799,9 +798,9 @@ def do_cluster_template_delete_by_id():
if t["is_default"]:
delete_cluster_template(ctx, t)
else:
LOG.warning(_LW("Deletion of cluster template {info} skipped, "
"not a default template").format(
LOG.warning("Deletion of cluster template {info} skipped, "
"not a default template".format(
info=u.name_and_id(t)))
else:
LOG.warning(_LW("Deletion of cluster template {id} failed, "
"no such template").format(id=CONF.command.id))
LOG.warning("Deletion of cluster template {id} failed, "
"no such template".format(id=CONF.command.id))

View File

@ -30,4 +30,3 @@ _ = _translators.primary
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
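For reference, after this hunk sahara/i18n.py keeps only the primary and info-level translators. A minimal sketch of the resulting module (the lines outside the hunk are reconstructed from the usual oslo_i18n pattern, not quoted from the commit):

    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='sahara')

    # The primary translation function using the well-known name "_"
    _ = _translators.primary

    # Translators for log levels get a short name like '_LI'. The "L" is for
    # "log" and the other letter comes from the level.
    _LI = _translators.log_info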

View File

@ -23,7 +23,6 @@ from oslo_utils import uuidutils
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.plugins.ambari import client as ambari_client
from sahara.plugins.ambari import common as p_common
from sahara.plugins.ambari import configs
@ -169,8 +168,8 @@ def resolve_package_conflicts(cluster, instances=None):
r.execute_command(
"apt-get remove -y libmysql-java", run_as_root=True)
except Exception:
LOG.warning(_LW("Can't remove libmysql-java, "
"it's probably not installed"))
LOG.warning("Can't remove libmysql-java, "
"it's probably not installed")
def _prepare_ranger(cluster):
@ -623,9 +622,9 @@ def _get_topology_data(cluster):
if not t_helper.is_data_locality_enabled():
return {}
LOG.warning(_LW("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False "
"explicitly"))
LOG.warning("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False "
"explicitly")
return t_helper.generate_topology_map(cluster, is_node_awareness=False)
@ -672,9 +671,9 @@ def _add_hadoop_swift_jar(instance, new_jar):
amb_code, amb_hadoop_version))
r.execute_command("sudo cp {} {}".format(new_jar, origin_jar))
else:
LOG.warning(_LW("The {jar_file} file cannot be found "
"in the {dir} directory so Keystone API v3 "
"is not enabled for this cluster.")
LOG.warning("The {jar_file} file cannot be found "
"in the {dir} directory so Keystone API v3 "
"is not enabled for this cluster."
.format(jar_file="hadoop-openstack.jar",
dir="/opt"))

View File

@ -29,7 +29,6 @@ from oslo_serialization import jsonutils as json
import six
from six.moves import urllib
from sahara.i18n import _LW
from sahara.plugins.cdh import exceptions as ex
LOG = logging.getLogger(__name__)
@ -106,9 +105,9 @@ class HttpClient(object):
url = self._make_url(path, params)
if http_method in ("GET", "DELETE"):
if data is not None:
LOG.warning(_LW("{method} method does not pass any data. "
"Path {path}").format(method=http_method,
path=path))
LOG.warning("{method} method does not pass any data. "
"Path {path}".format(method=http_method,
path=path))
data = None
# Setup the request

View File

@ -32,7 +32,6 @@ from six.moves import urllib
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.plugins.cdh import exceptions as ex
LOG = logging.getLogger(__name__)
@ -113,13 +112,13 @@ class Resource(object):
except (socket.error, urllib.error.URLError) as e:
if "timed out" in six.text_type(e).lower():
if retry < self.retries:
LOG.warning(_LW("Timeout issuing GET request for "
"{path}. Will retry").format(
path=self._join_uri(relpath)))
LOG.warning("Timeout issuing GET request for "
"{path}. Will retry".format(
path=self._join_uri(relpath)))
else:
LOG.warning(_LW("Timeout issuing GET request for "
"{path}. No retries left").format(
path=self._join_uri(relpath)))
LOG.warning("Timeout issuing GET request for "
"{path}. No retries left".format(
path=self._join_uri(relpath)))
else:
raise
else:

View File

@ -19,7 +19,6 @@ from sahara import conductor as cond
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
conductor = cond.API
LOG = logging.getLogger(__name__)
@ -209,8 +208,8 @@ class LabelHandler(object):
_("Plugin %s is not enabled") % plugin_name)
if plb.get('deprecated', {}).get('status', False):
LOG.warning(_LW("Plugin %s is deprecated and can be removed in "
"the next release") % plugin_name)
LOG.warning("Plugin %s is deprecated and can be removed in "
"the next release" % plugin_name)
vlb = details.get(VERSION_LABELS_SCOPE, {}).get(version, {})
if not vlb.get('enabled', {}).get('status'):
@ -219,7 +218,6 @@ class LabelHandler(object):
% {'version': version, 'plugin': plugin_name})
if vlb.get('deprecated', {}).get('status', False):
LOG.warning(
_LW("Using version %(version)s of plugin %(plugin)s is "
"deprecated and can removed in next release")
% {'version': version, 'plugin': plugin_name})
LOG.warning("Using version %(version)s of plugin %(plugin)s is "
"deprecated and can removed in next release"
% {'version': version, 'plugin': plugin_name})

View File

@ -22,7 +22,6 @@ from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
import sahara.plugins.mapr.abstract.configurer as ac
from sahara.plugins.mapr.domain import distro as d
from sahara.plugins.mapr.domain import service as srvc
@ -279,7 +278,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
('mapr', mapr_user_pass),
run_as_root=True)
else:
LOG.warning(_LW('User "mapr" does not exists'))
LOG.warning('User "mapr" does not exists')
def create_home_mapr(instance):
target_path = '/home/mapr'
@ -293,7 +292,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
with instance.remote() as r:
r.execute_command(cmd, run_as_root=True)
else:
LOG.warning(_LW('User "mapr" does not exists'))
LOG.warning('User "mapr" does not exists')
util.execute_on_instances(instances, set_user_password)
util.execute_on_instances(instances, create_home_mapr)

View File

@ -20,7 +20,6 @@ import six
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.plugins import utils
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
@ -419,9 +418,8 @@ def _make_hadoop_paths(paths, hadoop_dir):
True, step=_("Configure topology data"), param=('cluster', 1))
def configure_topology_data(pctx, cluster):
if c_helper.is_data_locality_enabled(pctx, cluster):
LOG.warning(_LW("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False "
"explicitly"))
LOG.warning("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False explicitly")
tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)
topology_data = "\n".join(
[k + " " + v for k, v in tpl_map.items()]) + "\n"

View File

@ -19,7 +19,6 @@ from oslo_log import log as logging
from sahara import conductor as cond
from sahara import context
from sahara.i18n import _LW
from sahara.plugins.vanilla import utils as u
from sahara.service.castellan import utils as castellan
@ -70,7 +69,7 @@ def delete_oozie_password(cluster):
if 'oozie_pass_id' in extra:
castellan.delete_secret(extra['oozie_pass_id'])
else:
LOG.warning(_LW("Cluster hasn't Oozie password"))
LOG.warning("Cluster hasn't Oozie password")
def get_hive_password(cluster):
@ -87,4 +86,4 @@ def delete_hive_password(cluster):
if 'hive_pass_id' in extra:
castellan.delete_secret(extra['hive_pass_id'])
else:
LOG.warning(_LW("Cluster hasn't hive password"))
LOG.warning("Cluster hasn't hive password")

View File

@ -24,7 +24,6 @@ from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.spark import engine as spark_engine
@ -176,9 +175,8 @@ def cancel_job(job_execution_id):
job_info = engine.cancel_job(job_execution)
except Exception as ex:
job_info = None
LOG.warning(
_LW("Error during cancel of job execution: "
"{error}").format(error=ex))
LOG.warning("Error during cancel of job execution: "
"{error}".format(error=ex))
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info(_LI("Job execution was canceled successfully"))

View File

@ -21,7 +21,6 @@ from oslo_log import log
import six
from sahara import context
from sahara.i18n import _LW
from sahara.utils.openstack import manila
LOG = log.getLogger(__name__)
@ -176,13 +175,12 @@ class _ShareHandler(object):
if accesses:
access = accesses[0]
if access.access_level not in ('ro', 'rw'):
LOG.warning(
_LW("Unknown permission level {access_level} on share "
"id {share_id} for ip {ip}. Leaving pre-existing "
"permissions.").format(
access_level=access.access_level,
share_id=self.share.id,
ip=instance.internal_ip))
LOG.warning("Unknown permission level {access_level} on share "
"id {share_id} for ip {ip}. Leaving pre-existing "
"permissions.".format(
access_level=access.access_level,
share_id=self.share.id,
ip=instance.internal_ip))
elif access.access_level == 'ro' and access_level == 'rw':
self.share.deny(access.id)
self.share.allow('ip', instance.internal_ip, access_level)
@ -238,9 +236,8 @@ class _NFSMounter(_ShareHandler):
command = cls._NFS_CHECKS[distro]
remote.execute_command(command, run_as_root=True)
else:
LOG.warning(
_LW("Cannot verify installation of NFS mount tools for "
"unknown distro {distro}.").format(distro=distro))
LOG.warning("Cannot verify installation of NFS mount tools for "
"unknown distro {distro}.".format(distro=distro))
def mount_to_instance(self, remote, share_info):
"""Mounts the share to the instance as configured."""

View File

@ -21,7 +21,6 @@ from oslo_log import log
import six
from sahara import context
from sahara.i18n import _LW
from sahara.utils.openstack import manila
LOG = log.getLogger(__name__)
@ -176,13 +175,12 @@ class _ShareHandler(object):
if accesses:
access = accesses[0]
if access.access_level not in ('ro', 'rw'):
LOG.warning(
_LW("Unknown permission level {access_level} on share "
"id {share_id} for ip {ip}. Leaving pre-existing "
"permissions.").format(
access_level=access.access_level,
share_id=self.share.id,
ip=instance.internal_ip))
LOG.warning("Unknown permission level {access_level} on share "
"id {share_id} for ip {ip}. Leaving pre-existing "
"permissions.".format(
access_level=access.access_level,
share_id=self.share.id,
ip=instance.internal_ip))
elif access.access_level == 'ro' and access_level == 'rw':
self.share.deny(access.id)
self.share.allow('ip', instance.internal_ip, access_level)
@ -237,9 +235,8 @@ class _NFSMounter(_ShareHandler):
command = cls._NFS_CHECKS[distro]
remote.execute_command(command, run_as_root=True)
else:
LOG.warning(
_LW("Cannot verify installation of NFS mount tools for "
"unknown distro {distro}.").format(distro=distro))
LOG.warning("Cannot verify installation of NFS mount tools for "
"unknown distro {distro}.".format(distro=distro))
def mount_to_instance(self, remote, share_info):
"""Mounts the share to the instance as configured."""

View File

@ -26,7 +26,6 @@ from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service import networks
from sahara.service import volumes
from sahara.utils import cluster as cluster_utils
@ -244,12 +243,12 @@ sed '/^Defaults requiretty*/ s/^/#/' -i /etc/sudoers\n
security_group = b.execute_with_retries(client.get, name)
if (security_group.name !=
g.generate_auto_security_group_name(node_group)):
LOG.warning(_LW("Auto security group for node group {name} is "
"not found").format(name=node_group.name))
LOG.warning("Auto security group for node group {name} is "
"not found".format(name=node_group.name))
return
b.execute_with_retries(client.delete, name)
except Exception:
LOG.warning(_LW("Failed to delete security group {name}").format(
LOG.warning("Failed to delete security group {name}".format(
name=name))
def _delete_aa_server_groups(self, cluster):
@ -281,20 +280,20 @@ sed '/^Defaults requiretty*/ s/^/#/' -i /etc/sudoers\n
b.execute_with_retries(networks.delete_floating_ip,
instance.instance_id)
except nova_exceptions.NotFound:
LOG.warning(_LW("Attempted to delete non-existent floating IP "
"in pool {pool} from instance")
LOG.warning("Attempted to delete non-existent floating IP "
"in pool {pool} from instance"
.format(pool=instance.node_group.floating_ip_pool))
try:
volumes.detach_from_instance(instance)
except Exception:
LOG.warning(_LW("Detaching volumes from instance failed"))
LOG.warning("Detaching volumes from instance failed")
try:
b.execute_with_retries(nova.client().servers.delete,
instance.instance_id)
except nova_exceptions.NotFound:
LOG.warning(_LW("Attempted to delete non-existent instance"))
LOG.warning("Attempted to delete non-existent instance")
conductor.instance_remove(context.ctx(), instance)

View File

@ -20,7 +20,6 @@ from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service import engine as e
from sahara.service.heat import commons as heat_common
from sahara.service.heat import templates as ht
@ -110,8 +109,8 @@ class HeatEngine(e.Engine):
if rollback_info.get('shutdown', False):
self._rollback_cluster_creation(cluster, reason)
LOG.warning(_LW("Cluster creation rollback "
"(reason: {reason})").format(reason=reason))
LOG.warning("Cluster creation rollback "
"(reason: {reason})".format(reason=reason))
return False
@ -120,8 +119,8 @@ class HeatEngine(e.Engine):
if rollback_count or target_count:
self._rollback_cluster_scaling(
cluster, rollback_count, target_count, reason)
LOG.warning(_LW("Cluster scaling rollback "
"(reason: {reason})").format(reason=reason))
LOG.warning("Cluster scaling rollback "
"(reason: {reason})".format(reason=reason))
return True
@ -196,8 +195,8 @@ class HeatEngine(e.Engine):
try:
heat.delete_stack(cluster)
except heat_exc.HTTPNotFound:
LOG.warning(_LW('Did not find stack for cluster. Trying to delete '
'cluster manually.'))
LOG.warning('Did not find stack for cluster. Trying to delete '
'cluster manually.')
# Stack not found. Trying to delete cluster like direct engine
# do it

View File

@ -19,7 +19,6 @@ from oslo_log import log as logging
from sahara import conductor as cond
from sahara import context
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import provisioning as common_configs
from sahara.utils import cluster as c_u
@ -77,7 +76,7 @@ def _configure_ntp_on_instance(instance, url):
with instance.remote() as r:
if not _check_ntp_installed(r):
# missing ntp service
LOG.warning(_LW("Unable to configure NTP service"))
LOG.warning("Unable to configure NTP service")
return
r.append_to_file(

View File

@ -25,7 +25,6 @@ import six
from sahara import conductor as c
from sahara import context
from sahara.i18n import _LW
from sahara.service.api import v10 as api
from sahara.service import coordinator
from sahara.service.edp import job_manager
@ -100,11 +99,11 @@ def terminate_cluster(ctx, cluster, description):
try:
api.terminate_cluster(cluster.id)
except Exception as e:
LOG.warning(_LW(
LOG.warning(
'Failed to terminate {description} cluster in "{status}" '
'state: {error}.').format(error=six.text_type(e),
status=cluster.status,
description=description))
'state: {error}.'.format(error=six.text_type(e),
status=cluster.status,
description=description))
else:
if (cluster.status !=

View File

@ -22,7 +22,6 @@ from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.plugins import provisioning as plugin_base
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils.openstack import base as b
@ -77,16 +76,14 @@ def _check_installed_xfs(instance):
with instance.remote() as r:
distro = _get_os_distrib(r)
if not cmd_map.get(distro):
LOG.warning(
_LW("Cannot verify installation of XFS tools for "
"unknown distro {distro}.").format(distro=distro))
LOG.warning("Cannot verify installation of XFS tools for "
"unknown distro {distro}.".format(distro=distro))
return False
try:
r.execute_command(cmd_map.get(distro), run_as_root=True)
return True
except Exception as e:
LOG.warning(
_LW("Cannot install xfsprogs: {reason}").format(reason=e))
LOG.warning("Cannot install xfsprogs: {reason}".format(reason=e))
return False
@ -202,9 +199,8 @@ def _format_device(
with lock:
formatted_devices.append(device)
except Exception as e:
LOG.warning(
_LW("Device {dev} cannot be formatted: {reason}").format(
dev=device, reason=e))
LOG.warning("Device {dev} cannot be formatted: {reason}".format(
dev=device, reason=e))
cpo.add_fail_event(instance, e)

View File

@ -21,7 +21,6 @@ from oslo_log import log
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.utils.openstack import base as b
from sahara.utils.openstack import nova
from sahara.utils import xmlutils as x
@ -77,7 +76,7 @@ def _read_swift_topology():
(host, path) = line.split()
topology[host] = path
except IOError:
LOG.warning(_LW("Unable to read Swift nodes topology from {config}")
LOG.warning("Unable to read Swift nodes topology from {config}"
.format(config=CONF.swift_topology_file))
return {}

View File

@ -24,7 +24,6 @@ from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.utils.notification import sender
from sahara.utils.openstack import base as auth_base
@ -148,9 +147,8 @@ def etc_hosts_entry_for_service(service):
try:
result = "%s %s\n" % (socket.gethostbyname(hostname), hostname)
except socket.gaierror:
LOG.warning(
_LW("Failed to resolve hostname of service: '{}'").format(service)
)
LOG.warning("Failed to resolve hostname of service: '{}'"
.format(service))
result = "# Failed to resolve {} during deployment\n".format(hostname)
return result

View File

@ -22,8 +22,6 @@ import re
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning)\(\s*(_\(|'|\")")
accepted_log_level = re.compile(
r"^LOG\.(debug|info|exception|warning|error|critical)\(")
@ -53,9 +51,6 @@ def validate_log_translations(logical_line, filename):
msg = "S369: LOG.info messages require translations `_LI()`!"
if log_translation_LI.search(logical_line):
yield (0, msg)
msg = "S371: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.search(logical_line):
yield (0, msg)
def no_translate_debug_logs(logical_line, filename):
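To illustrate what the trimmed hacking check still enforces, a small standalone sketch (the sample lines are hypothetical; the regex is copied from the hunk above): after this change only untranslated LOG.info calls are flagged, and plain-string LOG.warning calls pass.

    import re

    # Regex kept by this change (check S369); the S371 warning regex is gone.
    log_translation_LI = re.compile(r"(.)*LOG\.(info)\(\s*(_\(|'|\")")

    samples = [
        "LOG.info('untranslated info message')",        # flagged: S369
        "LOG.info(_LI('translated info message'))",     # passes
        "LOG.warning('plain warning is now allowed')",  # passes: S371 removed
    ]

    for line in samples:
        if log_translation_LI.search(line):
            print("S369: LOG.info messages require translations `_LI()`!")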

View File

@ -24,7 +24,6 @@ from six.moves.urllib import parse as urlparse
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _LW
LOG = logging.getLogger(__name__)
@ -94,12 +93,12 @@ def execute_with_retries(method, *args, **kwargs):
error_code = getattr(e, 'http_status', None) or getattr(
e, 'status_code', None) or getattr(e, 'code', None)
if error_code in ERRORS_TO_RETRY:
LOG.warning(_LW('Occasional error occurred during "{method}" '
'execution: {error_msg} ({error_code}). '
'Operation will be retried.').format(
method=method.__name__,
error_msg=e,
error_code=error_code))
LOG.warning('Occasional error occurred during "{method}" '
'execution: {error_msg} ({error_code}). '
'Operation will be retried.'.format(
method=method.__name__,
error_msg=e,
error_code=error_code))
attempts -= 1
retry_after = getattr(e, 'retry_after', 0)
context.sleep(max(retry_after, CONF.retries.retry_after))

View File

@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from sahara import context
from sahara.i18n import _LW
from sahara.service import sessions
from sahara.utils.openstack import base
from sahara.utils.openstack import keystone
@ -55,10 +54,10 @@ CONF.register_opts(opts, group=cinder_group)
def validate_config():
if CONF.cinder.api_version != 2:
LOG.warning(_LW('Unsupported Cinder API version: {bad}. Please set a '
'correct value for cinder.api_version in your '
'sahara.conf file (currently supported versions are: '
'{supported}). Falling back to Cinder API version 2.')
LOG.warning('Unsupported Cinder API version: {bad}. Please set a '
'correct value for cinder.api_version in your '
'sahara.conf file (currently supported versions are: '
'{supported}). Falling back to Cinder API version 2.'
.format(bad=CONF.cinder.api_version,
supported=[2]))
CONF.set_override('api_version', 2, group='cinder', enforce_type=True)