Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, _LW and _LC translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Deleting all of the _LE, _LI, _LW and _LC markers in a single patch would
make it a giant change, so this patch only deletes _LI; _LW, _LC and _LE
were handled in previous patches. This is the last patch removing log
translations from sahara.

Change-Id: I73e99ca7fd5bc0cd8df7cce30b5f12e00e70149c
lcsong 2017-04-18 10:56:48 +08:00
parent 6db5f8bc28
commit 8b4af0daf3
24 changed files with 74 additions and 149 deletions
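
For reference, every file in this commit follows the same mechanical pattern; a minimal before/after sketch in Python (the message and variable names are illustrative, not taken from any single file here):

    # Before: info-level messages were wrapped in the _LI marker
    from sahara.i18n import _LI
    LOG.info(_LI("Cluster {name} started").format(name=cluster.name))

    # After: the marker import is dropped and the message is logged directly
    LOG.info("Cluster {name} started".format(name=cluster.name))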

View File

@ -31,13 +31,4 @@ Formatting Guidelines
Sahara uses string formatting defined in `PEP 3101`_ for logs.
Translation Guidelines
----------------------
All log levels except Debug require translation. None of the separate
CLI tools packaged with sahara contain log translations.
* Debug: no translation
* Info: _LI
.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/
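
As a quick illustration of the PEP 3101 style the remaining guideline refers to, log calls use str.format() with named replacement fields (the names below are hypothetical, not taken from sahara's code):

    # str.format() with named fields, per PEP 3101
    LOG.info("Deleted node group template {name} (id: {id})".format(
    name=template.name, id=template.id))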

View File

@ -13,7 +13,6 @@
# limitations under the License.
from sahara import conductor # noqa
from sahara.i18n import _LI
from sahara.plugins import base as plugins_base
from sahara.utils import remote
@ -89,9 +88,9 @@ class ImageRemote(remote.TerminalOnlyRemote):
def execute_command(self, cmd, run_as_root=False, get_stderr=False,
raise_when_error=True, timeout=300):
try:
LOG.info(_LI("Issuing command: {cmd}").format(cmd=cmd))
LOG.info("Issuing command: {cmd}".format(cmd=cmd))
stdout = self.guest.sh(cmd)
LOG.info(_LI("Received response: {stdout}").format(stdout=stdout))
LOG.info("Received response: {stdout}".format(stdout=stdout))
return 0, stdout
except RuntimeError as ex:
if raise_when_error:

View File

@ -20,7 +20,6 @@ import six
from sahara.cli.image_pack import api
from sahara.i18n import _
from sahara.i18n import _LI
LOG = log.getLogger(__name__)
@ -108,7 +107,7 @@ def main():
CONF.reload_config_files()
log.setup(CONF, "sahara")
LOG.info(_LI("Command: {command}").format(command=' '.join(sys.argv)))
LOG.info("Command: {command}".format(command=' '.join(sys.argv)))
api.set_logger(LOG)
api.set_conf(CONF)
@ -121,5 +120,5 @@ def main():
api.pack_image(CONF.image, plugin, version, image_arguments,
CONF.root_fs, CONF.test_only)
LOG.info(_LI("Finished packing image for {plugin} at version {version}")
.format(plugin=plugin, version=version))
LOG.info("Finished packing image for {plugin} at version "
"{version}".format(plugin=plugin, version=version))

View File

@ -23,7 +23,6 @@ import six
from sahara import conductor
from sahara.db.templates import utils as u
from sahara.i18n import _LI
from sahara.service.validations import cluster_template_schema as clt
from sahara.service.validations import node_group_template_schema as ngt
from sahara.utils import api_validator
@ -397,7 +396,7 @@ def delete_node_group_template(ctx, template, rollback=False):
reason=e,
rollback=rollback_msg))
else:
LOG.info(_LI("Deleted node group template {info}{rollback}").format(
LOG.info("Deleted node group template {info}{rollback}".format(
info=u.name_and_id(template), rollback=rollback_msg))
@ -419,9 +418,8 @@ def reverse_node_group_template_updates(ctx, update_info):
info=u.name_and_id(template),
reason=e))
else:
LOG.info(_LI("Rolled back update for "
"node group template {info}").format(
info=u.name_and_id(template)))
LOG.info("Rolled back update for node group "
"template {info}".format(info=u.name_and_id(template)))
def add_node_group_templates(ctx, node_groups):
@ -466,10 +464,9 @@ def add_node_group_templates(ctx, node_groups):
if template['updated_at'] != current['updated_at']:
ng_info["updated"].append((template, updated_fields))
LOG.info(_LI("Updated node group template {info} "
"from {path}").format(
info=u.name_and_id(template),
path=ng["path"]))
LOG.info("Updated node group template {info} "
"from {path}".format(info=u.name_and_id(template),
path=ng["path"]))
else:
LOG.debug("No change to node group template {info} "
"from {path}".format(
@ -487,9 +484,9 @@ def add_node_group_templates(ctx, node_groups):
raise Handled()
ng_info["created"].append(template)
LOG.info(_LI("Created node group template {info} "
"from {path}").format(info=u.name_and_id(template),
path=ng["path"]))
LOG.info("Created node group template {info} "
"from {path}".format(info=u.name_and_id(template),
path=ng["path"]))
# For the purposes of substitution we need a dict of id by name
ng_info["ids"][template['name']] = template['id']
@ -535,7 +532,7 @@ def delete_cluster_template(ctx, template, rollback=False):
reason=e,
rollback=rollback_msg))
else:
LOG.info(_LI("Deleted cluster template {info}{rollback}").format(
LOG.info("Deleted cluster template {info}{rollback}".format(
info=u.name_and_id(template), rollback=rollback_msg))
@ -557,9 +554,8 @@ def reverse_cluster_template_updates(ctx, update_info):
info=u.name_and_id(template),
reason=e))
else:
LOG.info(_LI("Rolled back update for "
"cluster template {info}").format(
info=u.name_and_id(template)))
LOG.info("Rolled back update for cluster "
"template {info}".format(info=u.name_and_id(template)))
def add_cluster_templates(ctx, clusters, ng_dict):
@ -628,10 +624,9 @@ def add_cluster_templates(ctx, clusters, ng_dict):
if template['updated_at'] != current['updated_at']:
updated.append((template, updated_fields))
LOG.info(_LI("Updated cluster template {info} "
"from {path}").format(
info=u.name_and_id(template),
path=cl['path']))
LOG.info("Updated cluster template {info} "
"from {path}".format(info=u.name_and_id(template),
path=cl['path']))
else:
LOG.debug("No change to cluster template {info} "
"from {path}".format(info=u.name_and_id(current),
@ -649,9 +644,9 @@ def add_cluster_templates(ctx, clusters, ng_dict):
raise Handled()
created.append(template)
LOG.info(_LI("Created cluster template {info} "
"from {path}").format(info=u.name_and_id(template),
path=cl['path']))
LOG.info("Created cluster template {info} "
"from {path}".format(info=u.name_and_id(template),
path=cl['path']))
except Handled:
error = do_reversals(created, updated)

View File

@ -20,7 +20,6 @@ from oslo_log import log
import pkg_resources as pkg
from sahara.db.templates import api
from sahara.i18n import _LI
from sahara import version
LOG = log.getLogger(__name__)
@ -49,8 +48,8 @@ def extra_option_checks():
# will be loaded by Sahara
if not CONF.command.plugin_name:
if "plugins" in CONF and CONF.plugins:
LOG.info(_LI("Using plugin list {plugins} from config"
).format(plugins=CONF.plugins))
LOG.info("Using plugin list {plugins} from "
"config".format(plugins=CONF.plugins))
else:
print("No plugins specified with --plugin-name "
"or config", file=sys.stderr)
@ -199,11 +198,11 @@ def main():
# Since this may be scripted, record the command in the log
# so a user can know exactly what was done
LOG.info(_LI("Command: {command}").format(command=' '.join(sys.argv)))
LOG.info("Command: {command}".format(command=' '.join(sys.argv)))
api.set_logger(LOG)
api.set_conf(CONF)
CONF.command.func()
LOG.info(_LI("Finished {command}").format(command=CONF.command.name))
LOG.info("Finished {command}".format(command=CONF.command.name))

View File

@ -23,10 +23,3 @@ _translators = oslo_i18n.TranslatorFactory(domain='sahara')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
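
With _LI gone, sahara.i18n exposes only the primary translator for user-facing strings, while log calls take plain strings; a minimal sketch of the resulting split (the error text is borrowed from the plugin manager hunk below, the exception class and names are illustrative):

    from sahara.i18n import _

    # User-facing messages (exceptions, API responses) remain translatable
    raise RuntimeError(_("Plugin with name '%s' already exists.") % name)

    # Log messages are now plain, untranslated strings
    LOG.info("Plugin {name} loaded".format(name=name))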

View File

@ -27,7 +27,6 @@ from sahara.api import acl
from sahara.common import config as common_config
from sahara import config
from sahara import context
from sahara.i18n import _LI
from sahara.plugins import base as plugins_base
from sahara.service import api
from sahara.service.castellan import config as castellan
@ -90,7 +89,7 @@ def setup_common(possible_topdir, service_name):
ds_manager.setup_data_sources()
jb_manager.setup_job_binaries()
LOG.info(_LI('Sahara {service} started').format(service=service_name))
LOG.info('Sahara {service} started'.format(service=service_name))
def validate_castellan_config():
@ -129,7 +128,7 @@ def _load_driver(namespace, name):
name=name,
invoke_on_load=True
)
LOG.info(_LI("Driver {name} successfully loaded").format(name=name))
LOG.info("Driver {name} successfully loaded".format(name=name))
return extension_manager.driver

View File

@ -23,7 +23,6 @@ from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.plugins import labels
from sahara.utils import resources
@ -99,7 +98,7 @@ class PluginManager(object):
_("Plugin with name '%s' already exists.") % ext.name)
ext.obj.name = ext.name
self.plugins[ext.name] = ext.obj
LOG.info(_LI("Plugin {plugin_name} loaded {entry_point}").format(
LOG.info("Plugin {plugin_name} loaded {entry_point}".format(
plugin_name=ext.name,
entry_point=ext.entry_point_target))

View File

@ -21,7 +21,6 @@ import six
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
import sahara.plugins.mapr.abstract.configurer as ac
from sahara.plugins.mapr.domain import distro as d
from sahara.plugins.mapr.domain import service as srvc
@ -92,7 +91,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
self._update_services(cluster_context, existing)
self._restart_services(cluster_context)
self._update_cluster_info(cluster_context)
LOG.info(_LI('Existing instances successfully configured'))
LOG.info('Existing instances successfully configured')
def _configure_services(self, cluster_context, instances):
for service in cluster_context.cluster_services:
@ -185,7 +184,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
util.execute_on_instances(instances, write_topology_data)
LOG.info(_LI('Cluster topology successfully configured'))
LOG.info('Cluster topology successfully configured')
@el.provision_step(_("Write config files to instances"))
def _write_config_files(self, cluster_context, instances):
@ -232,7 +231,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
service=s)
if service_instances:
s.post_install(cluster_context, instances)
LOG.info(_LI('Post install hooks execution successfully executed'))
LOG.info('Post install hooks execution successfully executed')
def _update_cluster_info(self, cluster_context):
LOG.debug('Updating UI information.')
@ -356,7 +355,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
updated = cluster_context.filter_instances(instances,
service=service)
service.post_start(cluster_context, updated)
LOG.info(_LI('Post start hooks successfully executed'))
LOG.info('Post start hooks successfully executed')
@el.provision_step(_("Set cluster mode"))
def _set_cluster_mode(self, cluster_context, instances):
@ -399,7 +398,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
LOG.debug('Executing post configure.sh hooks')
for service in cluster_context.cluster_services:
service.post_configure_sh(cluster_context, instances)
LOG.info(_LI('Post configure.sh hooks successfully executed'))
LOG.info('Post configure.sh hooks successfully executed')
def _post_configure_services(self, cluster_context, instances):
for service in cluster_context.cluster_services:

View File

@ -21,7 +21,6 @@ from oslo_utils import timeutils
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
import sahara.plugins.exceptions as ex
import sahara.plugins.mapr.abstract.node_manager as s
import sahara.plugins.mapr.services.management.management as mng
@ -57,7 +56,7 @@ class BaseNodeManager(s.AbstractNodeManager):
ec, out = r.execute_command(command, run_as_root=True)
command = MOVE_NODE_CMD % out.strip()
cldb_remote.execute_command(command, run_as_root=True)
LOG.info(_LI("Nodes successfully moved"))
LOG.info("Nodes successfully moved")
def remove_nodes(self, cluster_context, instances):
LOG.debug("Removing nodes from cluster")
@ -72,7 +71,7 @@ class BaseNodeManager(s.AbstractNodeManager):
}
command = REMOVE_NODE_CMD % args
cldb_remote.execute_command(command, run_as_root=True)
LOG.info(_LI("Nodes successfully removed"))
LOG.info("Nodes successfully removed")
def start(self, cluster_context, instances=None):
instances = instances or cluster_context.get_instances()
@ -148,17 +147,17 @@ class BaseNodeManager(s.AbstractNodeManager):
def _start_zk_nodes(self, instances):
LOG.debug('Starting ZooKeeper nodes')
self._start_nodes(instances, mng.ZOOKEEPER.ui_name)
LOG.info(_LI('ZooKeeper nodes successfully started'))
LOG.info('ZooKeeper nodes successfully started')
def _start_cldb_nodes(self, instances):
LOG.debug('Starting CLDB nodes')
self._start_nodes(instances, WARDEN_SERVICE)
LOG.info(_LI('CLDB nodes successfully started'))
LOG.info('CLDB nodes successfully started')
def _start_non_cldb_nodes(self, instances):
LOG.debug('Starting non-control nodes')
self._start_nodes(instances, WARDEN_SERVICE)
LOG.info(_LI('Non-control nodes successfully started'))
LOG.info('Non-control nodes successfully started')
def _stop_zk_nodes(self, instances):
self._stop_nodes(instances, mng.ZOOKEEPER.ui_name)

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
import sahara.plugins.mapr.domain.configuration_file as bcf
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
@ -113,7 +112,7 @@ class MapRFS(s.Service):
for instance in file_servers:
tg.spawn('init-mfs-%s' % instance.id,
self._init_mfs_instance, instance)
LOG.info(_LI('MapR FS successfully initialized'))
LOG.info('MapR FS successfully initialized')
@el.provision_event(instance_reference=1)
def _init_mfs_instance(self, instance):

View File

@ -22,7 +22,6 @@ from oslo_log import log as logging
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.plugins import exceptions as ex
from sahara.plugins import provisioning as p
from sahara.plugins import recommendations_utils as ru
@ -136,7 +135,7 @@ class SparkProvider(p.ProvisioningPluginBase):
def _start_spark(self, cluster, sm_instance):
with remote.get_remote(sm_instance) as r:
run.start_spark_master(r, self._spark_home(cluster))
LOG.info(_LI("Spark service has been started"))
LOG.info("Spark service has been started")
def start_cluster(self, cluster):
nn_instance = utils.get_instance(cluster, "namenode")
@ -149,7 +148,7 @@ class SparkProvider(p.ProvisioningPluginBase):
self._start_datanode_processes(dn_instances)
run.await_datanodes(cluster)
LOG.info(_LI("Hadoop services have been started"))
LOG.info("Hadoop services have been started")
with remote.get_remote(nn_instance) as r:
r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
@ -160,7 +159,7 @@ class SparkProvider(p.ProvisioningPluginBase):
self.start_spark(cluster)
swift_helper.install_ssl_certs(utils.get_instances(cluster))
LOG.info(_LI('Cluster has been started successfully'))
LOG.info('Cluster has been started successfully')
self._set_cluster_info(cluster)
def _spark_home(self, cluster):
@ -456,7 +455,7 @@ class SparkProvider(p.ProvisioningPluginBase):
swift_helper.install_ssl_certs(instances)
run.start_spark_master(r_master, self._spark_home(cluster))
LOG.info(_LI("Spark master service has been restarted"))
LOG.info("Spark master service has been restarted")
def _get_scalable_processes(self):
return ["datanode", "slave"]

View File

@ -20,7 +20,6 @@ import yaml
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.plugins import exceptions as ex
from sahara.plugins import provisioning as p
from sahara.plugins.storm import config_helper as c_helper
@ -100,8 +99,8 @@ class StormProvider(p.ProvisioningPluginBase):
# start storm slaves
self._start_slave_processes(sl_instances)
LOG.info(_LI('Cluster {cluster} has been started successfully').format(
cluster=cluster.name))
LOG.info("Cluster {cluster} has been started successfully".format(
cluster=cluster.name))
self._set_cluster_info(cluster)
def get_edp_engine(self, cluster, job_type):
@ -180,8 +179,8 @@ class StormProvider(p.ProvisioningPluginBase):
def _start_storm_master(self, sm_instance):
with remote.get_remote(sm_instance) as r:
run.start_storm_nimbus_and_ui(r)
LOG.info(_LI("Storm master at {host} has been started").format(
host=sm_instance.hostname()))
LOG.info("Storm master at {host} has been started".format(
host=sm_instance.hostname()))
def _start_slave_processes(self, sl_instances):
if len(sl_instances) == 0:
@ -370,7 +369,7 @@ class StormProvider(p.ProvisioningPluginBase):
# start storm slaves
self._start_slave_processes(instances)
self.rebalance_topology(cluster)
LOG.info(_LI("Storm scaling has been started."))
LOG.info("Storm scaling has been started.")
def _get_scalable_processes(self):
return ["supervisor"]

View File

@ -19,7 +19,6 @@ from oslo_log import log as logging
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.plugins import utils as pu
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper
@ -248,5 +247,5 @@ def start_hiveserver_process(pctx, instance):
r.write_file_to('/tmp/create_hive_db.sql', sql_script)
_hive_create_db(r)
_hive_metastore_start(r)
LOG.info(_LI("Hive Metastore server at {host} has been "
"started").format(host=instance.hostname()))
LOG.info("Hive Metastore server at {host} has been "
"started".format(host=instance.hostname()))

View File

@ -21,8 +21,6 @@ from oslo_log import log
from oslo_utils import uuidutils
from tooz import coordination
from sahara.i18n import _LI
LOG = log.getLogger(__name__)
coordinator_opts = [
@ -51,7 +49,7 @@ class Coordinator(object):
self.coordinator = coordination.get_coordinator(
backend_url, self.member_id)
self.coordinator.start()
LOG.info(_LI('Coordination backend loaded successfully.'))
LOG.info('Coordination backend loaded successfully.')
except coordination.ToozError:
LOG.error('Error connecting to coordination backend.')
raise

View File

@ -23,7 +23,6 @@ from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
conductor = cond.API
@ -51,9 +50,8 @@ class DataSourceManager(object):
ext.name)
ext.obj.name = ext.name
self.data_sources[ext.name] = ext.obj
LOG.info(_LI("Data source name {ds_name} loaded {entry_point}")
.format(ds_name=ext.name,
entry_point=ext.entry_point_target))
LOG.info("Data source name {ds_name} loaded {entry_point}".format(
ds_name=ext.name, entry_point=ext.entry_point_target))
if len(self.data_sources) < len(config_ds):
loaded_ds = set(six.iterkeys(self.data_sources))

View File

@ -23,7 +23,6 @@ from stevedore import enabled
from sahara import conductor as cond
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
conductor = cond.API
@ -51,9 +50,8 @@ class JobBinaryManager(object):
ext.name)
ext.obj.name = ext.name
self.job_binaries[ext.name] = ext.obj
LOG.info(_LI("Job binary name {jb_name} loaded {entry_point}")
.format(jb_name=ext.name,
entry_point=ext.entry_point_target))
LOG.info("Job binary name {jb_name} loaded {entry_point}".format(
jb_name=ext.name, entry_point=ext.entry_point_target))
if len(self.job_binaries) < len(config_jb):
loaded_jb = set(six.iterkeys(self.job_binaries))

View File

@ -23,7 +23,6 @@ from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.spark import engine as spark_engine
@ -94,8 +93,8 @@ def _run_job(job_execution_id):
job_execution = conductor.job_execution_get(ctx, job_execution_id)
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
if cluster is None or cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
LOG.info(_LI("Can not run this job on a non-existant cluster or a"
" inactive cluster."))
LOG.info("Can not run this job on a non-existant cluster or a "
"inactive cluster.")
return
eng = get_job_engine(cluster, job_execution)
@ -154,12 +153,11 @@ def cancel_job(job_execution_id):
ctx = context.ctx()
job_execution = conductor.job_execution_get(ctx, job_execution_id)
if job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED:
LOG.info(_LI("Job execution is already finished and shouldn't be"
" canceled"))
LOG.info("Job execution is already finished and shouldn't be canceled")
return job_execution
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
if cluster is None:
LOG.info(_LI("Can not cancel this job on a non-existant cluster."))
LOG.info("Can not cancel this job on a non-existant cluster.")
return job_execution
engine = get_job_engine(cluster, job_execution)
if engine is not None:
@ -179,18 +177,18 @@ def cancel_job(job_execution_id):
"{error}".format(error=ex))
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info(_LI("Job execution was canceled successfully"))
LOG.info("Job execution was canceled successfully")
return job_execution
context.sleep(3)
job_execution = conductor.job_execution_get(
ctx, job_execution_id)
if not job_execution:
LOG.info(_LI("Job execution was deleted. "
"Canceling current operation."))
LOG.info("Job execution was deleted. "
"Canceling current operation.")
return job_execution
else:
LOG.info(_LI("Job execution status: {status}").format(
status=job_execution.info['status']))
LOG.info("Job execution status: {status}").format(
status=job_execution.info['status'])
return job_execution
else:
raise e.CancelingFailed(_('Job execution %s was not canceled')
@ -256,7 +254,7 @@ def suspend_job(job_execution_id):
"{error}")).format(error=ex)
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info(_LI("Job execution was suspended successfully"))
LOG.info("Job execution was suspended successfully")
return job_execution
conductor.job_execution_update(

View File

@ -25,7 +25,6 @@ import six
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.service import networks
from sahara.service import volumes
from sahara.utils import cluster as cluster_utils
@ -94,8 +93,7 @@ class Engine(object):
ips_assigned = set()
self._ips_assign(ips_assigned, cluster, instances)
LOG.info(
_LI("All instances have IPs assigned"))
LOG.info("All instances have IPs assigned")
cluster = conductor.cluster_get(context.ctx(), cluster)
instances = cluster_utils.get_instances(cluster, ips_assigned)
@ -109,7 +107,7 @@ class Engine(object):
tg.spawn("wait-for-ssh-%s" % instance.instance_name,
self._wait_until_accessible, instance)
LOG.info(_LI("All instances are accessible"))
LOG.info("All instances are accessible")
@poll_utils.poll_status(
'wait_until_accessible', _("Wait for instance accessibility"),

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from sahara import conductor as cond
from sahara import context
from sahara.i18n import _LI
from sahara.plugins import provisioning as common_configs
from sahara.utils import cluster as c_u
@ -87,7 +86,7 @@ def _configure_ntp_on_instance(instance, url):
_sudo(r, "ntpdate -u {url}".format(url=url))
except Exception as e:
LOG.debug("Update time on VM failed with error: %s", e)
LOG.info(_LI("NTP successfully configured"))
LOG.info("NTP successfully configured")
def is_ntp_enabled(cluster):

View File

@ -17,7 +17,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from sahara import context
from sahara.i18n import _LI
from sahara.swift import utils as su
from sahara.utils import xmlutils as x
@ -63,8 +62,8 @@ def get_swift_configs():
conf['value'] = CONF.os_region_name
result = [cfg for cfg in configs if cfg['value']]
LOG.info(_LI("Swift would be integrated with the following "
"params: {result}").format(result=result))
LOG.info("Swift would be integrated with the following "
"params: {result}".format(result=result))
return result

View File

@ -23,7 +23,6 @@ from six.moves.urllib import parse
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _LI
from sahara.utils.notification import sender
from sahara.utils.openstack import base as auth_base
@ -96,8 +95,8 @@ def change_cluster_status(cluster, status, status_description=None):
cluster = conductor.cluster_update(ctx, cluster, update_dict)
conductor.cluster_provision_progress_update(ctx, cluster.id)
LOG.info(_LI("Cluster status has been changed. New status="
"{status}").format(status=cluster.status))
LOG.info("Cluster status has been changed. New status="
"{status}".format(status=cluster.status))
sender.status_notify(cluster.id, cluster.name, cluster.status,
"update")

View File

@ -133,7 +133,6 @@ def factory(register):
register(import_checks.hacking_import_groups)
register(import_checks.hacking_import_groups_together)
register(dict_constructor_with_list_copy)
register(logging_checks.validate_log_translations)
register(logging_checks.no_translate_debug_logs)
register(logging_checks.accepted_log_levels)
register(use_jsonutils)

View File

@ -20,39 +20,10 @@ import re
# keystone but it will need additional work and total checks refactoring.
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
accepted_log_level = re.compile(
r"^LOG\.(debug|info|exception|warning|error|critical)\(")
def validate_log_translations(logical_line, filename):
"""Check if log levels has translations and if it's correct.
S369
S370
S371
S372
"""
# NOTE(Kezar): sahara/tests included because we don't require translations
# in tests. sahara/db/templates provide separate cli interface so we don't
# want to translate it.
ignore_dirs = ["sahara/db/templates",
"sahara/tests"]
for directory in ignore_dirs:
if directory in filename:
return
# Translations are not required in the test directory.
# This will not catch all instances of violations, just direct
# misuse of the form LOG.info('Message').
msg = "S369: LOG.info messages require translations `_LI()`!"
if log_translation_LI.search(logical_line):
yield (0, msg)
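
For clarity, the S369 check removed above flagged LOG.info calls whose argument began with a bare string or _( instead of _LI(; a few illustrative lines showing what the log_translation_LI regex did and did not match (hypothetical messages):

    LOG.info("Starting cluster")       # flagged by S369: plain string
    LOG.info(_("Starting cluster"))    # flagged by S369: _() rather than _LI()
    LOG.info(_LI("Starting cluster"))  # accepted by the old check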
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('