Merge "Handle log message interpolation by the logger in common/strategies/"

Jenkins 2017-06-06 21:36:11 +00:00 committed by Gerrit Code Review
commit 5d05c68e26
13 changed files with 124 additions and 117 deletions
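
The change replaces eager %-interpolation at logging call sites with the logger's deferred interpolation: the format arguments are passed to LOG.debug/LOG.info/LOG.exception and the message string is only built if the record is actually emitted, so nothing is rendered when the log level filters the call out. The same pattern applies to translated messages wrapped in _(). A minimal sketch of the pattern with the standard library logger; the cluster id and instance ids below are illustrative values, not taken from this change:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

cluster_id = 'cluster-1234'          # illustrative value
instance_ids = ['inst-1', 'inst-2']  # illustrative value

# Old style: the message is built with % before the logger sees it,
# even when DEBUG logging is disabled.
LOG.debug("Begin create_cluster for id: %s." % cluster_id)

# New style: the logger interpolates lazily, only when the record is emitted.
LOG.debug("Begin create_cluster for id: %s.", cluster_id)

# Multiple values use named placeholders with a single mapping argument,
# matching the multi-argument conversions in the hunks below.
LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s",
          {'cluster_id': cluster_id, 'instance_ids': instance_ids})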

@@ -169,7 +169,7 @@ class CassandraCluster(models.Cluster):
return "%s-member-%s-%s-%d" % (cluster_name, dc, rack, instance_idx)
def grow(self, instances):
- LOG.debug("Processing a request for growing cluster: %s" % self.id)
+ LOG.debug("Processing a request for growing cluster: %s", self.id)
self.validate_cluster_available()
@@ -193,7 +193,7 @@ class CassandraCluster(models.Cluster):
return CassandraCluster(context, db_info, datastore, datastore_version)
def shrink(self, removal_ids):
- LOG.debug("Processing a request for shrinking cluster: %s" % self.id)
+ LOG.debug("Processing a request for shrinking cluster: %s", self.id)
self.validate_cluster_available()

@@ -42,70 +42,70 @@ class CassandraGuestAgentAPI(guest_api.API):
"""
def get_data_center(self):
- LOG.debug("Retrieving the data center for node: %s" % self.id)
+ LOG.debug("Retrieving the data center for node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("get_data_center", guest_api.AGENT_LOW_TIMEOUT,
version=version)
def get_rack(self):
- LOG.debug("Retrieving the rack for node: %s" % self.id)
+ LOG.debug("Retrieving the rack for node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("get_rack", guest_api.AGENT_LOW_TIMEOUT,
version=version)
def set_seeds(self, seeds):
- LOG.debug("Configuring the gossip seeds for node: %s" % self.id)
+ LOG.debug("Configuring the gossip seeds for node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("set_seeds", guest_api.AGENT_LOW_TIMEOUT,
version=version, seeds=seeds)
def get_seeds(self):
- LOG.debug("Retrieving the gossip seeds for node: %s" % self.id)
+ LOG.debug("Retrieving the gossip seeds for node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("get_seeds", guest_api.AGENT_LOW_TIMEOUT,
version=version)
def set_auto_bootstrap(self, enabled):
- LOG.debug("Setting the auto-bootstrap to '%s' for node: %s"
- % (enabled, self.id))
+ LOG.debug("Setting the auto-bootstrap to '%(enabled)s' "
+ "for node: %(id)s", {'enabled': enabled, 'id': self.id})
version = guest_api.API.API_BASE_VERSION
return self._call("set_auto_bootstrap", guest_api.AGENT_LOW_TIMEOUT,
version=version, enabled=enabled)
def cluster_complete(self):
- LOG.debug("Sending a setup completion notification for node: %s"
- % self.id)
+ LOG.debug("Sending a setup completion notification for node: %s",
+ self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
version=version)
def node_cleanup_begin(self):
- LOG.debug("Signaling the node to prepare for cleanup: %s" % self.id)
+ LOG.debug("Signaling the node to prepare for cleanup: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("node_cleanup_begin", guest_api.AGENT_LOW_TIMEOUT,
version=version)
def node_cleanup(self):
- LOG.debug("Running cleanup on node: %s" % self.id)
+ LOG.debug("Running cleanup on node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._cast('node_cleanup', version=version)
def node_decommission(self):
- LOG.debug("Decommission node: %s" % self.id)
+ LOG.debug("Decommission node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._cast("node_decommission", version=version)
def cluster_secure(self, password):
- LOG.debug("Securing the cluster via node: %s" % self.id)
+ LOG.debug("Securing the cluster via node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call(
@@ -113,14 +113,14 @@ class CassandraGuestAgentAPI(guest_api.API):
version=version, password=password)
def get_admin_credentials(self):
- LOG.debug("Retrieving the admin credentials from node: %s" % self.id)
+ LOG.debug("Retrieving the admin credentials from node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("get_admin_credentials", guest_api.AGENT_LOW_TIMEOUT,
version=version)
def store_admin_credentials(self, admin_credentials):
- LOG.debug("Storing the admin credentials on node: %s" % self.id)
+ LOG.debug("Storing the admin credentials on node: %s", self.id)
version = guest_api.API.API_BASE_VERSION
return self._call("store_admin_credentials",

@@ -45,7 +45,7 @@ class CassandraTaskManagerStrategy(base.BaseTaskManagerStrategy):
class CassandraClusterTasks(task_models.ClusterTasks):
def create_cluster(self, context, cluster_id):
- LOG.debug("Begin create_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin create_cluster for id: %s.", cluster_id)
def _create_cluster():
cluster_node_ids = self.find_cluster_node_ids(cluster_id)
@@ -64,10 +64,10 @@ class CassandraClusterTasks(task_models.ClusterTasks):
# Once all nodes are configured, start the seed nodes one at a time
# followed by the rest of the nodes.
try:
- LOG.debug("Selected seed nodes: %s" % seeds)
+ LOG.debug("Selected seed nodes: %s", seeds)
for node in cluster_nodes:
- LOG.debug("Configuring node: %s." % node['id'])
+ LOG.debug("Configuring node: %s.", node['id'])
node['guest'].set_seeds(seeds)
node['guest'].set_auto_bootstrap(False)
@@ -113,7 +113,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("End create_cluster for id: %s." % cluster_id)
+ LOG.debug("End create_cluster for id: %s.", cluster_id)
@classmethod
def find_cluster_node_ids(cls, cluster_id):
@@ -175,7 +175,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
return ips_by_affinity
def grow_cluster(self, context, cluster_id, new_instance_ids):
- LOG.debug("Begin grow_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin grow_cluster for id: %s.", cluster_id)
def _grow_cluster():
# Wait for new nodes to get to cluster-ready status.
@@ -223,7 +223,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
seeds = self.choose_seed_nodes(cluster_nodes)
# Configure each cluster node with the updated list of seeds.
- LOG.debug("Updating all nodes with new seeds: %s" % seeds)
+ LOG.debug("Updating all nodes with new seeds: %s", seeds)
for node in cluster_nodes:
node['guest'].set_seeds(seeds)
@@ -237,10 +237,10 @@ class CassandraClusterTasks(task_models.ClusterTasks):
node['guest'].node_cleanup_begin()
node['guest'].node_cleanup()
LOG.debug("Waiting for node to finish its "
- "cleanup: %s" % nid)
+ "cleanup: %s", nid)
if not self._all_instances_running([nid], cluster_id):
LOG.warning(_("Node did not complete cleanup "
- "successfully: %s") % nid)
+ "successfully: %s"), nid)
LOG.debug("Cluster configuration finished successfully.")
except Exception:
@@ -259,10 +259,10 @@ class CassandraClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("End grow_cluster for id: %s." % cluster_id)
+ LOG.debug("End grow_cluster for id: %s.", cluster_id)
def shrink_cluster(self, context, cluster_id, removal_ids):
- LOG.debug("Begin shrink_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin shrink_cluster for id: %s.", cluster_id)
def _shrink_cluster():
cluster_node_ids = self.find_cluster_node_ids(cluster_id)
@@ -304,9 +304,9 @@ class CassandraClusterTasks(task_models.ClusterTasks):
remaining_nodes = [node for node in cluster_nodes
if node['id'] not in removal_ids]
seeds = self.choose_seed_nodes(remaining_nodes)
- LOG.debug("Selected seed nodes: %s" % seeds)
+ LOG.debug("Selected seed nodes: %s", seeds)
for node in remaining_nodes:
- LOG.debug("Configuring node: %s." % node['id'])
+ LOG.debug("Configuring node: %s.", node['id'])
node['guest'].set_seeds(seeds)
# Wait for the removed nodes to go SHUTDOWN.
@@ -339,7 +339,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("End shrink_cluster for id: %s." % cluster_id)
+ LOG.debug("End shrink_cluster for id: %s.", cluster_id)
def restart_cluster(self, context, cluster_id):
self.rolling_restart_cluster(

@@ -151,7 +151,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
return cls(context, db_info, datastore, datastore_version)
def grow(self, instances):
- LOG.debug("Growing cluster %s." % self.id)
+ LOG.debug("Growing cluster %s.", self.id)
self.validate_cluster_available()
@@ -179,7 +179,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
def shrink(self, instances):
"""Removes instances from a cluster."""
- LOG.debug("Shrinking cluster %s." % self.id)
+ LOG.debug("Shrinking cluster %s.", self.id)
self.validate_cluster_available()
removal_instances = [Instance.load(self.context, inst_id)

@@ -68,7 +68,7 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
return config_rendered
def create_cluster(self, context, cluster_id):
- LOG.debug("Begin create_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin create_cluster for id: %s.", cluster_id)
def _create_cluster():
# Fetch instances by cluster_id against instances table.
@@ -146,12 +146,12 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
LOG.exception(_("Timeout for building cluster."))
self.update_statuses_on_failure(cluster_id)
except TroveError:
- LOG.exception(_("Error creating cluster %s.") % cluster_id)
+ LOG.exception(_("Error creating cluster %s."), cluster_id)
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
- LOG.debug("End create_cluster for id: %s." % cluster_id)
+ LOG.debug("End create_cluster for id: %s.", cluster_id)
def _check_cluster_for_root(self, context, existing_instances,
new_instances):
@@ -164,7 +164,7 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
return
def grow_cluster(self, context, cluster_id, new_instance_ids):
- LOG.debug("Begin Galera grow_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin Galera grow_cluster for id: %s.", cluster_id)
def _grow_cluster():
@@ -249,16 +249,16 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
except Exception:
- LOG.exception(_("Error growing cluster %s.") % cluster_id)
+ LOG.exception(_("Error growing cluster %s."), cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
timeout.cancel()
- LOG.debug("End grow_cluster for id: %s." % cluster_id)
+ LOG.debug("End grow_cluster for id: %s.", cluster_id)
def shrink_cluster(self, context, cluster_id, removal_instance_ids):
- LOG.debug("Begin Galera shrink_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin Galera shrink_cluster for id: %s.", cluster_id)
def _shrink_cluster():
removal_instances = [Instance.load(context, instance_id)
@@ -277,7 +277,7 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
set(non_deleted_ids))
)
try:
- LOG.info(_("Deleting instances (%s)") % removal_instance_ids)
+ LOG.info(_("Deleting instances (%s)"), removal_instance_ids)
utils.poll_until(all_instances_marked_deleted,
sleep_time=2,
time_out=CONF.cluster_delete_time_out)
@@ -320,13 +320,13 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
except Exception:
- LOG.exception(_("Error shrinking cluster %s.") % cluster_id)
+ LOG.exception(_("Error shrinking cluster %s."), cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
finally:
timeout.cancel()
- LOG.debug("End shrink_cluster for id: %s." % cluster_id)
+ LOG.debug("End shrink_cluster for id: %s.", cluster_id)
def restart_cluster(self, context, cluster_id):
self.rolling_restart_cluster(context, cluster_id)

@@ -47,9 +47,9 @@ class MongoDbGuestAgentAPI(guest_api.API):
def add_shard(self, replica_set_name, replica_set_member):
LOG.debug("Adding shard with replSet %(replica_set_name)s and member "
"%(replica_set_member)s for instance "
- "%(id)s" % {'replica_set_name': replica_set_name,
- 'replica_set_member': replica_set_member,
- 'id': self.id})
+ "%(id)s", {'replica_set_name': replica_set_name,
+ 'replica_set_member': replica_set_member,
+ 'id': self.id})
version = guest_api.API.API_BASE_VERSION
return self._call("add_shard", guest_api.AGENT_HIGH_TIMEOUT,
@@ -58,7 +58,7 @@ class MongoDbGuestAgentAPI(guest_api.API):
replica_set_member=replica_set_member)
def add_members(self, members):
- LOG.debug("Adding members %(members)s on instance %(id)s" % {
+ LOG.debug("Adding members %(members)s on instance %(id)s", {
'members': members, 'id': self.id})
version = guest_api.API.API_BASE_VERSION
@@ -67,8 +67,8 @@ class MongoDbGuestAgentAPI(guest_api.API):
def add_config_servers(self, config_servers):
LOG.debug("Adding config servers %(config_servers)s for instance "
- "%(id)s" % {'config_servers': config_servers,
- 'id': self.id})
+ "%(id)s", {'config_servers': config_servers,
+ 'id': self.id})
version = guest_api.API.API_BASE_VERSION
return self._call("add_config_servers", guest_api.AGENT_HIGH_TIMEOUT,
@@ -129,7 +129,7 @@ class MongoDbGuestAgentAPI(guest_api.API):
version=version)
def is_shard_active(self, replica_set_name):
- LOG.debug("Checking if replica set %s is active" % replica_set_name)
+ LOG.debug("Checking if replica set %s is active", replica_set_name)
version = guest_api.API.API_BASE_VERSION
return self._call("is_shard_active",

@@ -61,20 +61,20 @@ class MongoDbTaskManagerStrategy(base.BaseTaskManagerStrategy):
class MongoDbClusterTasks(task_models.ClusterTasks):
def create_cluster(self, context, cluster_id):
- LOG.debug("begin create_cluster for id: %s" % cluster_id)
+ LOG.debug("begin create_cluster for id: %s", cluster_id)
def _create_cluster():
# fetch instances by cluster_id against instances table
db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
instance_ids = [db_instance.id for db_instance in db_instances]
- LOG.debug("instances in cluster %s: %s" % (cluster_id,
- instance_ids))
+ LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s",
+ {'cluster_id': cluster_id, 'instance_ids': instance_ids})
if not self._all_instances_ready(instance_ids, cluster_id):
return
- LOG.debug("all instances in cluster %s ready." % cluster_id)
+ LOG.debug("all instances in cluster %s ready.", cluster_id)
instances = [Instance.load(context, instance_id) for instance_id
in instance_ids]
@@ -82,17 +82,17 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
# filter query routers in instances into a new list: query_routers
query_routers = [instance for instance in instances if
instance.type == 'query_router']
- LOG.debug("query routers: %s" %
+ LOG.debug("query routers: %s",
[instance.id for instance in query_routers])
# filter config servers in instances into new list: config_servers
config_servers = [instance for instance in instances if
instance.type == 'config_server']
- LOG.debug("config servers: %s" %
+ LOG.debug("config servers: %s",
[instance.id for instance in config_servers])
# filter members (non router/configsvr) into a new list: members
members = [instance for instance in instances if
instance.type == 'member']
- LOG.debug("members: %s" %
+ LOG.debug("members: %s",
[instance.id for instance in members])
# for config_server in config_servers, append ip/hostname to
@@ -100,7 +100,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
# peel off the replica-set name and ip/hostname from 'x'
config_server_ips = [self.get_ip(instance)
for instance in config_servers]
- LOG.debug("config server ips: %s" % config_server_ips)
+ LOG.debug("config server ips: %s", config_server_ips)
if not self._add_query_routers(query_routers,
config_server_ips):
@@ -126,21 +126,22 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("end create_cluster for id: %s" % cluster_id)
+ LOG.debug("end create_cluster for id: %s", cluster_id)
def add_shard_cluster(self, context, cluster_id, shard_id,
replica_set_name):
- LOG.debug("begin add_shard_cluster for cluster %s shard %s"
- % (cluster_id, shard_id))
+ LOG.debug("begin add_shard_cluster for cluster %(cluster_id)s "
+ "shard %(shard_id)s", {'cluster_id': cluster_id,
+ 'shard_id': shard_id})
def _add_shard_cluster():
db_instances = DBInstance.find_all(cluster_id=cluster_id,
shard_id=shard_id).all()
instance_ids = [db_instance.id for db_instance in db_instances]
- LOG.debug("instances in shard %s: %s" % (shard_id,
- instance_ids))
+ LOG.debug("instances in shard %(shard_id)s: %(instance_ids)s",
+ {'shard_id': shard_id, 'instance_ids': instance_ids})
if not self._all_instances_ready(instance_ids, cluster_id,
shard_id):
return
@@ -173,11 +174,12 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("end add_shard_cluster for cluster %s shard %s"
- % (cluster_id, shard_id))
+ LOG.debug("end add_shard_cluster for cluster %(cluster_id)s "
+ "shard %(shard_id)s", {'cluster_id': cluster_id,
+ 'shard_id': shard_id})
def grow_cluster(self, context, cluster_id, instance_ids):
- LOG.debug("begin grow_cluster for MongoDB cluster %s" % cluster_id)
+ LOG.debug("begin grow_cluster for MongoDB cluster %s", cluster_id)
def _grow_cluster():
new_instances = [db_instance for db_instance in self.db_instances
@@ -194,8 +196,10 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
if not query_router_id:
return
for shard_id in shard_ids:
- LOG.debug('growing cluster by adding shard %s on query '
- 'router %s' % (shard_id, query_router_id))
+ LOG.debug('growing cluster by adding shard %(shard_id)s '
+ 'on query router %(router_id)s',
+ {'shard_id': shard_id,
+ 'router_id': query_router_id})
member_ids = [db_instance.id for db_instance in new_members
if db_instance.shard_id == shard_id]
if not self._all_instances_ready(
@@ -214,9 +218,10 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
config_servers_ids = [db_instance.id for db_instance
in self.db_instances
if db_instance.type == 'config_server']
- LOG.debug('growing cluster by adding query routers %s, '
- 'with config servers %s'
- % (query_router_ids, config_servers_ids))
+ LOG.debug('growing cluster by adding query routers '
+ '%(router)s, with config servers %(server)s',
+ {'router': query_router_ids,
+ 'server': config_servers_ids})
if not self._all_instances_ready(
query_router_ids, cluster_id
):
@@ -249,10 +254,10 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("end grow_cluster for MongoDB cluster %s" % self.id)
+ LOG.debug("end grow_cluster for MongoDB cluster %s", self.id)
def shrink_cluster(self, context, cluster_id, instance_ids):
- LOG.debug("begin shrink_cluster for MongoDB cluster %s" % cluster_id)
+ LOG.debug("begin shrink_cluster for MongoDB cluster %s", cluster_id)
def _shrink_cluster():
def all_instances_marked_deleted():
@@ -284,7 +289,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("end shrink_cluster for MongoDB cluster %s" % self.id)
+ LOG.debug("end shrink_cluster for MongoDB cluster %s", self.id)
def get_cluster_admin_password(self, context):
"""The cluster admin's user credentials are stored on all query
@@ -297,7 +302,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
"""Initialize the replica set by calling the primary member guest's
add_members.
"""
- LOG.debug('initializing replica set on %s' % primary_member.id)
+ LOG.debug('initializing replica set on %s', primary_member.id)
other_members_ips = []
try:
for member in other_members:
@@ -321,8 +326,10 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
if not self._init_replica_set(primary_member, other_members):
return False
replica_set = self.get_guest(primary_member).get_replica_set_name()
- LOG.debug('adding replica set %s as shard %s to cluster %s'
- % (replica_set, primary_member.shard_id, self.id))
+ LOG.debug('adding replica set %(replica_set)s as shard %(shard_id)s '
+ 'to cluster %(cluster_id)s',
+ {'replica_set': replica_set,
+ 'shard_id': primary_member.shard_id, 'cluster_id': self.id})
try:
self.get_guest(query_router).add_shard(
replica_set, self.get_ip(primary_member))
@@ -352,13 +359,13 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
generated password, else the password needs to be retrieved from
and existing query router.
"""
- LOG.debug('adding new query router(s) %s with config server '
- 'ips %s' % ([i.id for i in query_routers],
- config_server_ips))
+ LOG.debug('adding new query router(s) %(routers)s with config server '
+ 'ips %(ips)s', {'routers': [i.id for i in query_routers],
+ 'ips': config_server_ips})
for query_router in query_routers:
try:
- LOG.debug("calling add_config_servers on query router %s"
- % query_router.id)
+ LOG.debug("calling add_config_servers on query router %s",
+ query_router.id)
guest = self.get_guest(query_router)
guest.add_config_servers(config_server_ips)
if not admin_password:
@@ -378,7 +385,7 @@ class MongoDbTaskManagerAPI(task_api.API):
def mongodb_add_shard_cluster(self, cluster_id, shard_id,
replica_set_name):
- LOG.debug("Making async call to add shard cluster %s " % cluster_id)
+ LOG.debug("Making async call to add shard cluster %s ", cluster_id)
version = task_api.API.API_BASE_VERSION
cctxt = self.client.prepare(version=version)
cctxt.cast(self.context,

@@ -43,7 +43,7 @@ class RedisTaskManagerStrategy(base.BaseTaskManagerStrategy):
class RedisClusterTasks(task_models.ClusterTasks):
def create_cluster(self, context, cluster_id):
- LOG.debug("Begin create_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin create_cluster for id: %s.", cluster_id)
def _create_cluster():
@@ -100,10 +100,10 @@ class RedisClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("End create_cluster for id: %s." % cluster_id)
+ LOG.debug("End create_cluster for id: %s.", cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
- LOG.debug("Begin grow_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin grow_cluster for id: %s.", cluster_id)
def _grow_cluster():
@@ -144,12 +144,12 @@ class RedisClusterTasks(task_models.ClusterTasks):
LOG.exception(_("Timeout for growing cluster."))
self.update_statuses_on_failure(cluster_id)
except Exception:
- LOG.exception(_("Error growing cluster %s.") % cluster_id)
+ LOG.exception(_("Error growing cluster %s."), cluster_id)
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
- LOG.debug("End grow_cluster for id: %s." % cluster_id)
+ LOG.debug("End grow_cluster for id: %s.", cluster_id)
class RedisTaskManagerAPI(task_api.API):

@@ -42,14 +42,14 @@ class VerticaGuestAgentAPI(guest_api.API):
"""
def get_public_keys(self, user):
- LOG.debug("Getting public keys for user: %s." % user)
+ LOG.debug("Getting public keys for user: %s.", user)
version = guest_api.API.API_BASE_VERSION
return self._call("get_public_keys", guest_api.AGENT_HIGH_TIMEOUT,
version=version, user=user)
def authorize_public_keys(self, user, public_keys):
- LOG.debug("Authorizing public keys for user: %s." % user)
+ LOG.debug("Authorizing public keys for user: %s.", user)
version = guest_api.API.API_BASE_VERSION
return self._call("authorize_public_keys",
@@ -58,28 +58,28 @@ class VerticaGuestAgentAPI(guest_api.API):
user=user, public_keys=public_keys)
def install_cluster(self, members):
- LOG.debug("Installing Vertica cluster on members: %s." % members)
+ LOG.debug("Installing Vertica cluster on members: %s.", members)
version = guest_api.API.API_BASE_VERSION
return self._call("install_cluster", CONF.cluster_usage_timeout,
version=version, members=members)
def grow_cluster(self, members):
- LOG.debug("Growing Vertica cluster with members: %s." % members)
+ LOG.debug("Growing Vertica cluster with members: %s.", members)
version = guest_api.API.API_BASE_VERSION
return self._call("grow_cluster", CONF.cluster_usage_timeout,
version=version, members=members)
def shrink_cluster(self, members):
- LOG.debug("Shrinking Vertica cluster with members: %s." % members)
+ LOG.debug("Shrinking Vertica cluster with members: %s.", members)
version = guest_api.API.API_BASE_VERSION
return self._call("shrink_cluster", CONF.cluster_usage_timeout,
version=version, members=members)
def mark_design_ksafe(self, k):
- LOG.debug("Setting vertica k-safety level to : %s." % k)
+ LOG.debug("Setting vertica k-safety level to : %s.", k)
version = guest_api.API.API_BASE_VERSION
return self._call("mark_design_ksafe", CONF.cluster_usage_timeout,

@@ -44,7 +44,7 @@ class VerticaTaskManagerStrategy(base.BaseTaskManagerStrategy):
class VerticaClusterTasks(task_models.ClusterTasks):
def create_cluster(self, context, cluster_id):
- LOG.debug("Begin create_cluster for id: %s." % cluster_id)
+ LOG.debug("Begin create_cluster for id: %s.", cluster_id)
def _create_cluster():
@@ -79,7 +79,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
for guest in guests:
guest.authorize_public_keys(user, pub_key)
- LOG.debug("Installing cluster with members: %s." % member_ips)
+ LOG.debug("Installing cluster with members: %s.", member_ips)
for db_instance in db_instances:
if db_instance['type'] == 'master':
master_instance = Instance.load(context,
@@ -107,12 +107,12 @@ class VerticaClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("End create_cluster for id: %s." % cluster_id)
+ LOG.debug("End create_cluster for id: %s.", cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
def _grow_cluster():
- LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)
+ LOG.debug("begin grow_cluster for Vertica cluster %s", cluster_id)
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False).all()
@@ -165,7 +165,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
LOG.exception(_("Timeout for growing cluster."))
self.update_statuses_on_failure(cluster_id)
except Exception:
- LOG.exception(_("Error growing cluster %s.") % cluster_id)
+ LOG.exception(_("Error growing cluster %s."), cluster_id)
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -196,7 +196,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
db_instance.id)
if self.get_ip(master_instance) in remove_member_ips:
raise RuntimeError(_("Cannot remove master instance!"))
- LOG.debug("Marking cluster k-safety: %s" % k)
+ LOG.debug("Marking cluster k-safety: %s", k)
self.get_guest(master_instance).mark_design_ksafe(k)
self.get_guest(master_instance).shrink_cluster(
remove_member_ips)
@@ -217,12 +217,12 @@ class VerticaClusterTasks(task_models.ClusterTasks):
finally:
timeout.cancel()
- LOG.debug("end shrink_cluster for Vertica cluster id %s" % self.id)
+ LOG.debug("end shrink_cluster for Vertica cluster id %s", self.id)
class VerticaTaskManagerAPI(task_api.API):
def _cast(self, method_name, version, **kwargs):
- LOG.debug("Casting %s" % method_name)
+ LOG.debug("Casting %s", method_name)
cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, method_name, **kwargs)

@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
def load_api_strategy(manager):
clazz = CONF.get(manager).get('api_strategy')
- LOG.debug("Loading class %s" % clazz)
+ LOG.debug("Loading class %s", clazz)
api_strategy = import_class(clazz)
return api_strategy()
@@ -34,7 +34,7 @@ def load_api_strategy(manager):
def load_taskmanager_strategy(manager):
try:
clazz = CONF.get(manager).get('taskmanager_strategy')
- LOG.debug("Loading class %s" % clazz)
+ LOG.debug("Loading class %s", clazz)
taskmanager_strategy = import_class(clazz)
return taskmanager_strategy()
except NoSuchOptError:
@@ -44,7 +44,7 @@ def load_taskmanager_strategy(manager):
def load_guestagent_strategy(manager):
try:
clazz = CONF.get(manager).get('guestagent_strategy')
- LOG.debug("Loading class %s" % clazz)
+ LOG.debug("Loading class %s", clazz)
guestagent_strategy = import_class(clazz)
return guestagent_strategy()
except NoSuchOptError:

@@ -22,5 +22,5 @@ LOG = logging.getLogger(__name__)
def get_storage_strategy(storage_driver, ns=__name__):
- LOG.debug("Getting storage strategy: %s." % storage_driver)
+ LOG.debug("Getting storage strategy: %s.", storage_driver)
return Strategy.get_strategy(storage_driver, ns)

@@ -112,11 +112,11 @@ class SwiftStorage(base.Storage):
which is typically in the format '<backup_id>.<ext>.gz'
"""
- LOG.info(_('Saving %(filename)s to %(container)s in swift.')
- % {'filename': filename, 'container': BACKUP_CONTAINER})
+ LOG.info(_('Saving %(filename)s to %(container)s in swift.'),
+ {'filename': filename, 'container': BACKUP_CONTAINER})
# Create the container if it doesn't already exist
- LOG.debug('Creating container %s.' % BACKUP_CONTAINER)
+ LOG.debug('Creating container %s.', BACKUP_CONTAINER)
self.connection.put_container(BACKUP_CONTAINER)
# Swift Checksum is the checksum of the concatenated segment checksums
@@ -124,7 +124,7 @@ class SwiftStorage(base.Storage):
# Wrap the output of the backup process to segment it for swift
stream_reader = StreamReader(stream, filename, MAX_FILE_SIZE)
- LOG.debug('Using segment size %s' % stream_reader.max_file_size)
+ LOG.debug('Using segment size %s', stream_reader.max_file_size)
url = self.connection.url
# Full location where the backup manifest is stored
@@ -135,7 +135,7 @@ class SwiftStorage(base.Storage):
# Read from the stream and write to the container in swift
while not stream_reader.end_of_file:
- LOG.debug('Saving segment %s.' % stream_reader.segment)
+ LOG.debug('Saving segment %s.', stream_reader.segment)
path = stream_reader.segment_path
etag = self.connection.put_object(BACKUP_CONTAINER,
stream_reader.segment,
@@ -164,7 +164,7 @@ class SwiftStorage(base.Storage):
# All segments uploaded.
num_segments = len(segment_results)
- LOG.debug('File uploaded in %s segments.' % num_segments)
+ LOG.debug('File uploaded in %s segments.', num_segments)
# An SLO will be generated if the backup was more than one segment in
# length.
@@ -178,11 +178,11 @@ class SwiftStorage(base.Storage):
for key, value in metadata.items():
headers[self._set_attr(key)] = value
- LOG.debug('Metadata headers: %s' % str(headers))
+ LOG.debug('Metadata headers: %s', str(headers))
if large_object:
LOG.info(_('Creating the manifest file.'))
manifest_data = json.dumps(segment_results)
- LOG.debug('Manifest contents: %s' % manifest_data)
+ LOG.debug('Manifest contents: %s', manifest_data)
# The etag returned from the manifest PUT is the checksum of the
# manifest object (which is empty); this is not the checksum we
# want.
@@ -195,9 +195,9 @@ class SwiftStorage(base.Storage):
final_swift_checksum = swift_checksum.hexdigest()
else:
LOG.info(_('Backup fits in a single segment. Moving segment '
- '%(segment)s to %(filename)s.')
- % {'segment': stream_reader.first_segment,
- 'filename': filename})
+ '%(segment)s to %(filename)s.'),
+ {'segment': stream_reader.first_segment,
+ 'filename': filename})
segment_result = segment_results[0]
# Just rename it via a special put copy.
headers['X-Copy-From'] = segment_result['path']
@@ -205,8 +205,8 @@ class SwiftStorage(base.Storage):
filename, '',
headers=headers)
# Delete the old segment file that was copied
- LOG.debug('Deleting the old segment file %s.'
- % stream_reader.first_segment)
+ LOG.debug('Deleting the old segment file %s.',
+ stream_reader.first_segment)
self.connection.delete_object(BACKUP_CONTAINER,
stream_reader.first_segment)
final_swift_checksum = segment_result['etag']