Avoid loading deleted instances that belong to a cluster.

A cluster node may be marked deleted by a "shrink" operation. In that
case, DBInstance.find_all(cluster_id=<id>).all() returns all of the
cluster's instances, including deleted ones, which may raise errors.

Change-Id: I088f3a99e0185ae33df4ce84c080adb7d813f17c
Author: zhanggang
Date:   2017-10-23 05:59:35 -04:00
commit b2b54ff05c (parent 3ea7d210b8)
8 changed files with 16 additions and 7 deletions
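
The fix is the same at every call site: pass deleted=False to
DBInstance.find_all() so that rows soft-deleted by a shrink are filtered
out by the query itself. A minimal sketch of the pattern, mirroring the
Cassandra helper changed below (the standalone function name and the
import path are illustrative assumptions, not part of this change):

    from trove.instance.models import DBInstance

    def live_cluster_node_ids(cluster_id):
        # Without deleted=False the query also returns rows that a
        # "shrink" operation has soft-deleted; loading those instances
        # afterwards is what raises the errors described above.
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        return [db_instance.id for db_instance in db_instances]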


@@ -117,7 +117,8 @@ class CassandraClusterTasks(task_models.ClusterTasks):
     @classmethod
     def find_cluster_node_ids(cls, cluster_id):
-        db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
+        db_instances = DBInstance.find_all(cluster_id=cluster_id,
+                                           deleted=False).all()
         return [db_instance.id for db_instance in db_instances]

     @classmethod


@@ -248,6 +248,7 @@ class MongoDbCluster(models.Cluster):
             raise exception.UnprocessableEntity(msg)

         db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
+                                                   deleted=False,
                                                    type='member').all()
         num_unique_shards = len(set([db_inst.shard_id for db_inst
                                      in db_insts]))


@@ -138,6 +138,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
         def _add_shard_cluster():
             db_instances = DBInstance.find_all(cluster_id=cluster_id,
+                                               deleted=False,
                                                shard_id=shard_id).all()
             instance_ids = [db_instance.id for db_instance in db_instances]
             LOG.debug("instances in shard %(shard_id)s: %(instance_ids)s",


@@ -107,7 +107,8 @@ class RedisClusterTasks(task_models.ClusterTasks):
         def _grow_cluster():
-            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
+            db_instances = DBInstance.find_all(cluster_id=cluster_id,
+                                               deleted=False).all()
             cluster_head = next(Instance.load(context, db_inst.id)
                                 for db_inst in db_instances
                                 if db_inst.id not in new_instance_ids)


@@ -74,7 +74,8 @@ class VerticaCluster(models.Cluster):
         vertica_conf = CONF.get(datastore_version.manager)
         num_instances = len(instances)
-        existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all()
+        existing = inst_models.DBInstance.find_all(cluster_id=db_info.id,
+                                                   deleted=False).all()
         num_existing = len(existing)

         # Matching number of instances with configured cluster_member_count


@@ -190,7 +190,9 @@ class ClusterRootController(DefaultRootController):
                                             cluster_instances)

     def _find_cluster_node_ids(self, tenant_id, cluster_id):
-        args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
+        args = {'tenant_id': tenant_id,
+                'cluster_id': cluster_id,
+                'deleted': False}
         cluster_instances = DBInstance.find_all(**args).all()
         return [db_instance.id for db_instance in cluster_instances]


@@ -33,7 +33,7 @@ class MongoDBRootController(ClusterRootController):
     def _find_query_router_ids(self, tenant_id, cluster_id):
         args = {'tenant_id': tenant_id, 'cluster_id': cluster_id,
-                'type': 'query_router'}
+                'deleted': False, 'type': 'query_router'}
         query_router_instances = DBInstance.find_all(**args).all()
         return [db_instance.id for db_instance in query_router_instances]


@@ -336,7 +336,8 @@ class ClusterTasks(Cluster):
         cluster_notification = context.notification
         request_info = cluster_notification.serialize(context)
         try:
-            node_db_inst = DBInstance.find_all(cluster_id=cluster_id).all()
+            node_db_inst = DBInstance.find_all(cluster_id=cluster_id,
+                                               deleted=False).all()
             for index, db_inst in enumerate(node_db_inst):
                 if index > 0:
                     LOG.debug(
@@ -380,7 +381,8 @@ class ClusterTasks(Cluster):
         cluster_notification = context.notification
         request_info = cluster_notification.serialize(context)
         try:
-            for db_inst in DBInstance.find_all(cluster_id=cluster_id).all():
+            for db_inst in DBInstance.find_all(cluster_id=cluster_id,
+                                               deleted=False).all():
                 instance = BuiltInstanceTasks.load(
                     context, db_inst.id)
                 _upgrade_cluster_instance(instance)