Change module parameters to use the consts module

- Change local string parameters to use the public constants
- Add VM status constants to the consts module

Change-Id: I1540da6fe68d829d1f50a140aafaf83fbd86329b
Signed-off-by: Yuanbin.Chen <cybing4@gmail.com>
Yuanbin.Chen 2018-02-14 10:27:41 +08:00
parent e5bc16b3fe
commit c1b3fa83a8
7 changed files with 32 additions and 18 deletions
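
The refactor is mechanical throughout: every hard-coded status string is replaced by the matching constant from senlin.common.consts. A minimal standalone sketch of the pattern (the stand-in value below mirrors the real constant, which callers import via `from senlin.common import consts`):

# Stand-in mirroring senlin.common.consts, for illustration only.
NS_ACTIVE = 'ACTIVE'    # node status

node_status = 'ERROR'

# Before: a bare string literal; a typo fails silently at runtime.
if node_status != 'ACTIVE':
    print('node needs recovery')

# After: the shared constant; a typo now raises NameError immediately.
if node_status != NS_ACTIVE:
    print('node needs recovery')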

View File

@@ -259,10 +259,11 @@ CLUSTER_DEFAULT_VALUE = (
 ACTION_STATUSES = (
     ACTION_INIT, ACTION_WAITING, ACTION_READY, ACTION_RUNNING,
     ACTION_SUCCEEDED, ACTION_FAILED, ACTION_CANCELLED,
-    ACTION_WAITING_LIFECYCLE_COMPLETION,
+    ACTION_WAITING_LIFECYCLE_COMPLETION, ACTION_SUSPENDED,
 ) = (
     'INIT', 'WAITING', 'READY', 'RUNNING',
-    'SUCCEEDED', 'FAILED', 'CANCELLED', 'WAITING_LIFECYCLE_COMPLETION'
+    'SUCCEEDED', 'FAILED', 'CANCELLED', 'WAITING_LIFECYCLE_COMPLETION',
+    'SUSPENDED',
 )
 
 EVENT_LEVELS = {
@@ -305,3 +306,9 @@ LIFECYCLE_TRANSITION_TYPE = (
 ) = (
     'termination',
 )
+
+VM_STATUS = (
+    VS_ACTIVE, VS_ERROR,
+) = (
+    'ACTIVE', 'ERROR',
+)
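
The VM_STATUS block added above uses the chained tuple assignment idiom that consts.py applies to all of its status groups: a single statement binds the whole tuple and unpacks the individual names. A standalone illustration of how the idiom behaves:

# The right-hand tuple is evaluated once, bound to VM_STATUS, and then
# unpacked into the parenthesized target list (VS_ACTIVE, VS_ERROR).
VM_STATUS = (
    VS_ACTIVE, VS_ERROR,
) = (
    'ACTIVE', 'ERROR',
)

assert VM_STATUS == ('ACTIVE', 'ERROR')
assert VS_ACTIVE == 'ACTIVE'
assert VS_ERROR == 'ERROR'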

View File

@@ -329,7 +329,7 @@ def node_update(context, node_id, values):
             cluster = session.query(models.Cluster).get(node.cluster_id)
             if cluster is not None:
                 if values['status'] == 'ERROR':
-                    cluster.status = 'WARNING'
+                    cluster.status = consts.CS_WARNING
                 if 'status_reason' in values:
                     cluster.status_reason = 'Node %(node)s: %(reason)s' % {
                         'node': node.name, 'reason': values['status_reason']}
@@ -1319,8 +1319,9 @@ def action_delete(context, action_id):
         action = session.query(models.Action).get(action_id)
         if not action:
             return
-        if ((action.status == 'WAITING') or (action.status == 'RUNNING') or
-                (action.status == 'SUSPENDED')):
+        if ((action.status == consts.ACTION_WAITING) or
+                (action.status == consts.ACTION_RUNNING) or
+                (action.status == consts.ACTION_SUSPENDED)):
             raise exception.EResourceBusy(type='action', id=action_id)
 
         session.delete(action)

View File

@@ -13,6 +13,7 @@
 
 from oslo_config import cfg
 from oslo_log import log
 
+from senlin.common import consts
 from senlin.drivers import base
 from senlin.drivers import sdk
@@ -148,7 +149,8 @@ class NovaClient(base.DriverBase):
         return self.conn.compute.create_server_image(server, name, metadata)
 
     @sdk.translate_exception
-    def wait_for_server(self, server, status='ACTIVE', failures=['ERROR'],
+    def wait_for_server(self, server, status=consts.VS_ACTIVE,
+                        failures=[consts.VS_ERROR],
                         interval=2, timeout=None):
         '''Wait for server creation complete'''
         if timeout is None:
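
wait_for_server itself wraps the SDK's server-wait call; its semantics reduce to a poll loop of roughly the following shape (a hedged sketch under that assumption, not Senlin's actual implementation; all names here are illustrative):

import time

def wait_for_status(get_status, status, failures, interval=2, timeout=600):
    """Poll get_status() until it returns the target status.

    Raises RuntimeError if a status listed in failures appears first,
    or if timeout seconds elapse before the target status is reached.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        current = get_status()
        if current == status:
            return current
        if current in failures:
            raise RuntimeError('entered failure status %r' % current)
        time.sleep(interval)
    raise RuntimeError('timed out waiting for status %r' % status)

With the new defaults, call sites compare against consts.VS_ACTIVE and consts.VS_ERROR instead of repeating the raw strings.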

View File

@@ -457,7 +457,7 @@ class ClusterAction(base.Action):
                               '' % {'n': nid, 'c': node.cluster_id})
                 continue
-            if node.status != "ACTIVE":
+            if node.status != consts.NS_ACTIVE:
                 errors.append('Node %s is not in ACTIVE status.' % nid)
                 continue
@@ -777,7 +777,8 @@ class ClusterAction(base.Action):
             dobj.Dependency.create(self.context, [c for c in children],
                                    self.id)
             for cid in children:
-                ao.Action.update(self.context, cid, {'status': 'READY'})
+                ao.Action.update(self.context, cid,
+                                 {'status': consts.ACTION_READY})
             dispatcher.start_action()
 
         # Wait for dependent action if any

View File

@@ -544,7 +544,7 @@ class Cluster(object):
 
         active_count = 0
         for node in self.nodes:
-            if node.status == 'ACTIVE':
+            if node.status == consts.NS_ACTIVE:
                 active_count += 1
 
         # get provided desired_capacity/min_size/max_size

View File

@@ -222,8 +222,10 @@ class HealthManager(service.Service):
         with timeutils.StopWatch(timeout) as timeout_watch:
             while timeout > 0:
                 action = self.rpc_client.call(ctx, 'action_get', req)
-                if action['status'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
-                    if action['status'] == 'SUCCEEDED':
+                if action['status'] in [consts.ACTION_SUCCEEDED,
+                                        consts.ACTION_FAILED,
+                                        consts.ACTION_CANCELLED]:
+                    if action['status'] == consts.ACTION_SUCCEEDED:
                         done = True
                     break
                 time.sleep(2)
@@ -272,7 +274,7 @@
         # loop through nodes to trigger recovery
         nodes = objects.Node.get_all_by_cluster(ctx, cluster_id)
         for node in nodes:
-            if node.status != 'ACTIVE':
+            if node.status != consts.NS_ACTIVE:
                 LOG.info("Requesting node recovery: %s", node.id)
                 req = objects.NodeRecoverRequest(identity=node.id,
                                                  params=recover_action)
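
With each terminal status now named explicitly, one natural follow-up (not part of this commit; the helper below is hypothetical) is to group the terminal action statuses once and test membership:

from senlin.common import consts

# Hypothetical grouping, not defined by this commit: the statuses the
# health manager treats as terminal when polling action_get.
ACTION_TERMINAL_STATUSES = frozenset((
    consts.ACTION_SUCCEEDED,
    consts.ACTION_FAILED,
    consts.ACTION_CANCELLED,
))

def action_finished(status):
    """Return True once an action can no longer change status."""
    return status in ACTION_TERMINAL_STATUSES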

View File

@@ -1002,7 +1002,7 @@ class ServerProfile(base.Profile):
             msg = six.text_type(ex)
             try:
                 cc.server_resize_revert(obj.physical_id)
-                cc.wait_for_server(obj.physical_id, 'ACTIVE')
+                cc.wait_for_server(obj.physical_id, consts.VS_ACTIVE)
             except exc.InternalError as ex1:
                 msg = six.text_type(ex1)
             raise exc.EResourceUpdate(type='server', id=obj.physical_id,
@@ -1010,7 +1010,7 @@ class ServerProfile(base.Profile):
 
         try:
             cc.server_resize_confirm(obj.physical_id)
-            cc.wait_for_server(obj.physical_id, 'ACTIVE')
+            cc.wait_for_server(obj.physical_id, consts.VS_ACTIVE)
         except exc.InternalError as ex:
             raise exc.EResourceUpdate(type='server', id=obj.physical_id,
                                       message=six.text_type(ex))
@@ -1058,7 +1058,7 @@ class ServerProfile(base.Profile):
         try:
             driver.server_rebuild(obj.physical_id, new_image_id,
                                   new_name, new_password)
-            driver.wait_for_server(obj.physical_id, 'ACTIVE')
+            driver.wait_for_server(obj.physical_id, consts.VS_ACTIVE)
         except exc.InternalError as ex:
             raise exc.EResourceUpdate(type='server', id=obj.physical_id,
                                       message=six.text_type(ex))
@@ -1440,7 +1440,7 @@ class ServerProfile(base.Profile):
                                          id=obj.physical_id,
                                          message=six.text_type(ex))
 
-        if (server is None or server.status != 'ACTIVE'):
+        if (server is None or server.status != consts.VS_ACTIVE):
             return False
 
         return True
@@ -1482,7 +1482,8 @@ class ServerProfile(base.Profile):
             return False
 
         self.compute(obj).server_reboot(obj.physical_id, reboot_type)
-        self.compute(obj).wait_for_server(obj.physical_id, 'ACTIVE')
+        self.compute(obj).wait_for_server(obj.physical_id,
+                                          consts.VS_ACTIVE)
         return True
 
     def handle_rebuild(self, obj, **options):
@@ -1536,7 +1537,7 @@ class ServerProfile(base.Profile):
         try:
             nova_driver.server_rebuild(server_id, image_id,
                                        name, admin_pass)
-            nova_driver.wait_for_server(server_id, 'ACTIVE')
+            nova_driver.wait_for_server(server_id, consts.VS_ACTIVE)
             return server_id
         except exc.InternalError as ex:
             raise exc.EResourceOperation(op='rebuilding', type='server',