Fixed launch deployment task w/o specifying list of nodes
We should select all nodes that were provisioned
for deployment in LCM. The method nodes_to_deploy has been overridden for
clusters that are LCM-ready.
Also changed selecting default nodes to get deployment info.
Now, by default, all nodes are selected except those that are marked
for deletion.
Closes-Bug: 1566384
Change-Id: I264bece7da8aebaef78c15bef43c800ca565e583
(cherry picked from commit ae4f575173
)
This commit is contained in:
parent
cc5e820407
commit
fbb5a91e2d
|
@ -88,6 +88,9 @@ class DefaultOrchestratorInfo(NodesFilterMixin, BaseHandler):
|
|||
def _serialize(self, cluster, nodes):
    """Serialize orchestrator info for the given cluster nodes.

    Abstract hook: concrete DefaultOrchestratorInfo subclasses must
    override this with the appropriate serializer call.

    :param cluster: the Cluster object
    :param nodes: the nodes to serialize info for
    :raises NotImplementedError: always, in this base implementation
    """
    raise NotImplementedError('Override the method')
|
||||
|
||||
def get_default_nodes(self, cluster):
    """Return the default node set used to build orchestrator info.

    By default this is every cluster node that is not marked
    for deletion.

    :param cluster: the Cluster object
    :returns: nodes of the cluster not pending deletion
    """
    return objects.Cluster.get_nodes_not_for_deletion(cluster)
|
||||
|
||||
|
||||
class OrchestratorInfo(BaseHandler):
|
||||
"""Base class for replaced data."""
|
||||
|
@ -146,9 +149,6 @@ class DefaultProvisioningInfo(DefaultOrchestratorInfo):
|
|||
return provisioning_serializers.serialize(
|
||||
cluster, nodes, ignore_customized=True)
|
||||
|
||||
def get_default_nodes(self, cluster):
    """Return the default node set for provisioning info.

    Narrows the base default to only the nodes queued for provisioning.

    :param cluster: the Cluster object
    :returns: nodes selected by TaskHelper.nodes_to_provision
    """
    return TaskHelper.nodes_to_provision(cluster)
|
||||
|
||||
|
||||
class DefaultDeploymentInfo(DefaultOrchestratorInfo):
|
||||
|
||||
|
@ -161,9 +161,6 @@ class DefaultDeploymentInfo(DefaultOrchestratorInfo):
|
|||
return deployment_serializers.serialize(
|
||||
graph, cluster, nodes, ignore_customized=True)
|
||||
|
||||
def get_default_nodes(self, cluster):
    """Return the default node set for deployment info.

    :param cluster: the Cluster object
    :returns: nodes selected by TaskHelper.nodes_to_deploy
    """
    return TaskHelper.nodes_to_deploy(cluster)
|
||||
|
||||
|
||||
class DefaultPrePluginsHooksInfo(DefaultOrchestratorInfo):
|
||||
|
||||
|
@ -175,9 +172,6 @@ class DefaultPrePluginsHooksInfo(DefaultOrchestratorInfo):
|
|||
graph = orchestrator_graph.AstuteGraph(cluster)
|
||||
return pre_deployment_serialize(graph, cluster, nodes)
|
||||
|
||||
def get_default_nodes(self, cluster):
    """Return the default node set for pre-deployment plugin hooks.

    :param cluster: the Cluster object
    :returns: nodes selected by TaskHelper.nodes_to_deploy
    """
    return TaskHelper.nodes_to_deploy(cluster)
|
||||
|
||||
|
||||
class DefaultPostPluginsHooksInfo(DefaultOrchestratorInfo):
|
||||
|
||||
|
@ -189,9 +183,6 @@ class DefaultPostPluginsHooksInfo(DefaultOrchestratorInfo):
|
|||
graph = orchestrator_graph.AstuteGraph(cluster)
|
||||
return post_deployment_serialize(graph, cluster, nodes)
|
||||
|
||||
def get_default_nodes(self, cluster):
    """Return the default node set for post-deployment plugin hooks.

    :param cluster: the Cluster object
    :returns: nodes selected by TaskHelper.nodes_to_deploy
    """
    return TaskHelper.nodes_to_deploy(cluster)
|
||||
|
||||
|
||||
class ProvisioningInfo(OrchestratorInfo):
|
||||
|
||||
|
|
|
@ -1281,6 +1281,25 @@ class Node(NailgunObject):
|
|||
for interface in instance.bond_interfaces:
|
||||
Bond.refresh_interface_dpdk_properties(interface, dpdk_drivers)
|
||||
|
||||
@classmethod
def is_provisioned(cls, instance):
    """Checks that node has been provisioned already.

    A node counts as provisioned when its status shows it has passed
    the provisioning stage (provisioned, deploying, stopped or ready),
    or when it failed specifically during deployment — a deploy error
    implies provisioning itself had completed.

    :param instance: the Node object
    :returns: True if provisioned otherwise False
    """
    post_provision_statuses = (
        consts.NODE_STATUSES.deploying,
        consts.NODE_STATUSES.ready,
        consts.NODE_STATUSES.provisioned,
        consts.NODE_STATUSES.stopped
    )
    if instance.status in post_provision_statuses:
        return True
    # A node that errored out during deploy was provisioned beforehand.
    return (instance.status == consts.NODE_STATUSES.error and
            instance.error_type == consts.NODE_ERRORS.deploy)
|
||||
|
||||
|
||||
class NodeCollection(NailgunCollection):
|
||||
"""Node collection"""
|
||||
|
|
|
@ -154,9 +154,7 @@ class TaskHelper(object):
|
|||
# TODO(aroma): considering moving this code to
|
||||
# nailgun Cluster object's methods
|
||||
@classmethod
|
||||
def nodes_to_deploy(cls, cluster, force=False):
|
||||
from nailgun import objects # preventing cycle import error
|
||||
|
||||
def _nodes_to_deploy_legacy(cls, cluster, force, objects):
|
||||
nodes_to_deploy = []
|
||||
update_required = set()
|
||||
update_once = set()
|
||||
|
@ -187,6 +185,24 @@ class TaskHelper(object):
|
|||
|
||||
return nodes_to_deploy
|
||||
|
||||
@classmethod
def _nodes_to_deploy_lcm(cls, cluster, objects):
    """Select the default deployment node set for an LCM-ready cluster.

    We should select all nodes that have been provisioned because this
    method is used only to get the default nodes list for separate
    deployment.

    :param cluster: the Cluster object
    :param objects: the nailgun objects module (passed to avoid a
        cyclic import at module level)
    :returns: list of provisioned nodes not marked for deletion
    """
    candidates = objects.Cluster.get_nodes_not_for_deletion(cluster)
    provisioned = objects.Node.is_provisioned
    return [node for node in candidates if provisioned(node)]

@classmethod
def nodes_to_deploy(cls, cluster, force=False):
    """Return the nodes that should be deployed for the cluster.

    Dispatches to the LCM selection for LCM-capable releases and to the
    legacy pending-changes selection otherwise.

    :param cluster: the Cluster object
    :param force: passed through to the legacy selection only
    :returns: list of nodes to deploy
    """
    from nailgun import objects  # preventing cycle import error

    if objects.Release.is_lcm_supported(cluster.release):
        return cls._nodes_to_deploy_lcm(cluster, objects)
    return cls._nodes_to_deploy_legacy(cluster, force, objects)
|
||||
|
||||
@classmethod
|
||||
def add_required_for_update_nodes(
|
||||
cls, cluster, nodes_to_deploy, update_required):
|
||||
|
|
|
@ -149,7 +149,6 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
|
|||
return
|
||||
|
||||
if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
|
||||
db().rollback()
|
||||
raise errors.WrongNodeStatus("No changes to deploy")
|
||||
|
||||
def get_nodes_to_deploy(self, force=False):
|
||||
|
@ -613,7 +612,7 @@ class DeploymentTaskManager(TaskManager):
|
|||
return tasks.DeploymentTask
|
||||
|
||||
def execute(self, nodes_to_deployment, deployment_tasks=None,
|
||||
graph_type=None, force=False):
|
||||
graph_type=None, force=False, **kwargs):
|
||||
deployment_tasks = deployment_tasks or []
|
||||
|
||||
logger.debug('Nodes to deploy: {0}'.format(
|
||||
|
|
|
@ -135,12 +135,8 @@ class BaseDeploymentTask(object):
|
|||
:param kwargs: the keyword arguments
|
||||
"""
|
||||
|
||||
available_methods = iter(
|
||||
cls.get_deployment_methods(transaction.cluster)
|
||||
)
|
||||
|
||||
error_messages = []
|
||||
|
||||
available_methods = cls.get_deployment_methods(transaction.cluster)
|
||||
for method in available_methods:
|
||||
try:
|
||||
args = getattr(cls, method)(transaction, **kwargs)
|
||||
|
|
|
@ -190,7 +190,7 @@ class TestTaskManagers(BaseIntegrationTest):
|
|||
],
|
||||
release_kwargs={
|
||||
'operating_system': consts.RELEASE_OS.ubuntu,
|
||||
'version': 'liberty-9.0',
|
||||
'version': 'mitaka-9.0',
|
||||
},
|
||||
)
|
||||
cluster = self.env.clusters[-1]
|
||||
|
@ -1326,6 +1326,32 @@ class TestTaskManagers(BaseIntegrationTest):
|
|||
else:
|
||||
self.assertEqual(task['type'], 'skipped')
|
||||
|
||||
@mock.patch('nailgun.rpc.cast')
def test_deployment_task_uses_all_nodes_by_default(self, rpc_mock):
    # Regression test for bug 1566384: launching a deployment task
    # without an explicit node list must include every cluster node.
    # mitaka-9.0 is an LCM-capable release, so the LCM default node
    # selection path is exercised.
    cluster = self.env.create(
        release_kwargs={
            'operating_system': consts.RELEASE_OS.ubuntu,
            'version': 'mitaka-9.0'
        },
        nodes_kwargs=[{'roles': ['controller'],
                       'status': consts.NODE_STATUSES.ready}] * 3
    )
    # PUT with an empty JSON body — no nodes specified explicitly.
    resp = self.app.put(
        reverse(
            'DeploySelectedNodes',
            kwargs={'cluster_id': cluster.id}
        ),
        '{}',
        headers=self.default_headers
    )
    self.assertIn(resp.status_code, [200, 202])
    # Inspect the task graph sent over RPC to astute.
    tasks_graph = rpc_mock.call_args[0][1]['args']['tasks_graph']
    # check that all nodes present in message
    # (plus the master node uid and the None/cluster-wide entry)
    self.assertItemsEqual(
        [n.uid for n in cluster.nodes] + [consts.MASTER_NODE_UID, None],
        tasks_graph
    )
|
||||
|
||||
|
||||
class TestUpdateDnsmasqTaskManagers(BaseIntegrationTest):
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ class TestDeploymentNodesFiltering(BaseIntegrationTest):
|
|||
super(TestDeploymentNodesFiltering, self).setUp()
|
||||
self.env.create(
|
||||
release_kwargs={
|
||||
'version': 'liberty-9.0'
|
||||
'version': 'liberty-8.0'
|
||||
},
|
||||
nodes_kwargs=[
|
||||
{'roles': ['controller'], 'status': 'ready'},
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
import mock
|
||||
import web
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Cluster
|
||||
from nailgun.db.sqlalchemy.models import Task
|
||||
from nailgun import objects
|
||||
|
@ -268,3 +269,40 @@ class TestTaskHelpers(BaseTestCase):
|
|||
kwargs = TaskHelper.prepare_action_log_kwargs(check_task)
|
||||
self.assertIn('actor_id', kwargs)
|
||||
self.assertEqual(actor_id, kwargs['actor_id'])
|
||||
|
||||
def test_nodes_to_deploy_if_lcm(self):
    # For an LCM-ready release (mitaka-9.0), nodes_to_deploy() must
    # return every provisioned node and skip unprovisioned nodes,
    # provision-failed nodes, and nodes pending deletion.
    cluster = self.env.create(
        nodes_kwargs=[
            {'status': consts.NODE_STATUSES.ready},
            {'status': consts.NODE_STATUSES.discover},
            {'status': consts.NODE_STATUSES.provisioning},
            {'status': consts.NODE_STATUSES.provisioned},
            {'status': consts.NODE_STATUSES.deploying},
            {'status': consts.NODE_STATUSES.error,
             'error_type': consts.NODE_ERRORS.deploy},
            {'status': consts.NODE_STATUSES.error,
             'error_type': consts.NODE_ERRORS.provision},
            {'status': consts.NODE_STATUSES.stopped},
            {'status': consts.NODE_STATUSES.removing},
            {'status': consts.NODE_STATUSES.ready,
             'pending_deletion': True},
        ],
        release_kwargs={
            'version': 'mitaka-9.0',
            'operating_system': consts.RELEASE_OS.ubuntu
        }
    )
    nodes_to_deploy = TaskHelper.nodes_to_deploy(cluster)
    # Exactly 5 nodes qualify: ready, provisioned, deploying, stopped
    # and the error node whose error_type is deploy.
    self.assertEqual(5, len(nodes_to_deploy))

    expected_status = [
        consts.NODE_STATUSES.provisioned,
        consts.NODE_STATUSES.stopped,
        consts.NODE_STATUSES.ready,
        consts.NODE_STATUSES.error,
        consts.NODE_STATUSES.deploying
    ]
    for node in nodes_to_deploy:
        self.assertIn(node.status, expected_status)
        # Only deploy errors are eligible; provision errors are not.
        self.assertIn(node.error_type, [None, consts.NODE_ERRORS.deploy])
        self.assertFalse(node.pending_deletion)
|
||||
|
|
Loading…
Reference in New Issue