Set nodes' statuses to 'error' when their nodegroup is deleted

As described in the bug below and in the spec, one aspect of the
multi-rack feature was not implemented:

https://specs.openstack.org/openstack/fuel-specs/specs/8.0/multi-rack-static.html#notifications-impact

The node group deletion callback now resets the affected nodes to the
'error' status and sends a notification.

Change-Id: I6b2bae5601ba7dbca620bb3861e95b0e554f8699
Closes-bug: #1644630
Vladimir Kuklin 2017-01-18 21:46:42 +03:00 committed by Georgy Kibardin
parent 3d6d48a233
commit cd2ee13830
3 changed files with 52 additions and 3 deletions
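
Which nodes get flagged comes down to a CIDR membership test against the
default admin network. As a rough illustration only (this uses netaddr
directly and a made-up helper name, not nailgun's actual is_same_network()
implementation), the idea is:

import netaddr

def loses_pxe_boot(node_ip, default_admin_cidr):
    # Sketch: a node whose IP is still inside the default admin network can
    # keep booting from the default DHCP/PXE range, so it is left alone.
    # Everything else is reset to 'error' and a notification is filed.
    return netaddr.IPAddress(node_ip) not in netaddr.IPNetwork(default_admin_cidr)

# e.g. loses_pxe_boot('10.3.0.42', '10.20.0.0/24') -> True -> node goes to 'error'
# (both addresses above are illustrative values, not taken from this change)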

@@ -182,6 +182,18 @@ class NetworkManagerExtension(BaseExtension):
     @classmethod
     def on_nodegroup_delete(cls, ng):
         netmanager = objects.Cluster.get_network_manager(ng.cluster)
+        default_admin_net = objects.NetworkGroup.get_default_admin_network()
+        for node in ng.nodes:
+            objects.Node.remove_from_cluster(node)
+            if not netmanager.is_same_network(node.ip, default_admin_net.cidr):
+                objects.Node.set_error_status_and_file_notification(
+                    node,
+                    consts.NODE_ERRORS.discover,
+                    "Node group of node '{0}' was deleted, so it may not be "
+                    "able to boot correctly unless it belongs to another "
+                    "node group's admin network".format(node.hostname)
+                )
+
         try:
             task = UpdateDnsmasqTaskManager().execute()
         except errors.TaskAlreadyRunning:
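
The implementation of objects.Node.set_error_status_and_file_notification is
not shown in this change. A hedged sketch of what such a helper plausibly
does, using only generic nailgun object calls and an assumed notification
payload (the real method may differ), would be:

from nailgun import consts
from nailgun import objects

def set_error_and_notify(node, error_type, message):
    # Assumed behaviour: move the node to 'error' with the given error_type...
    objects.Node.update(node, {
        'status': consts.NODE_STATUSES.error,
        'error_type': error_type,
    })
    # ...and file a user-visible notification, per the spec's
    # "Notifications impact" section. The payload keys are an assumption.
    objects.Notification.create({
        'topic': 'error',
        'message': message,
        'node_id': node.id,
    })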

@@ -1452,7 +1452,7 @@ class EnvironmentManager(object):
                                              expect_errors)

     def _create_network_group(self, expect_errors=False, cluster=None,
-                              **kwargs):
+                              group_id=None, **kwargs):
         if not cluster:
             cluster = self.clusters[0]
         ng = {
@@ -1461,7 +1461,7 @@
             "vlan_start": 50,
             "cidr": "10.3.0.0/24",
             "gateway": "10.3.0.1",
-            "group_id": Cluster.get_default_group(cluster).id,
+            "group_id": group_id or Cluster.get_default_group(cluster).id,
             "meta": {
                 "notation": consts.NETWORK_NOTATION.cidr,
                 "use_gateway": True,

@@ -26,6 +26,7 @@ from nailgun.db import db
 from nailgun.db.sqlalchemy import models
 from nailgun import errors
 from nailgun import objects
+from nailgun.rpc.receiver import NailgunReceiver
 from nailgun.test.base import BaseIntegrationTest
 from nailgun.utils import reverse
@@ -37,7 +38,7 @@ class TestNodeGroups(BaseIntegrationTest):
     def setUp(self):
         super(TestNodeGroups, self).setUp()
         self.cluster = self.env.create(
-            release_kwargs={'version': '1111-8.0'},
+            release_kwargs={'version': '1111-9.0'},
             cluster_kwargs={
                 'api': False,
                 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
@@ -159,6 +160,42 @@
         self.assertEqual(err.exception.message,
                          'Default node group cannot be deleted.')

+    @patch('nailgun.task.task.rpc.cast')
+    @patch('nailgun.objects.Notification.create')
+    def test_delete_non_default_node_group_reset_node_to_error(
+            self, notify, _):
+        node_group = self.env.create_node_group(api=False,
+                                                cluster_id=self.cluster.id)
+        self.env._create_network_group(cluster=self.cluster,
+                                       group_id=node_group.id)
+        node2 = self.env.create_node(group_id=node_group.id,
+                                     roles=['compute'],
+                                     status=consts.NODE_STATUSES.provisioned,
+                                     cluster_id=self.cluster.id,
+                                     ip='10.3.0.42')
+        task = self.env.launch_deployment()
+        NailgunReceiver.deploy_resp(
+            task_uuid=task.uuid,
+            status=consts.TASK_STATUSES.ready,
+            progress=100,
+            nodes=[{'uid': n.uid, 'status': consts.NODE_STATUSES.ready,
+                    'progress': 100}
+                   for n in self.env.nodes],
+        )
+        reset_task = self.env.reset_environment()
+        NailgunReceiver.reset_environment_resp(
+            task_uuid=reset_task.uuid,
+            status=consts.TASK_STATUSES.ready,
+            progress=100,
+            nodes=[{'uid': n.uid}
+                   for n in self.env.nodes],
+        )
+        self.env.delete_node_group(node_group.id)
+        self.assertEqual(node2.status, consts.NODE_STATUSES.error)
+        self.assertEqual(node2.error_type, consts.NODE_ERRORS.discover)
+        self.assertIsNone(node2.cluster)
+        notify.assert_called()
+
     def test_delete_non_default_node_group_error(self):
         node_group = self.env.create_node_group(api=False,
                                                 cluster_id=self.cluster.id)
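
A possible follow-up, not part of this change: the new test above only checks
that a notification was created. To also assert on its text, one could inspect
the mock's call arguments, assuming Notification.create receives a dict whose
'message' key carries the user-visible string (an assumption, since that call
site is not shown in this diff):

# Hypothetical extra assertion for test_delete_non_default_node_group_reset_node_to_error:
payload = notify.call_args[0][0]          # first positional arg of the call
self.assertIn(node2.hostname, payload['message'])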