Raise an error when trying to upload config with invalid cluster_id

Check if node is assigned to specified cluster when trying to
upload new configuration. Otherwise fail.

Change-Id: I434b61d3887fbe2b23ae2ddedb41c80b944b2451
Closes-Bug: #1523534
This commit is contained in:
Alexander Saprykin 2015-12-08 14:29:53 +01:00
parent fe79f827f3
commit 4f028385f8
8 changed files with 72 additions and 7 deletions

View File

@ -58,11 +58,6 @@ class OpenstackConfigCollectionHandler(BaseHandler):
:return: New config object in JSON format.
"""
data = self.checked_data()
self.get_object_or_404(objects.Cluster, data['cluster_id'])
if 'node_id' in data:
self.get_object_or_404(objects.Node, data['node_id'])
obj = objects.OpenstackConfig.create(data)
raise self.http(201, objects.OpenstackConfig.to_json(obj))

View File

@ -17,6 +17,7 @@ import six
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import openstack_config as schema
from nailgun.errors import errors
from nailgun import objects
class OpenstackConfigValidator(BasicValidator):
@ -38,6 +39,17 @@ class OpenstackConfigValidator(BasicValidator):
data = super(OpenstackConfigValidator, cls).validate(data)
cls.validate_schema(data, schema)
cls._check_exclusive_fields(data)
cluster = objects.Cluster.get_by_uid(data['cluster_id'],
fail_if_not_found=True)
if 'node_id' in data:
node = objects.Node.get_by_uid(
data['node_id'], fail_if_not_found=True)
if node.cluster_id != cluster.id:
raise errors.InvalidData(
"Node '{0}' is not assigned to cluster '{1}'".format(
data['node_id'], cluster.id))
return data
@classmethod

View File

@ -592,6 +592,9 @@ class Cluster(NailgunObject):
from nailgun.objects import NodeCollection
NodeCollection.reset_network_template(nodes_to_remove)
from nailgun.objects import OpenstackConfig
OpenstackConfig.disable_by_nodes(nodes_to_remove)
map(
net_manager.assign_networks_by_default,
nodes_to_add

View File

@ -859,6 +859,10 @@ class Node(NailgunObject):
instance.kernel_params = None
instance.primary_roles = []
instance.hostname = cls.default_slave_name(instance)
from nailgun.objects import OpenstackConfig
OpenstackConfig.disable_by_nodes([instance])
db().flush()
db().refresh(instance)

View File

@ -120,6 +120,18 @@ class OpenstackConfig(NailgunObject):
return configs
@classmethod
def disable_by_nodes(cls, nodes):
    """Deactivate all active node-level configurations for the given nodes.

    Marks every active configuration of type ``node`` that belongs to one
    of *nodes* as inactive in a single bulk UPDATE.

    :param nodes: iterable of Node objects whose configs should be disabled.
    """
    node_ids = [n.id for n in nodes]
    # Bulk UPDATE with synchronize_session=False: already-loaded config
    # instances in the session are NOT synchronized, so callers that hold
    # one must refresh() it to observe the change.
    (db().query(cls.model)
     .filter_by(
         config_type=consts.OPENSTACK_CONFIG_TYPES.node,
         is_active=True)
     .filter(cls.model.node_id.in_(node_ids))
     .update({cls.model.is_active: False},
             synchronize_session=False))
class OpenstackConfigCollection(NailgunCollection):

View File

@ -211,6 +211,7 @@ class EnvironmentManager(object):
config = OpenstackConfig.create(kwargs)
db().flush()
self.openstack_configs.append(config)
return config
def update_role(self, release_id, role_name, data, expect_errors=False):
return self.app.put(

View File

@ -393,18 +393,23 @@ class TestNodeObject(BaseIntegrationTest):
self.assertEqual(2, nodes_w_public_ip_count)
def test_removing_from_cluster(self):
self.env.create(
cluster = self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"role": "controller"}
]
)
node_db = self.env.nodes[0]
config = self.env.create_openstack_config(
cluster_id=cluster['id'], node_id=node_db.id, configuration={})
node2_db = self.env.create_node()
objects.Node.remove_from_cluster(node_db)
self.db().refresh(config)
self.assertEqual(node_db.cluster_id, None)
self.assertEqual(node_db.roles, [])
self.assertEqual(node_db.pending_roles, [])
self.assertFalse(config.is_active)
exclude_fields = [
"group_id",
@ -1354,6 +1359,20 @@ class TestClusterObject(BaseTestCase):
self.assertTrue(
editable_attrs[u'additional_components'][u'sahara'][u'value'])
def test_cleanup_openstack_config(self):
    """Removing all nodes from a cluster deactivates their node configs."""
    node = self.env.nodes[0]
    cluster = self.env.create_cluster(api=False, nodes=[node.id])
    config = self.env.create_openstack_config(
        cluster_id=cluster.id,
        node_id=node.id,
        configuration={'key': 'value'})
    self.assertTrue(config.is_active)
    # Detaching every node must disable the node-scoped config.
    objects.Cluster.update(cluster, {'nodes': []})
    self.db().refresh(config)
    self.assertFalse(config.is_active)
class TestClusterObjectVirtRoles(BaseTestCase):

View File

@ -17,6 +17,7 @@ import urllib
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db import db
from nailgun import objects
from nailgun.objects.serializers.openstack_config import \
@ -34,7 +35,9 @@ class TestOpenstackConfigHandlers(BaseIntegrationTest):
self.env.create_cluster(api=False)
self.clusters = self.env.clusters
self.nodes = self.env.create_nodes(3)
self.nodes = self.env.create_nodes(
3, cluster_id=self.clusters[0].id,
status=consts.NODE_STATUSES.ready)
self.configs = []
self.create_openstack_config(
@ -90,6 +93,22 @@ class TestOpenstackConfigHandlers(BaseIntegrationTest):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json_body['is_active'], False)
def test_openstack_config_upload_fail(self):
    """Uploading a config for a node not in the cluster must return 400."""
    node = self.nodes[1]
    cluster = self.clusters[1]
    payload = jsonutils.dumps({
        'cluster_id': cluster.id,
        'node_id': node.id,
        'configuration': {}
    })
    resp = self.app.post(
        reverse('OpenstackConfigCollectionHandler'),
        payload,
        headers=self.default_headers,
        expect_errors=True)
    self.assertEqual(resp.status_code, 400)
    expected_message = "Node '{0}' is not assigned to cluster '{1}'".format(
        node.id, cluster.id)
    self.assertEqual(resp.json_body['message'], expected_message)
def test_openstack_config_list(self):
url = self._make_filter_url(cluster_id=self.clusters[0].id)
resp = self.app.get(url, headers=self.default_headers)