Add check if VIP name already present in network config

Add a check to the 'assign_vips_for_net_groups_for_api' method of the
7.0 network serializer. The check detects the data of a particular VIP
being overwritten by a VIP with the same name; in that case a specific
error is raised. Such a situation may occur when plugins of the cluster
introduce conflicting network configuration (as shown by the added unit
test).

The exception is caught in
ApplyChangesTaskManager.check_before_deployment, which fails the
deployment process in that case.

Change-Id: Icdd6ed649d92ba5745d093a9612c2f30c082071c
Closes-Bug: #1487482
This commit is contained in:
Artem Roma 2015-10-12 19:18:20 +03:00
parent b94e3840a7
commit fdc483ddac
12 changed files with 150 additions and 28 deletions

View File

@ -100,6 +100,8 @@ class ProviderHandler(BaseHandler):
return self.serializer.serialize_for_cluster(cluster)
except errors.OutOfIPs as exc:
raise self.http(400, six.text_type(exc))
except errors.DuplicatedVIPNames as exc:
raise self.http(400, six.text_type(exc))
except Exception:
logger.exception('Serialization failed')
raise

View File

@ -81,6 +81,8 @@ default_messages = {
"NetworkRoleConflict": "Cannot override existing network role",
"NetworkTemplateMissingRoles": "Roles are missing from network template",
"NetworkTemplateMissingNetRoles": "Network roles are missing",
"DuplicatedVIPNames": ("Cannot assign VIPs for the cluster due to "
"overlapping of names of the VIPs"),
# RPC errors
"CannotFindTask": "Cannot find task",

View File

@ -1594,12 +1594,16 @@ class AllocateVIPs70Mixin(object):
:type cluster: Cluster model
:return: dict with vip definitions
"""
# check VIPs names overlapping before assigning them
cls.check_unique_vip_names_for_cluster(cluster)
vips = {}
vips['vips'] = {}
for role, vip_info, vip_addr in cls._assign_vips_for_net_groups(
cluster):
vip_name = vip_info['name']
vips['vips'][vip_name] = cls._build_advanced_vip_info(vip_info,
role,
vip_addr)
@ -1630,6 +1634,13 @@ class AllocateVIPs70Mixin(object):
:type cluster: Cluster model
:return: dict with vip definitions
"""
# NOTE(aroma): VIPs names intersection must be checked here too
# since deployment can be started omitting ApplyChangesTaskManager
# so, in turn, 'check_before_deployment' will not be executed.
# But it is not very good idea to put additional check into
# serialization process. Hence the issue remains possible unless
# some general validation of network data will be introduced for
# all kinds of deployment flow
vips = {}
for role, vip_info, vip_addr in cls._assign_vips_for_net_groups(
cluster):
@ -1638,3 +1649,31 @@ class AllocateVIPs70Mixin(object):
role,
vip_addr)
return vips
@classmethod
def check_unique_vip_names_for_cluster(cls, cluster):
    """Detect VIPs with duplicated names in the cluster's network roles.

    Processing must stop immediately when a duplicate is found, because
    rewriting of existing VIP data by another VIP info could lead to a
    failed deployment.

    :param cluster: cluster to check
    :type cluster: Cluster model
    :raises errors.DuplicatedVIPNames: if two or more VIPs share a name
    """
    seen_names = set()
    duplicate_vip_names = set()
    for role in objects.Cluster.get_network_roles(cluster):
        properties = role.get('properties', {})
        for vip_info in properties.get('vip', ()):
            name = vip_info['name']
            # set membership is O(1); the original list scan was O(n)
            # per VIP, i.e. quadratic overall
            if name in seen_names:
                duplicate_vip_names.add(name)
            seen_names.add(name)

    if duplicate_vip_names:
        # sort the names so the error message is deterministic
        # (set iteration order is not)
        raise errors.DuplicatedVIPNames(
            "Duplicate VIP names found in network "
            "configuration of the cluster with id {0}. "
            "Conflicting names: {1}"
            .format(cluster.id, ', '.join(sorted(duplicate_vip_names)))
        )

View File

@ -131,7 +131,9 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_node_networks_with_ips(cls, node):
"""Returns IP and network data (meta, gateway) for each node network"""
"""Returns IP and network data (meta, gateway) for each node network.
"""
if not node.group_id:
return {}
@ -226,7 +228,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def assign_ips_in_node_group(
cls, net_id, net_name, node_ids, ip_ranges):
"Assigns IP addresses for nodes in given network"
"""Assigns IP addresses for nodes in given network."""
ips_by_node_id = db().query(
models.IPAddr.ip_addr,
models.IPAddr.node

View File

@ -47,7 +47,7 @@ from nailgun.utils import traverse
class Attributes(NailgunObject):
"""Cluster attributes object"""
"""Cluster attributes object."""
#: SQLAlchemy model for Cluster attributes
model = models.Attributes
@ -119,7 +119,7 @@ class Attributes(NailgunObject):
class Cluster(NailgunObject):
"""Cluster object"""
"""Cluster object."""
#: SQLAlchemy model for Cluster
model = models.Cluster
@ -916,7 +916,7 @@ class Cluster(NailgunObject):
@classmethod
def is_vmware_enabled(cls, instance):
"""Check if current cluster supports vmware configuration"""
"""Check if current cluster supports vmware configuration."""
attributes = cls.get_attributes(instance).editable
return attributes.get('common', {}).get('use_vcenter', {}).get('value')
@ -1041,7 +1041,7 @@ class Cluster(NailgunObject):
class ClusterCollection(NailgunCollection):
"""Cluster collection"""
"""Cluster collection."""
#: Single Cluster object class
single = Cluster

View File

@ -66,7 +66,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
critical_roles = ['controller', 'ceph-osd', 'primary-mongo']
def serialize(self, cluster, nodes, ignore_customized=False):
"""Method generates facts which are passed to puppet"""
"""Method generates facts which are passed to puppet."""
def keyfunc(node):
return bool(node.replaced_deployment_info)
@ -236,7 +236,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
return serialized_nodes
def serialize_node(self, node, role):
"""Serialize node, then it will be merged with common attributes"""
"""Serialize node, then it will be merged with common attributes."""
node_attrs = {
# Yes, uid is really should be a string
'uid': node.uid,
@ -338,7 +338,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
@classmethod
def node_list(cls, nodes):
"""Node list"""
"""Node list."""
node_list = super(
DeploymentHASerializer,
cls
@ -350,7 +350,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
return node_list
def get_common_attrs(self, cluster):
"""Common attributes for all facts"""
"""Common attributes for all facts."""
common_attrs = super(
DeploymentHASerializer,
self
@ -368,7 +368,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
return common_attrs
def get_assigned_vips(self, cluster):
"""Assign and get vips for net groups"""
"""Assign and get vips for net groups."""
return objects.Cluster.get_network_manager(cluster).\
assign_vips_for_net_groups(cluster)
@ -546,7 +546,7 @@ def get_serializer_for_cluster(cluster):
def serialize(orchestrator_graph, cluster, nodes, ignore_customized=False):
"""Serialization depends on deployment mode"""
"""Serialization depends on deployment mode."""
objects.Cluster.set_primary_roles(cluster, nodes)
env_version = cluster.release.environment_version

View File

@ -55,7 +55,7 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def network_provider_node_attrs(cls, cluster, node):
"""Serialize node, then it will be merged with common attributes"""
"""Serialize node, then it will be merged with common attributes."""
nm = Cluster.get_network_manager(cluster)
networks = nm.get_node_networks(node)
node_attrs = {
@ -175,7 +175,7 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def neutron_attrs(cls, cluster):
"""Network configuration for Neutron"""
"""Network configuration for Neutron."""
attrs = {}
attrs['L3'] = cls.generate_l3(cluster)
attrs['L2'] = cls.generate_l2(cluster)

View File

@ -171,13 +171,6 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
)
)
network_info = self.serialize_network_cfg(self.cluster)
logger.info(
u"Network info:\n{0}".format(
jsonutils.dumps(network_info, indent=4)
)
)
self.check_no_running_deployment(self.cluster)
self._remove_obsolete_tasks()
@ -429,8 +422,22 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
:param supertask: task SqlAlchemy object
"""
try:
# if there are VIPs with same names in the network configuration
# the error will be raised. Such situation may occur when, for
# example, enabled plugins contain conflicting network
# configuration
network_info = self.serialize_network_cfg(self.cluster)
except errors.DuplicatedVIPNames as e:
raise errors.CheckBeforeDeploymentError(e.message)
logger.info(
u"Network info:\n{0}".format(
jsonutils.dumps(network_info, indent=4)
)
)
# checking admin intersection with untagged
network_info = self.serialize_network_cfg(self.cluster)
network_info["networks"] = [
n for n in network_info["networks"] if n["name"] != "fuelweb_admin"
]
@ -508,7 +515,7 @@ class SpawnVMsTaskManager(ApplyChangesTaskManager):
class ProvisioningTaskManager(TaskManager):
def execute(self, nodes_to_provision):
"""Run provisioning task on specified nodes"""
"""Run provisioning task on specified nodes."""
# locking nodes
nodes_ids = [node.id for node in nodes_to_provision]
nodes = objects.NodeCollection.filter_by_list(

View File

@ -814,7 +814,7 @@ class BaseNetworkVerification(object):
@classmethod
def enabled(cls, cluster):
"""Verify that subtask is enabled based on cluster configuration"""
"""Verify that subtask is enabled based on cluster configuration."""
return True
@ -982,7 +982,7 @@ class VerifyNetworksTask(VerifyNetworksForTemplateMixin,
class CheckDhcpTask(VerifyNetworksForTemplateMixin,
BaseNetworkVerification):
"""Task for dhcp verification"""
"""Task for dhcp verification."""
class MulticastVerificationTask(BaseNetworkVerification):
@ -1206,7 +1206,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_mongo_nodes(cls, task):
"""Check for mongo nodes presence in env with external mongo"""
"""Check for mongo nodes presence in env with external mongo."""
components = objects.Attributes.merged_attrs(
task.cluster.attributes).get("additional_components", None)
if (components and components["ceilometer"]["value"]
@ -1222,7 +1222,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_vmware_consistency(cls, task):
"""Checks vmware attributes consistency and proper values"""
"""Checks vmware attributes consistency and proper values."""
attributes = task.cluster.attributes.editable
vmware_attributes = task.cluster.vmware_attributes
# Old(< 6.1) clusters haven't vmware support

View File

@ -589,6 +589,35 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
ipaddr = resp.json_body['vips']['my-vip']['ipaddr']
self.assertEqual('10.42.0.2', ipaddr)
def test_get_returns_error_if_vip_names_are_intersected(self):
    """Handler replies 400 when two network roles declare the same VIP name."""
    cluster = self.env.create(
        release_kwargs={'version': '2015.1.0-7.0'},
        cluster_kwargs={
            'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
            'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
            'api': False,
        },
        nodes_kwargs=[{'roles': ['controller']}]
    )

    # add a second network role whose VIP name ('management') collides
    # with one already present in the release metadata
    conflicting_role = {
        'id': 'mymgmt/vip',
        'default_mapping': consts.NETWORKS.management,
        'properties': {
            'subnet': True,
            'gateway': False,
            'vip': [{
                'name': 'management',
                'node_roles': ['compute'],
            }]
        }
    }
    cluster.release.network_roles_metadata.append(conflicting_role)
    self.db.flush()

    resp = self.env.neutron_networks_get(cluster.id, expect_errors=True)

    self.assertEqual(400, resp.status_code)
    self.assertIn(
        'Duplicate VIP names found in network configuration',
        resp.json_body['message']
    )
class TestNovaNetworkConfigurationHandlerHA(BaseIntegrationTest):

View File

@ -142,7 +142,7 @@ class TestTaskManagers(BaseIntegrationTest):
self.assertEqual(al.additional_info["message"], "")
self.assertEqual(al.additional_info["output"], {})
def test_check_before_deployment_with_error(self):
def test_action_log_created_for_check_before_deployment_with_error(self):
self.env.create(
nodes_kwargs=[
{"pending_addition": True, "online": False}

View File

@ -16,9 +16,11 @@ from nailgun import consts
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import node
from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.network.checker import NetworkCheck
from nailgun.task import helpers
from nailgun.task.manager import ApplyChangesTaskManager
from nailgun.test.base import BaseIntegrationTest
from mock import MagicMock
@ -549,3 +551,42 @@ class TestNetworkCheck(BaseIntegrationTest):
get_by_cluster_id_mock.return_value = nodegroup_mock
self.assertRaises(errors.NetworkCheckError,
checker.neutron_check_gateways)
class TestCheckVIPsNames(BaseIntegrationTest):
    """Verifies that 'check_before_deployment' fails on duplicated VIP names."""

    def setUp(self):
        super(TestCheckVIPsNames, self).setUp()

        self.env.create(
            release_kwargs={'version': '2015.1.0-7.0'},
            cluster_kwargs={
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
            },
            nodes_kwargs=[{'roles': ['controller']}]
        )

        self.cluster = self.env.clusters[0]
        # 'check_before_deployment' operates on a task, so persist one
        # for the created cluster
        self.task = Task(cluster_id=self.cluster.id)
        self.db.add(self.task)
        self.db.flush()

    def test_check_vip_names(self):
        # For the error to be raised within 'check_before_deployment',
        # the VIP names introduced by plugins for the cluster must overlap
        # with those in the network configuration of the cluster itself.
        # Such an overlap is created here by patching the plugin manager
        # to return the release's own network roles a second time.
        cluster_net_roles = self.cluster.release.network_roles_metadata

        with patch(
            'nailgun.objects.cluster.PluginManager.get_network_roles',
            new=MagicMock(return_value=cluster_net_roles)
        ):
            # the underlying DuplicatedVIPNames error is expected to be
            # wrapped into CheckBeforeDeploymentError by the task manager
            with self.assertRaises(errors.CheckBeforeDeploymentError) as exc:
                ApplyChangesTaskManager(self.cluster.id)\
                    .check_before_deployment(self.task)

        self.assertIn('Duplicate VIP names found in network configuration',
                      exc.exception.message)