Allow VLAN IDs intersection

Remove the old validation of VLAN ID intersection and add a new one.
VLAN IDs now cannot be repeated within any particular NIC (or bond),
but different networks may share the same VLAN ID in general
(see the sketch below). The new validation covers the case when a
network template is not in use.

Partial-Bug: #1473047

Change-Id: I27a8326edc00acc6b95ee51fce203527e82e47d7
Blueprint: nodegroups-share-network-parameters
Aleksey Kasatkin 2015-11-10 20:24:44 +02:00
parent 36b28711ac
commit 2eb30ed70c
4 changed files with 183 additions and 21 deletions
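For illustration only, the rule enforced by the new check boils down to the following minimal, self-contained sketch (standalone Python with made-up data and a hypothetical helper name, not nailgun code): a VLAN ID may repeat across the cluster, but two tagged networks assigned to the same NIC or bond must not carry the same VLAN ID.

# Minimal standalone sketch (not nailgun code): VLAN IDs may repeat across
# the cluster, but two tagged networks on one NIC or bond must not share one.
from collections import defaultdict


def find_vlan_conflicts(assignments):
    """assignments: iterable of (node, iface, network_name, vlan_id) tuples.

    Return {(node, iface): [network names]} for every interface that
    carries at least two networks tagged with the same VLAN ID.
    """
    per_iface = defaultdict(list)
    for node, iface, net, vlan in assignments:
        if vlan is not None:  # untagged networks are not checked here
            per_iface[(node, iface)].append((net, vlan))

    conflicts = {}
    for key, nets in per_iface.items():
        by_vlan = defaultdict(list)
        for net, vlan in nets:
            by_vlan[vlan].append(net)
        dupes = [n for names in by_vlan.values() if len(names) > 1
                 for n in names]
        if dupes:
            conflicts[key] = dupes
    return conflicts


# Sharing VLAN 111 across node groups is fine when the networks sit on
# different nodes/interfaces...
assert not find_vlan_conflicts([
    ('node-1', 'eth1', 'management', 111),
    ('node-2', 'eth1', 'management', 111),
])
# ...but two tagged networks with VLAN 111 on the same NIC is an error.
assert find_vlan_conflicts([
    ('node-1', 'eth1', 'management', 111),
    ('node-1', 'eth1', 'storage', 111),
]) == {('node-1', 'eth1'): ['management', 'storage']}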


@@ -16,11 +16,12 @@
from itertools import combinations
from itertools import groupby
from itertools import product
import netaddr
from nailgun import consts
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun import objects
@@ -44,7 +45,7 @@ class NetworkCheck(object):
self.net_man = objects.Cluster.get_network_manager(self.cluster)
self.net_provider = self.cluster.net_provider
admin_ng = self.net_man.get_admin_network_group()
fields = NetworkGroup.__mapper__.columns.keys() + ['meta']
fields = models.NetworkGroup.__mapper__.columns.keys() + ['meta']
net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
fields)
# change Admin name for UI
@@ -327,9 +328,8 @@ class NetworkCheck(object):
def neutron_check_segmentation_ids(self):
"""Check neutron segmentation ids
1. check networks VLAN IDs not in Neutron L2 private VLAN ID range
check networks VLAN IDs not in Neutron L2 private VLAN ID range
for VLAN segmentation only
2. check networks VLAN IDs should not intersect
(neutron)
"""
tagged_nets = dict((n["name"], n["vlan_start"]) for n in filter(
@@ -352,15 +352,6 @@ class NetworkCheck(object):
format(nets_with_errors)
raise errors.NetworkCheckError(err_msg)
# check networks VLAN IDs should not intersect
net_intersect = [name for name, vlan in tagged_nets.iteritems()
if tagged_nets.values().count(vlan) >= 2]
if net_intersect:
err_msg = u"{0} networks use the same VLAN tags. " \
u"You should assign different VLAN tag " \
u"to every network.".format(", ".join(net_intersect))
raise errors.NetworkCheckError(err_msg)
def neutron_check_network_address_spaces_intersection(self):
"""Check intersection of address spaces of all networks including admin
@@ -673,6 +664,51 @@ class NetworkCheck(object):
})
self.expose_error_messages()
def check_vlan_ids_intersection(self):
"""Networks VLAN IDs should not intersect for any node's interface."""
if self.cluster.network_config.configuration_template is not None:
# TODO(akasatkin) checking of network templates to be considered
return
tagged_nets = dict((n["id"], n["vlan_start"]) for n in filter(
lambda n: (n["vlan_start"] is not None), self.networks))
# nothing to check
if len(tagged_nets) < 2:
return
nodes_networks = \
objects.Cluster.get_networks_to_interfaces_mapping_on_all_nodes(
self.cluster)
# first, group by hostname
for node_name, data in groupby(nodes_networks, lambda x: x[0]):
# then group by interface name for particular node
for if_name, nic_nets in groupby(data, lambda x: x[1]):
net_ids = [net_id
for _, _, net_id in nic_nets
if net_id in tagged_nets]
if len(net_ids) < 2:
# no more than 1 tagged network is on the interface
continue
vlan_ids = [tagged_nets[n] for n in net_ids]
if len(set(vlan_ids)) < len(vlan_ids):
# some VLAN IDs are not unique for this interface
seen_vlan_ids = set()
duplicate_net_ids = set()
for idx in range(len(vlan_ids)):
if vlan_ids[idx] in seen_vlan_ids:
duplicate_net_ids.add(net_ids[idx])
seen_vlan_ids.add(vlan_ids[idx])
net_names = [net['name'] for net in self.networks
if net['id'] in duplicate_net_ids]
err_msg = u"Node {0}, interface {1}: {2} networks use " \
u"the same VLAN tags. Different VLAN tags should be " \
u"assigned to the networks on the same interface.".\
format(node_name, if_name, ", ".join(net_names))
raise errors.NetworkCheckError(err_msg)
def check_configuration(self):
"""check network configuration parameters"""
if self.net_provider == consts.CLUSTER_NET_PROVIDERS.neutron:
@@ -694,4 +730,5 @@ class NetworkCheck(object):
"""check mapping of networks to NICs"""
self.check_untagged_intersection()
self.check_bond_slaves_speeds()
self.check_vlan_ids_intersection()
return self.err_msgs
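
The nested grouping in check_vlan_ids_intersection above only works because itertools.groupby merges adjacent items, so it relies on the mapping arriving pre-sorted by node name and then by interface name (which the query that produces it guarantees). A minimal sketch with made-up rows, assuming the (hostname, interface name, network id) tuple shape described in the docstrings:

# Sketch of the nested grouping used in check_vlan_ids_intersection above.
# groupby() only merges adjacent items, so the rows must already be sorted
# by node name and then by interface name.
from itertools import groupby

rows = [  # (hostname, interface name, network group id), already sorted
    ('node-1', 'bond0', 3),
    ('node-1', 'bond0', 4),
    ('node-1', 'eth3', 5),
    ('node-2', 'eth1', 3),
]

for node_name, data in groupby(rows, lambda x: x[0]):
    for if_name, nic_nets in groupby(data, lambda x: x[1]):
        net_ids = [net_id for _, _, net_id in nic_nets]
        print(node_name, if_name, net_ids)
# node-1 bond0 [3, 4]
# node-1 eth3 [5]
# node-2 eth1 [3]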


@@ -1100,6 +1100,43 @@ class Cluster(NailgunObject):
return bool(instance.attributes.editable['additional_components'].
get((component), {}).get('value'))
@classmethod
def get_networks_to_interfaces_mapping_on_all_nodes(cls, instance):
"""Query networks to interfaces mapping on all nodes in cluster.
Returns combined results for NICs and bonds for every node.
Names are returned for node and interface (NIC or bond),
IDs are returned for networks. Results are sorted by node name then
interface name.
"""
nodes_nics_networks = db().query(
models.Node.hostname,
models.NodeNICInterface.name,
models.NetworkGroup.id,
).join(
models.Node.nic_interfaces,
models.NodeNICInterface.assigned_networks_list
).filter(
models.Node.cluster_id == instance.id,
)
nodes_bonds_networks = db().query(
models.Node.hostname,
models.NodeBondInterface.name,
models.NetworkGroup.id,
).join(
models.Node.bond_interfaces,
models.NodeBondInterface.assigned_networks_list
).filter(
models.Node.cluster_id == instance.id,
)
return nodes_nics_networks.union(
nodes_bonds_networks
).order_by(
# column 1 then 2 from the result. cannot call them by name as
# names for column 2 are different in this union
'1', '2'
)
class ClusterCollection(NailgunCollection):
"""Cluster collection."""


@@ -1213,6 +1213,14 @@ class BaseTestCase(TestCase):
self.assertIsInstance(inst, exc)
self.assertEqual(inst.message, msg)
def assertRaisesWithMessageIn(self, exc, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.fail()
except Exception as inst:
self.assertIsInstance(inst, exc)
self.assertIn(msg, inst.message)
def assertValidJSON(self, data):
self.assertNotRaises(ValueError, jsonutils.loads, data)


@@ -14,14 +14,19 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from netaddr import IPAddress
from netaddr import IPNetwork
import six
from nailgun import consts
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import NeutronConfig
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.errors import errors
from nailgun.network.checker import NetworkCheck
from nailgun.test.base import BaseIntegrationTest
@@ -83,6 +88,11 @@ class TestNetworkChecking(BaseIntegrationTest):
self.assertEqual(resp.status_code, 200)
return resp.json_body
def verify_neutron_networks_w_error(self, nets):
task = self.env.launch_verify_networks(nets)
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
return task
class TestNovaHandlers(TestNetworkChecking):
@@ -550,15 +560,85 @@ class TestNeutronHandlersGre(TestNetworkChecking):
def test_network_checking_fails_on_network_vlan_match(self):
self.find_net_by_name('management')['vlan_start'] = 111
self.find_net_by_name('storage')['vlan_start'] = 111
self.update_neutron_networks_success(self.cluster.id, self.nets)
task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
self.assertIn(
" networks use the same VLAN tags. "
"You should assign different VLAN tag "
"to every network.",
task['message'])
self.assertIn("management", task['message'])
self.assertIn("storage", task['message'])
checker = NetworkCheck(mock.Mock(cluster=self.cluster), {})
self.assertRaisesWithMessageIn(
errors.NetworkCheckError,
" networks use the same VLAN tags."
" Different VLAN tags"
" should be assigned to the networks on the same"
" interface.",
checker.check_interface_mapping
)
def test_network_checking_ok_with_template_on_network_vlan_match(self):
self.find_net_by_name('management')['vlan_start'] = 111
self.find_net_by_name('storage')['vlan_start'] = 111
self.update_neutron_networks_success(self.cluster.id, self.nets)
checker = NetworkCheck(mock.Mock(cluster=self.cluster), {})
self.cluster.network_config.configuration_template = {}
self.assertNotRaises(
errors.NetworkCheckError,
checker.check_interface_mapping
)
@mock.patch('nailgun.task.task.rpc.cast')
def test_network_checking_with_equal_vlans_on_multi_groups(self, _):
new_nets = {'management': {'cidr': '199.99.20.0/24'},
'storage': {'cidr': '199.98.20.0/24'},
'private': {'cidr': '199.95.20.0/24'},
'public': {'cidr': '199.96.20.0/24'}}
# save VLAN IDs from networks of default node group
# to set them for networks of new node group
nets = self.env.neutron_networks_get(self.cluster.id).json_body
for net in nets['networks']:
if net['name'] in new_nets:
new_nets[net['name']]['vlan_start'] = net['vlan_start']
resp = self.env.create_node_group()
group_id = resp.json_body['id']
nets = self.env.neutron_networks_get(self.cluster.id).json_body
for net in nets['networks']:
if net['name'] in new_nets:
if net['group_id'] == group_id:
# set CIDRs and VLAN IDs for new networks
# VLAN IDs are equal to those in default node group
for param, value in six.iteritems(new_nets[net['name']]):
net[param] = value
if net['meta']['notation'] == 'ip_ranges':
net['ip_ranges'] = [[
str(IPAddress(IPNetwork(net['cidr']).first + 2)),
str(IPAddress(IPNetwork(net['cidr']).first + 126)),
]]
if not net['meta']['use_gateway']:
net['ip_ranges'] = [[
str(IPAddress(IPNetwork(net['cidr']).first + 2)),
str(IPAddress(IPNetwork(net['cidr']).first + 254)),
]]
net['meta']['use_gateway'] = True
net['gateway'] = str(
IPAddress(IPNetwork(net['cidr']).first + 1))
resp = self.env.neutron_networks_put(self.cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.env.create_nodes_w_interfaces_count(
nodes_count=3,
if_count=2,
roles=['compute'],
pending_addition=True,
cluster_id=self.cluster.id,
group_id=group_id)
resp = self.env.cluster_changes_put(self.cluster.id)
self.assertEqual(resp.status_code, 202)
task = resp.json_body
self.assertEqual(task['status'], consts.TASK_STATUSES.pending)
self.assertEqual(task['name'], consts.TASK_NAMES.deploy)
def test_network_checking_fails_if_internal_gateway_not_in_cidr(self):
self.nets['networking_parameters']['internal_gateway'] = '172.16.10.1'