Assign networks based on template

When a node is added to a cluster that has a network
template, networks are mapped to NICs based on the
transformations in the template, and any bonds are created
in the database. Whenever the cluster's network template
changes, the NIC mapping is recreated.

Change-Id: I2b52444f3019241362e5a957bf191645b94ebaa7
Closes-bug: #1493391
Ryan Moe 2015-09-15 14:58:11 -07:00
parent c2be415016
commit 09a256d9c8
10 changed files with 561 additions and 53 deletions
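
For orientation, a minimal standalone sketch of the mapping this change introduces, simplified from NeutronManager70.get_interfaces_from_template in the diff below; the function name and the 'ether'/'bond' literals here stand in for the nailgun constants:

def interfaces_from_transformations(transformations):
    # Collect bare NICs and bonds keyed by bridge name (or by port name
    # when a port is attached to no bridge), as the new manager code does.
    interfaces = {}
    for tx in transformations:
        if tx['action'] == 'add-port':
            interfaces[tx.get('bridge', tx['name'])] = {
                'name': tx['name'],
                'type': 'ether',   # consts.NETWORK_INTERFACE_TYPES.ether
            }
        elif tx['action'] == 'add-bond':
            interfaces[tx.get('bridge', tx['name'])] = {
                'name': tx['name'],
                'slaves': [i.split('.')[0] for i in tx['interfaces']],
                'type': 'bond',    # consts.NETWORK_INTERFACE_TYPES.bond
                'bond_properties': tx.get('bond_properties', {}),
            }
    return interfaces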


@@ -59,11 +59,6 @@
"action": "add-port",
"bridge": "br-storage",
"name": "<% if3 %>"
},
{
"action": "add-port",
"bridge": "br-mgmt",
"name": "<% if4 %>"
}
],
"roles": {
@@ -95,7 +90,7 @@
{
"action": "add-port",
"bridge": "br-mgmt",
"name": "<% if2 %>"
"name": "<% if2 %>.104"
}
],
"roles": {
@@ -408,4 +403,256 @@
}
}
}
},
{
"pk": 2,
"adv_net_template": {
"default": {
"network_assignments": {
"mongo": {
"ep": "br-mongo"
},
"private": {
"ep": "br-prv"
},
"fuelweb_admin": {
"ep": "br-fw-admin"
},
"management": {
"ep": "br-mgmt"
},
"keystone": {
"ep": "br-keystone"
},
"public": {
"ep": "br-ex"
},
"storage": {
"ep": "br-storage"
},
"murano": {
"ep": "<% if5 %>"
}
},
"templates_for_node_role": {
"cinder": [
"common",
"storage",
"custom"
],
"controller": [
"public",
"private",
"storage",
"common",
"custom"
],
"compute": [
"common",
"private",
"storage",
"custom"
]
},
"nic_mapping": {
"default": {
"if4": "eth4",
"if5": "eth2",
"if1": "eth0",
"if2": "eth1",
"if3": "eth3"
}
},
"network_scheme": {
"common": {
"endpoints": [
"br-fw-admin",
"br-mgmt"
],
"transformations": [
{
"action": "add-br",
"name": "br-fw-admin"
},
{
"action": "add-port",
"bridge": "br-fw-admin",
"name": "<% if1 %>"
},
{
"action": "add-br",
"name": "br-mgmt"
},
{
"action": "add-port",
"name": "<% if3 %>.101"
},
{
"action": "add-port",
"name": "<% if4 %>.101"
},
{
"action": "add-bond",
"bridge": "br-mgmt",
"interface_properties": {},
"bond_properties": {
"mode": "active-backup"
},
"interfaces": ["<% if3 %>.101", "<% if4 %>.101"],
"name": "lnxbond0"
}
],
"roles": {
"mgmt/api": "br-mgmt",
"management": "br-mgmt",
"admin/pxe": "br-fw-admin",
"fw-admin": "br-fw-admin",
"mgmt/vip": "br-mgmt"
}
},
"storage": {
"endpoints": [
"br-storage"
],
"transformations": [
{
"action": "add-br",
"name": "br-storage"
},
{
"action": "add-port",
"bridge": "br-storage",
"name": "<% if2 %>.102"
}
],
"roles": {
"storage": "br-storage",
"ceph/public": "br-storage"
}
},
"public": {
"endpoints": [
"br-ex"
],
"transformations": [
{
"action": "add-br",
"name": "br-ex"
},
{
"action": "add-br",
"name": "br-floating",
"provider": "ovs"
},
{
"action": "add-patch",
"bridges": [
"br-floating",
"br-ex"
],
"provider": "ovs",
"mtu": 65000
},
{
"action": "add-port",
"bridge": "br-ex",
"name": "<% if2 %>"
}
],
"roles": {
"public/vip": "br-ex",
"neutron/floating": "br-floating",
"ex": "br-ex"
}
},
"private": {
"endpoints": [
"br-prv"
],
"transformations": [
{
"action": "add-br",
"name": "br-prv",
"provider": "ovs"
},
{
"action": "add-br",
"name": "br-aux"
},
{
"action": "add-patch",
"bridges": [
"br-prv",
"br-aux"
],
"provider": "ovs",
"mtu": 65000
},
{
"action": "add-port",
"bridge": "br-aux",
"name": "<% if5 %>.103"
}
],
"roles": {
"neutron/private": "br-prv"
}
},
"custom": {
"endpoints": [
"br-mongo",
"br-keystone",
"<% if5 %>"
],
"transformations": [
{
"action": "add-br",
"name": "br-mongo"
},
{
"action": "add-port",
"bridge": "br-mongo",
"name": "<% if4 %>.201"
},
{
"action": "add-br",
"name": "br-keystone"
},
{
"action": "add-port",
"bridge": "br-keystone",
"name": "<% if4 %>.202"
},
{
"action": "add-port",
"name": "<% if5 %>"
}
],
"roles": {
"murano/api": "<% if5 %>",
"keystone/api": "br-keystone",
"neutron/mesh": "br-mgmt",
"mgmt/database": "br-mgmt",
"sahara/api": "br-mgmt",
"mongo/db": "br-mongo",
"ceilometer/api": "br-mgmt",
"mgmt/messaging": "br-mgmt",
"glance/api": "br-mgmt",
"swift/api": "br-mgmt",
"heat/api": "br-mgmt",
"cinder/api": "br-mgmt",
"neutron/api": "br-mgmt",
"mgmt/corosync": "br-mgmt",
"nova/api": "br-mgmt",
"horizon": "br-mgmt",
"nova/migration": "br-mgmt",
"mgmt/memcache": "br-mgmt",
"cinder/iscsi": "br-mgmt",
"swift/replication": "br-mgmt"
}
}
}
}
}
}]
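
The <% ifN %> placeholders in the fixture above are resolved against nic_mapping before the transformations are applied, so "<% if4 %>.201" becomes "eth4.201" with the default mapping. A hypothetical helper illustrating just that substitution (the real work is done by the network template serializer):

import re

def render_iface(name, nic_mapping):
    # Replace each <% ifN %> token with the physical NIC it maps to.
    return re.sub(r'<%\s*(\w+)\s*%>',
                  lambda m: nic_mapping[m.group(1)], name)

assert render_iface('<% if4 %>.201', {'if4': 'eth4'}) == 'eth4.201'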


@@ -26,6 +26,10 @@ from nailgun.logger import logger
from nailgun.network.manager import AllocateVIPs70Mixin
from nailgun.network.manager import NetworkManager
from nailgun import objects
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkTemplateSerializer70
class NeutronManager(NetworkManager):
@@ -222,7 +226,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def assign_ips_in_node_group(
cls, net_id, net_name, node_ids, ip_ranges):
"""Assigns IP addresses for nodes in given network"""
"Assigns IP addresses for nodes in given network"
ips_by_node_id = db().query(
models.IPAddr.ip_addr,
models.IPAddr.node
@@ -312,3 +316,94 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
net_id, net_name, node_ids, ip_ranges)
cls.assign_admin_ips(nodes)
@classmethod
def _split_iface_name(cls, iface):
try:
iface, vlan = iface.split('.')
except ValueError:
vlan = None
return (iface, vlan)
@classmethod
def get_interfaces_from_template(cls, node):
"""Parse transformations for all node role templates.
Returns a dict of bare interfaces and bonds, keyed by bridge or port name.
"""
transformations = \
NeutronNetworkTemplateSerializer70.generate_transformations(node)
interfaces = {}
for tx in transformations:
if tx['action'] == 'add-port':
key = tx.get('bridge', tx['name'])
interfaces[key] = {
'name': tx['name'],
'type': consts.NETWORK_INTERFACE_TYPES.ether
}
if tx['action'] == 'add-bond':
key = tx.get('bridge', tx['name'])
interfaces[key] = {
'name': tx['name'],
'slaves': [{'name': cls._split_iface_name(i)[0]}
for i in tx['interfaces']],
'type': consts.NETWORK_INTERFACE_TYPES.bond,
'bond_properties': tx.get('bond_properties', {})
}
return interfaces
@classmethod
def assign_networks_by_template(cls, node):
"""Configures a node's network-to-nic mapping based on its template.
This also creates bonds in the database and ensures that
network groups match the data in the template and that all
networks are assigned to the correct interface or bond.
"""
interfaces = cls.get_interfaces_from_template(node)
endpoint_mapping = cls.get_node_network_mapping(node)
em = dict((reversed(ep) for ep in endpoint_mapping))
node_ifaces = {}
for bridge, values in interfaces.items():
network = em.get(bridge)
# There is no network associated with this bridge (e.g. br-aux)
if not network:
continue
iface, vlan = cls._split_iface_name(values['name'])
node_ifaces.setdefault(iface, values)
node_ifaces[iface].setdefault('assigned_networks', [])
# Default admin network has no node group
if network == consts.NETWORKS.fuelweb_admin:
net_db = cls.get_admin_network_group(node.id)
else:
net_db = objects.NetworkGroup.get_from_node_group_by_name(
node.group_id, network)
# Ensure network_group configuration is consistent
# with the template
if vlan != net_db.vlan_start:
net_db.vlan_start = vlan
db().add(net_db)
db().flush()
ng = {'id': net_db.id}
node_ifaces[iface]['assigned_networks'].append(ng)
if values['type'] == consts.NETWORK_INTERFACE_TYPES.ether:
nic = objects.Node.get_nic_by_name(node, iface)
node_ifaces[iface]['id'] = nic.id
node_data = {
'id': node.id,
'interfaces': node_ifaces.values()
}
cls._update_attrs(node_data)
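
A sketch of how these new manager methods are exercised end to end, mirroring the tests added later in this commit; cluster and template are assumed to be an existing cluster object and a parsed template dict:

nm = objects.Cluster.get_network_manager(cluster)   # NeutronManager70 for 7.0
objects.Cluster.set_network_template(cluster, template)
# Setting the template re-applies it to the cluster's nodes; the pieces can
# also be driven directly for a single node:
node = cluster.nodes[0]
ifaces = nm.get_interfaces_from_template(node)   # {bridge: NIC/bond dict}
nm.assign_networks_by_template(node)             # persists the NIC mapping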


@@ -986,6 +986,12 @@ class Cluster(NailgunObject):
cls.update_nodes_network_template(instance, instance.nodes)
db().flush()
if template is None:
net_manager = cls.get_network_manager(instance)
for node in instance.nodes:
net_manager.clear_bond_configuration(node)
net_manager.assign_networks_by_default(node)
@classmethod
def update_nodes_network_template(cls, instance, nodes):
from nailgun.objects import Node


@@ -433,6 +433,7 @@ class Node(NailgunObject):
cls.update_interfaces(instance, update_by_agent)
cluster_changed = False
add_to_cluster = False
if "cluster_id" in data:
new_cluster_id = data.pop("cluster_id")
if instance.cluster_id:
@@ -450,7 +451,8 @@
if new_cluster_id is not None:
# assigning node to cluster
cluster_changed = True
cls.add_into_cluster(instance, new_cluster_id)
add_to_cluster = True
instance.cluster_id = new_cluster_id
if "group_id" in data:
new_group_id = data.pop("group_id")
@@ -459,7 +461,7 @@
nm.clear_assigned_networks(instance)
nm.clear_bond_configuration(instance)
instance.group_id = new_group_id
cls.add_into_cluster(instance, instance.cluster_id)
add_to_cluster = True
# calculating flags
roles_changed = (
@@ -477,6 +479,9 @@
if pending_roles_changed:
cls.update_pending_roles(instance, pending_roles)
if add_to_cluster:
cls.add_into_cluster(instance, instance.cluster_id)
if any((
roles_changed,
pending_roles_changed,
@@ -703,7 +708,7 @@ class Node(NailgunObject):
:returns: None
"""
instance.cluster_id = cluster_id
db().flush()
cls.assign_group(instance)
network_manager = Cluster.get_network_manager(instance.cluster)
network_manager.assign_networks_by_default(instance)
@@ -873,6 +878,11 @@
output['roles'][role] = ep
instance.network_template = output
db().flush()
if instance.cluster:
nm = Cluster.get_network_manager(instance.cluster)
nm.assign_networks_by_template(instance)
@classmethod
def get_unique_hostname(cls, node, cluster_id):
@@ -887,6 +897,16 @@
hostname = 'node-{0}'.format(node.uuid)
return hostname
@classmethod
def get_nic_by_name(cls, instance, iface_name):
nic = db().query(models.NodeNICInterface).filter_by(
name=iface_name
).filter_by(
node_id=instance.id
).first()
return nic
class NodeCollection(NailgunCollection):
"""Node collection"""


@@ -1097,3 +1097,122 @@ class TestNovaNetworkManager70(TestNeutronManager70):
objects.Cluster.get_controllers_node_group(self.cluster),
mock.ANY, vip_type='public')
self.assertEqual(endpoint_ip, vip)
class TestTemplateManager70(BaseNetworkManagerTest):
def setUp(self):
super(TestTemplateManager70, self).setUp()
self.cluster = self.env.create(
release_kwargs={'version': '1111-7.0'},
cluster_kwargs={
'api': True,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
}
)
self.cluster = objects.Cluster.get_by_uid(self.cluster['id'])
self.nm = objects.Cluster.get_network_manager(self.cluster)
self.net_template = self.env.read_fixtures(['network_template'])[1]
self.env.create_nodes_w_interfaces_count(
1, 5,
roles=['controller'],
cluster_id=self.cluster['id']
)
self.env._create_network_group(name='mongo', vlan_start=None)
self.env._create_network_group(name='keystone', vlan_start=None)
self.env._create_network_group(name='murano', vlan_start=None)
objects.Cluster.set_network_template(
self.cluster,
self.net_template
)
def _check_nic_mapping(self, node, expected_mapping):
for nic in node.nic_interfaces + node.bond_interfaces:
assigned_nets = [net['name'] for net in nic.assigned_networks]
self.assertItemsEqual(assigned_nets, expected_mapping[nic.name])
def test_assign_networks_based_on_template(self):
expected_mapping = {
'eth0': ['fuelweb_admin'],
'eth1': ['public', 'storage'],
'eth2': ['murano'],
'eth3': [],
'eth4': ['mongo', 'keystone'],
'eth5': [],
'lnxbond0': ['management']
}
node = self.env.nodes[0]
self._check_nic_mapping(node, expected_mapping)
# Network groups should have their vlan updated to match what
# is defined in the template.
node_networks = self.nm.get_node_networks(node)
keystone_ng = self.nm.get_network_by_netname('keystone', node_networks)
self.assertEqual(keystone_ng['vlan'], 202)
def test_get_interfaces_from_template(self):
expected_interfaces = {
'br-aux': {
'interface_properties': {},
'name': 'eth3.103',
'offloading_modes': [],
'type': 'ether'
},
'br-ex': {
'interface_properties': {},
'name': 'eth1',
'offloading_modes': [],
'type': 'ether'
},
'br-fw-admin': {
'interface_properties': {},
'name': 'eth0',
'offloading_modes': [],
'type': 'ether'
},
'br-keystone': {
'interface_properties': {},
'name': 'eth4.202',
'offloading_modes': [],
'type': 'ether'
},
'br-mgmt': {
'bond_properties': {'mode': u'active-backup'},
'name': u'lnxbond0',
'offloading_modes': [],
'slaves': [{'name': u'eth3'}, {'name': u'eth4'}],
'type': 'bond'
},
'br-mongo': {
'interface_properties': {},
'name': u'eth4.201',
'offloading_modes': [],
'type': 'ether'
},
'br-storage': {
'interface_properties': {},
'name': 'eth1.102',
'offloading_modes': [],
'type': 'ether'
},
'eth2': {
'interface_properties': {},
'name': 'eth2',
'offloading_modes': [],
'type': 'ether'
},
'eth3.101': {
'interface_properties': {},
'name': u'eth3.101',
'offloading_modes': [],
'type': 'ether'
},
'eth4.101': {
'interface_properties': {},
'name': u'eth4.101',
'offloading_modes': [],
'type': 'ether'
}
}
interfaces = self.nm.get_interfaces_from_template(self.env.nodes[0])
self.assertItemsEqual(interfaces, expected_interfaces)


@@ -1002,29 +1002,34 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
def create_env(self, segment_type):
return self.env.create(
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'api': False,
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': segment_type},
nodes_kwargs=[
{'roles': ['controller'],
'pending_addition': True,
'name': self.node_name},
{'roles': ['compute', 'cinder'],
'pending_addition': True,
'name': self.node_name}
])
)
nodes_kwargs = {
'roles': ['controller'],
'pending_addition': True,
'name': self.node_name,
'cluster_id': cluster['id']
}
self.env.create_nodes_w_interfaces_count(1, 4, **nodes_kwargs)
nodes_kwargs['roles'] = ['compute', 'cinder']
self.env.create_nodes_w_interfaces_count(1, 4, **nodes_kwargs)
def create_more_nodes(self):
self.env.create_node(
roles=['cinder'], cluster_id=self.cluster.id)
self.env.create_node(
return cluster
def create_more_nodes(self, iface_count=2):
self.env.create_nodes_w_interfaces_count(
1, iface_count, roles=['cinder'], cluster_id=self.cluster.id)
self.env.create_nodes_w_interfaces_count(
1, iface_count,
roles=['cinder', 'controller'], cluster_id=self.cluster.id)
self.env.create_node(
roles=['compute'], cluster_id=self.cluster.id)
self.env.create_nodes_w_interfaces_count(
1, iface_count, roles=['compute'], cluster_id=self.cluster.id)
def check_node_ips_on_certain_networks(self, node, net_names):
ips = db().query(models.IPAddr).filter_by(node=node.id)
@@ -1045,7 +1050,7 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
self.assertIs(net_serializer, NeutronNetworkTemplateSerializer70)
def test_ip_assignment_according_to_template(self):
self.create_more_nodes()
self.create_more_nodes(iface_count=4)
# according to the template different node roles have different sets of
# networks
node_roles_vs_net_names = [
@@ -1297,12 +1302,7 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
def test_multiple_node_roles_network_metadata(self):
nm = objects.Cluster.get_network_manager(self.env.clusters[0])
ip_by_net = {
'fuelweb_admin': None,
'storage': None,
'management': None,
'public': None
}
ip_by_net = {}
for node_data in self.serialized_for_astute:
self.assertItemsEqual(
node_data['network_metadata'], ['nodes', 'vips'])
@@ -1315,7 +1315,9 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
)
node = objects.Node.get_by_uid(node_attrs['uid'])
networks = nm.get_node_networks(node)
for net in ip_by_net:
node_nets = [n['name'] for n in networks]
for net in node_nets:
netgroup = nm.get_network_by_netname(net, networks)
if netgroup.get('ip'):
ip_by_net[net] = netgroup['ip'].split('/')[0]


@@ -458,17 +458,20 @@ class TestNetworkVerificationWithTemplates(BaseIntegrationTest):
self.env.set_interfaces_in_meta(meta1, [
{"name": "eth0", "mac": "00:00:00:00:00:66"},
{"name": "eth1", "mac": "00:00:00:00:00:77"},
{"name": "eth2", "mac": "00:00:00:00:00:88"}]
{"name": "eth2", "mac": "00:00:00:00:00:88"},
{"name": "eth3", "mac": "00:00:00:00:00:99"}]
)
self.env.set_interfaces_in_meta(meta2, [
{"name": "eth0", "mac": "00:00:00:00:11:66"},
{"name": "eth1", "mac": "00:00:00:00:11:77"},
{"name": "eth2", "mac": "00:00:00:00:11:88"}]
{"name": "eth2", "mac": "00:00:00:00:11:88"},
{"name": "eth3", "mac": "00:00:00:00:11:99"}]
)
self.env.set_interfaces_in_meta(meta3, [
{"name": "eth0", "mac": "00:00:00:00:22:66"},
{"name": "eth1", "mac": "00:00:00:00:22:77"},
{"name": "eth2", "mac": "00:00:00:00:22:88"}]
{"name": "eth2", "mac": "00:00:00:00:22:88"},
{"name": "eth3", "mac": "00:00:00:00:22:99"}]
)
self.cluster = self.env.create(
release_kwargs={'version': '2015.1.0-7.0'},
@@ -515,15 +518,15 @@ class TestNetworkVerificationWithTemplates(BaseIntegrationTest):
def expected_networks_on_undeployed_node(self):
compute_networks = [
{u'vlans': [0], u'iface': u'eth0'},
{u'vlans': [0], u'iface': u'eth1'},
{u'vlans': [104], u'iface': u'eth1'},
{u'vlans': [0], u'iface': u'eth2'},
{u'vlans': [0, 101] + self.private_vlan_ids, u'iface': u'eth3'},
{u'vlans': [101] + self.private_vlan_ids, u'iface': u'eth3'},
]
return [
[
{u'vlans': [0], u'iface': u'eth0'},
{u'vlans': [0], u'iface': u'eth1'},
{u'vlans': [104], u'iface': u'eth1'},
],
compute_networks,
compute_networks,
@@ -533,16 +536,16 @@ class TestNetworkVerificationWithTemplates(BaseIntegrationTest):
def expected_networks_on_deployed_node(self):
compute_networks = [
{u'vlans': [0], u'iface': u'eth0'},
{u'vlans': [0], u'iface': u'eth1'},
{u'vlans': [104], u'iface': u'eth1'},
{u'vlans': [0], u'iface': u'eth2'},
{u'vlans': [0, 101] + self.private_vlan_ids,
{u'vlans': [101] + self.private_vlan_ids,
u'iface': u'eth3'},
]
return [
[
{u'vlans': [0], u'iface': u'eth0'},
{u'vlans': [0], u'iface': u'eth1'},
{u'vlans': [104], u'iface': u'eth1'},
],
compute_networks,
compute_networks,


@@ -17,7 +17,6 @@
from oslo_serialization import jsonutils
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NodeGroup
from nailgun import consts
from nailgun.test import base
@@ -60,18 +59,29 @@ class TestHandlers(BaseIntegrationTest):
self.assertEqual(template, resp.json_body)
def test_network_template_upload_on_multi_group_cluster(self):
cluster = self.env.create_cluster(api=False)
cluster = self.env.create_cluster(
api=False,
net_provider=consts.CLUSTER_NET_PROVIDERS.neutron
)
cluster.release.version = '1111-7.0'
custom_group_name = 'group-custom-1'
custom_group = NodeGroup(name=custom_group_name, cluster_id=cluster.id)
self.env.db.add(custom_group)
self.env.db.flush()
resp = self.env.create_node_group(
name=custom_group_name, cluster_id=cluster.id
)
custom_group = resp.json_body
node1 = self.env.create_node(cluster_id=cluster.id,
roles=["controller"])
node2 = self.env.create_node(cluster_id=cluster.id,
roles=["compute"],
group_id=custom_group.id)
node1 = self.env.create_nodes_w_interfaces_count(
1, 5,
cluster_id=cluster.id,
roles=["controller"],
)[0]
node2 = self.env.create_nodes_w_interfaces_count(
1, 5,
cluster_id=cluster.id,
roles=["compute"],
group_id=custom_group['id'],
)[0]
template = self.env.read_fixtures(['network_template'])[0]
template.pop('pk') # PK is not needed


@@ -20,6 +20,7 @@ from oslo_serialization import jsonutils
from nailgun.db.sqlalchemy.models import NodeBondInterface
from nailgun import consts
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
@@ -282,7 +283,11 @@ class TestAssignmentHandlers(BaseIntegrationTest):
}
}
cluster = self.env.create_cluster(api=False)
cluster = self.env.create_cluster(
api=False,
net_provider=consts.CLUSTER_NET_PROVIDERS.neutron
)
cluster.release.version = '1111-7.0'
cluster.network_config.configuration_template = net_template
node = self.env.create_node()


@@ -269,6 +269,7 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
def setUp(self):
super(TestCheckBeforeDeploymentTask, self).setUp()
self.env.create(
release_kwargs={'version': '1111-7.0'},
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'