Merge "Add DB mappings with NSX logical switches"

Jenkins 2014-02-12 23:32:48 +00:00 committed by Gerrit Code Review
commit 4d63a13681
12 changed files with 319 additions and 95 deletions

View File

@@ -0,0 +1,59 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_switch_mappings
Revision ID: 3d3cb89d84ee
Revises: 1421183d533f
Create Date: 2014-01-07 15:37:41.323020
"""
# revision identifiers, used by Alembic.
revision = '3d3cb89d84ee'
down_revision = '1421183d533f'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# Create table for network mappings
op.create_table(
'neutron_nsx_network_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'],
ondelete='CASCADE'),
# There might be multiple switches for a neutron network
sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'),
)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('neutron_nsx_network_mappings')
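The composite primary key is the crux of this migration: because the key is the (neutron_id, nsx_id) pair rather than neutron_id alone, one neutron network may map to several NSX logical switches. A minimal sketch of what this permits, using the add_neutron_nsx_network_mapping and get_nsx_switch_ids helpers added elsewhere in this commit; the uuids are illustrative, and a networks row with that id must already exist because of the foreign key:

from neutron.db import api as db_api
from neutron.plugins.nicira.dbexts import nicira_db

session = db_api.get_session()
# Two NSX switches mapped to one neutron network: allowed, since the
# primary key covers the (neutron_id, nsx_id) pair.
nicira_db.add_neutron_nsx_network_mapping(session, 'net-a', 'lswitch-1')
nicira_db.add_neutron_nsx_network_mapping(session, 'net-a', 'lswitch-2')
# Both rows come back in the chained-switches case.
assert sorted(nicira_db.get_nsx_switch_ids(session, 'net-a')) == [
    'lswitch-1', 'lswitch-2']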

View File

@@ -22,6 +22,7 @@
import logging
import os
import uuid
from oslo.config import cfg
from sqlalchemy import exc as sql_exc
@@ -394,9 +395,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
allow_extra_lswitches = True
break
try:
return self._handle_lswitch_selection(self.cluster, network,
network_bindings, max_ports,
allow_extra_lswitches)
return self._handle_lswitch_selection(
context, self.cluster, network, network_bindings,
max_ports, allow_extra_lswitches)
except NvpApiClient.NvpApiException:
err_desc = _("An exception occurred while selecting logical "
"switch for the port")
@@ -823,12 +824,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
pnet.SEGMENTATION_ID: binding.vlan_id}
for binding in bindings]
def _handle_lswitch_selection(self, cluster, network,
def _handle_lswitch_selection(self, context, cluster, network,
network_bindings, max_ports,
allow_extra_lswitches):
lswitches = nvplib.get_lswitches(cluster, network.id)
lswitches = nsx_utils.fetch_nsx_switches(
context.session, cluster, network.id)
try:
# TODO(salvatore-orlando) find main_ls too!
return [ls for ls in lswitches
if (ls['_relations']['LogicalSwitchStatus']
['lport_count'] < max_ports)].pop(0)
@@ -837,23 +838,35 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
LOG.debug(_("No switch has available ports (%d checked)"),
len(lswitches))
if allow_extra_lswitches:
main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
if 'multi_lswitch' not in tag_dict:
tags = main_ls[0]['tags']
# The 'main' logical switch is either the only one available
# or the one where the 'multi_lswitch' tag was set
while lswitches:
main_ls = lswitches.pop(0)
tag_dict = dict((x['scope'], x['tag'])
for x in main_ls['tags'])
if 'multi_lswitch' in tag_dict:
break
else:
# by construction this statement is hit if there is only one
# logical switch and the multi_lswitch tag has not been set.
# The tag must therefore be added.
tags = main_ls['tags']
tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
nvplib.update_lswitch(cluster,
main_ls[0]['uuid'],
main_ls[0]['display_name'],
main_ls['uuid'],
main_ls['display_name'],
network['tenant_id'],
tags=tags)
transport_zone_config = self._convert_to_nvp_transport_zones(
cluster, network, bindings=network_bindings)
selected_lswitch = nvplib.create_lswitch(
cluster, network.tenant_id,
cluster, network.id, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
transport_zone_config,
network.id)
transport_zone_config)
# add a mapping between the neutron network and the newly
# created logical switch
nicira_db.add_neutron_nsx_network_mapping(
context.session, network.id, selected_lswitch['uuid'])
return selected_lswitch
else:
LOG.error(_("Maximum number of logical ports reached for "
@@ -952,19 +965,21 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
transport_zone_config = self._convert_to_nvp_transport_zones(
self.cluster, net_data)
external = net_data.get(ext_net_extn.EXTERNAL)
# NOTE(salv-orlando): Pre-generating uuid for Neutron
# network. This will be removed once the network create operation
# becomes an asynchronous task
net_data['id'] = str(uuid.uuid4())
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
lswitch = nvplib.create_lswitch(
self.cluster, tenant_id, net_data.get('name'),
self.cluster, net_data['id'],
tenant_id, net_data.get('name'),
transport_zone_config,
shared=net_data.get(attr.SHARED))
net_data['id'] = lswitch['uuid']
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
# Ensure there's an id in net_data
net_data['id'] = new_net['id']
# Process port security extension
self._process_network_port_security_create(
context, net_data, new_net)
@@ -977,7 +992,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self.get_qos_queue(context, net_queue_id)
self._process_network_queue_mapping(
context, new_net, net_queue_id)
# Add mapping between neutron network and NSX switch
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
nicira_db.add_neutron_nsx_network_mapping(
context.session, new_net['id'],
lswitch['uuid'])
if (net_data.get(mpnet.SEGMENTS) and
isinstance(provider_type, bool)):
net_bindings = []
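The external-network guard used above (and at switch-creation time) relies on Python operator precedence: and binds tighter than or, so the condition parses as '(not set) or (set and not external)', i.e. the lswitch work happens unless the network is known to be external. A sketch with a hypothetical stand-in for attr.is_attr_set:

UNSET = object()

def is_attr_set(value):
    # Hypothetical stand-in: the real helper treats a sentinel as unset.
    return value is not UNSET

for external in (UNSET, False, True):
    not_external = (not is_attr_set(external) or
                    is_attr_set(external) and not external)
    print(not_external)  # True, True, False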
@@ -1007,7 +1027,11 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
for port in router_iface_ports:
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
# Before removing the entry from the Neutron DB, retrieve the NSX
# switch identifiers so they can be removed from the backend
if not external:
lswitch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
super(NvpPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
@@ -1036,8 +1060,6 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Do not go to NVP for external networks
if not external:
try:
lswitch_ids = [ls['uuid'] for ls in
nvplib.get_lswitches(self.cluster, id)]
nvplib.delete_networks(self.cluster, id, lswitch_ids)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)

View File

@@ -1604,16 +1604,12 @@ class VcnsCallbacks(object):
def _process_base_create_lswitch_args(*args, **kwargs):
tags = [{"tag": nvplib.NEUTRON_VERSION, "scope": "quantum"}]
if args[1]:
tags.append({"tag": args[1], "scope": "os_tid"})
switch_name = args[2]
tz_config = args[3]
if "neutron_net_id" in kwargs or len(args) >= 5:
neutron_net_id = kwargs.get('neutron_net_id')
if neutron_net_id is None:
neutron_net_id = args[4]
tags.append({"tag": neutron_net_id,
"scope": "quantum_net_id"})
tags.append({"tag": args[1],
"scope": "quantum_net_id"})
if args[2]:
tags.append({"tag": args[2], "scope": "os_tid"})
switch_name = args[3]
tz_config = args[4]
if kwargs.get("shared", False) or len(args) >= 6:
tags.append({"tag": "true", "scope": "shared"})
if kwargs.get("tags"):
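After the reshuffle the positional layout is (cluster, neutron_net_id, tenant_id, display_name, tz_config, shared), and the quantum_net_id tag is always emitted rather than being optional. A sketch of a call in the new form, mirroring the updated tests further below; 'net-uuid' and 'tenant-uuid' are illustrative values:

result = nsp._process_base_create_lswitch_args(
    cluster, 'net-uuid', 'tenant-uuid', 'my-switch', tz_config)
# result[1] is tz_config; result[2] is the tag list, which now always
# includes the quantum_net_id tag:
#   [{'scope': 'quantum', 'tag': nvplib.NEUTRON_VERSION},
#    {'scope': 'quantum_net_id', 'tag': 'net-uuid'},
#    {'scope': 'os_tid', 'tag': 'tenant-uuid'}]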

View File

@@ -25,6 +25,54 @@ from neutron.plugins.nicira import nvplib
LOG = log.getLogger(__name__)
def fetch_nsx_switches(session, cluster, neutron_net_id):
"""Retrieve logical switches for a neutron network.
This function is optimized to always fetch all the lswitches with
a single NSX query.
If there is more than one logical switch (the chained-switches use
case), NSX lswitches are queried by the 'quantum_net_id' tag;
otherwise the NSX lswitch is retrieved directly by id, which is
more efficient.
"""
nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
if len(nsx_switch_ids) > 1:
lswitches = nvplib.get_lswitches(cluster, neutron_net_id)
else:
lswitches = [nvplib.get_lswitch_by_id(
cluster, nsx_switch_ids[0])]
return lswitches
def get_nsx_switch_ids(session, cluster, neutron_network_id):
"""Return the NSX switch id for a given neutron network.
First lookup for mappings in Neutron database. If no mapping is
found, query the NSX backend and add the mappings.
"""
nsx_switch_ids = nicira_db.get_nsx_switch_ids(
session, neutron_network_id)
if not nsx_switch_ids:
# Find logical switches from backend.
# This is a rather expensive query, but it won't be executed
# more than once for each network in Neutron's lifetime
nsx_switches = nvplib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches:
LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
neutron_network_id)
return
nsx_switch_ids = []
with session.begin(subtransactions=True):
for nsx_switch in nsx_switches:
nsx_switch_id = nsx_switch['uuid']
nsx_switch_ids.append(nsx_switch_id)
# Create DB mapping
nicira_db.add_neutron_nsx_network_mapping(
session,
neutron_network_id,
nsx_switch_id)
return nsx_switch_ids
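A usage sketch of the lazy-sync behaviour this enables; session and cluster setup are elided and the network id is illustrative:

# First call: no rows in neutron_nsx_network_mappings yet, so the NSX
# backend is queried and the mappings are persisted as a side effect.
ids = nsx_utils.get_nsx_switch_ids(session, cluster, 'net-uuid')
# Later calls are served from the Neutron DB with no NSX round trip.
assert nsx_utils.get_nsx_switch_ids(session, cluster, 'net-uuid') == ids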
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
"""Return the NSX switch and port uuids for a given neutron port.

View File

@@ -249,8 +249,9 @@ class NvpSynchronizer():
if not lswitches:
# Try to get logical switches from nvp
try:
lswitches = nvplib.get_lswitches(
self._cluster, neutron_network_data['id'])
lswitches = nsx_utils.fetch_nsx_switches(
context.session, self._cluster,
neutron_network_data['id'])
except exceptions.NetworkNotFound:
# TODO(salv-orlando): We should be catching
# NvpApiClient.ResourceNotFound here

View File

@@ -48,6 +48,14 @@ def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
return binding
def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id):
with session.begin(subtransactions=True):
mapping = nicira_models.NeutronNsxNetworkMapping(
neutron_id=neutron_id, nsx_id=nsx_switch_id)
session.add(mapping)
return mapping
def add_neutron_nsx_port_mapping(session, neutron_id,
nsx_switch_id, nsx_port_id):
session.begin(subtransactions=True)
@@ -74,6 +82,14 @@ def add_neutron_nsx_port_mapping(session, neutron_id,
return mapping
def get_nsx_switch_ids(session, neutron_id):
# This function returns a list of NSX switch identifiers because of
# the possibility of chained logical switches
return [mapping['nsx_id'] for mapping in
session.query(nicira_models.NeutronNsxNetworkMapping).filter_by(
neutron_id=neutron_id)]
def get_nsx_switch_and_port_id(session, neutron_id):
try:
mapping = (session.query(nicira_models.NeutronNsxPortMapping).

View File

@@ -57,6 +57,19 @@ class NvpNetworkBinding(model_base.BASEV2):
self.vlan_id)
class NeutronNsxNetworkMapping(model_base.BASEV2):
"""Maps neutron network identifiers to NSX identifiers.
Because of chained logical switches, more than one mapping might
exist for a single Neutron network.
"""
__tablename__ = 'neutron_nsx_network_mappings'
neutron_id = Column(String(36),
ForeignKey('networks.id', ondelete='CASCADE'),
primary_key=True)
nsx_id = Column(String(36), primary_key=True)
class NeutronNsxPortMapping(model_base.BASEV2):
"""Represents the mapping between neutron and nvp port uuids."""

View File

@@ -202,7 +202,29 @@ def get_all_query_pages(path, c):
# -------------------------------------------------------------------
# Network functions
# -------------------------------------------------------------------
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = _build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
# Fetch extra logical switches
lswitch_query_path = _build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
return get_all_query_pages(lswitch_query_path, cluster)
lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
relations="LogicalSwitchStatus")
results = []
@@ -212,33 +234,30 @@ def get_lswitches(cluster, neutron_net_id):
for tag in ls['tags']:
if (tag['scope'] == "multi_lswitch" and
tag['tag'] == "True"):
# Fetch extra logical switches
extra_lswitch_uri_path = _build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
extra_switches = get_all_query_pages(extra_lswitch_uri_path,
cluster)
results.extend(extra_switches)
return results
results.extend(lookup_switches_by_tag())
except exception.NotFound:
# This is legit if the neutron network was created using
# a post-Havana version of the plugin
results.extend(lookup_switches_by_tag())
if results:
return results
else:
raise exception.NetworkNotFound(net_id=neutron_net_id)
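In NVP API terms, the two lookup paths above translate to queries roughly of the following shape; the URIs illustrate what _build_uri_path produces and are not verbatim:

# Direct fetch by id (fast path when a single switch is mapped):
#   GET /ws.v1/lswitch/<neutron_net_id>?relations=LogicalSwitchStatus
#
# Tag-based fetch (chained switches, and the fallback when the direct
# GET raises NotFound for a post-Havana network):
#   GET /ws.v1/lswitch?fields=uuid,display_name,tags,lport_count
#       &relations=LogicalSwitchStatus
#       &tag=<neutron_net_id>&tag_scope=quantum_net_id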
def create_lswitch(cluster, tenant_id, display_name,
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
neutron_net_id=None,
shared=None,
**kwargs):
# The tag scope adopts a slightly different naming convention for
# historical reasons
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"transport_zones": transport_zones_config,
"tags": [{"tag": tenant_id, "scope": "os_tid"},
{"tag": neutron_net_id, "scope": "quantum_net_id"},
{"tag": NEUTRON_VERSION, "scope": "quantum"}]}
if neutron_net_id:
lswitch_obj["tags"].append({"tag": neutron_net_id,
"scope": "quantum_net_id"})
# TODO(salv-orlando): Now that we have async status synchronization
# this tag is perhaps not needed anymore
if shared:
lswitch_obj["tags"].append({"tag": "true",
"scope": "shared"})

View File

@@ -259,12 +259,14 @@ class TestProxyCreateLswitch(base.BaseTestCase):
]
self.tags = [
{'scope': 'quantum', 'tag': nvplib.NEUTRON_VERSION},
{'scope': 'quantum_net_id', 'tag': 'foo_id'},
{'scope': 'os_tid', 'tag': self.tenant_id}
]
self.cluster = None
def test_create_lswitch_with_basic_args(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config)
@@ -272,26 +274,9 @@ class TestProxyCreateLswitch(base.BaseTestCase):
self.assertEqual(self.tz_config, result[1])
self.assertEqual(self.tags, result[2])
def test_create_lswitch_with_neutron_net_id_as_kwarg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
self.tenant_id,
self.display_name,
self.tz_config,
neutron_net_id='foo')
expected = self.tags + [{'scope': 'quantum_net_id', 'tag': 'foo'}]
self.assertEqual(expected, result[2])
def test_create_lswitch_with_neutron_net_id_as_arg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
self.tenant_id,
self.display_name,
self.tz_config,
'foo')
expected = self.tags + [{'scope': 'quantum_net_id', 'tag': 'foo'}]
self.assertEqual(expected, result[2])
def test_create_lswitch_with_shared_as_kwarg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,
@@ -301,19 +286,19 @@ class TestProxyCreateLswitch(base.BaseTestCase):
def test_create_lswitch_with_shared_as_arg(self):
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,
'foo',
True)
additional_tags = [{'scope': 'quantum_net_id', 'tag': 'foo'},
{'scope': 'shared', 'tag': 'true'}]
additional_tags = [{'scope': 'shared', 'tag': 'true'}]
expected = self.tags + additional_tags
self.assertEqual(expected, result[2])
def test_create_lswitch_with_additional_tags(self):
more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}]
result = nsp._process_base_create_lswitch_args(self.cluster,
'foo_id',
self.tenant_id,
self.display_name,
self.tz_config,

View File

@@ -39,6 +39,17 @@ class NsxUtilsTestCase(base.BaseTestCase):
module_name='dbexts.nicira_db')).start()
self.addCleanup(mock.patch.stopall)
def _mock_network_mapping_db_calls(self, ret_value):
# Mock relevant db calls
# This allows us to avoid setting up the plugin
# for creating db entries
mock.patch(nicira_method('get_nsx_switch_ids',
module_name='dbexts.nicira_db'),
return_value=ret_value).start()
mock.patch(nicira_method('add_neutron_nsx_network_mapping',
module_name='dbexts.nicira_db')).start()
self.addCleanup(mock.patch.stopall)
def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
# The nvplib and db calls are mocked, therefore the cluster
# and the neutron_port_id parameters can be set to None
@@ -47,6 +58,16 @@ class NsxUtilsTestCase(base.BaseTestCase):
self.assertEqual(exp_ls_uuid, ls_uuid)
self.assertEqual(exp_lp_uuid, lp_uuid)
def _verify_get_nsx_switch_ids(self, exp_ls_uuids):
# The nvplib and db calls are mocked, therefore the cluster
# and the neutron_network_id parameters can be set to None
ls_uuids = nsx_utils.get_nsx_switch_ids(
db_api.get_session(), None, None)
for ls_uuid in ls_uuids or []:
self.assertIn(ls_uuid, exp_ls_uuids)
exp_ls_uuids.remove(ls_uuid)
self.assertFalse(exp_ls_uuids)
def test_get_nsx_switch_and_port_id_from_db_mappings(self):
# This test is representative of the 'standard' case in which both the
# switch and the port mappings were stored in the neutron db
@@ -94,3 +115,28 @@ class NsxUtilsTestCase(base.BaseTestCase):
with mock.patch(nicira_method('query_lswitch_lports'),
return_value=[]):
self._verify_get_nsx_switch_and_port_id(None, None)
def test_get_nsx_switch_ids_from_db_mappings(self):
# This test is representative of the 'standard' case in which the
# lswitch mappings were stored in the neutron db
exp_ls_uuids = [uuidutils.generate_uuid()]
self._mock_network_mapping_db_calls(exp_ls_uuids)
self._verify_get_nsx_switch_ids(exp_ls_uuids)
def test_get_nsx_switch_ids_no_db_mapping(self):
# This test is representative of the case where db mappings were not
# found for a given network identifier
exp_ls_uuids = [uuidutils.generate_uuid()]
self._mock_network_mapping_db_calls(None)
with mock.patch(nicira_method('get_lswitches'),
return_value=[{'uuid': uuid}
for uuid in exp_ls_uuids]):
self._verify_get_nsx_switch_ids(exp_ls_uuids)
def test_get_nsx_switch_ids_no_mapping_returns_None(self):
# This test verifies that the function returns None if the mappings
# are found neither in the db nor in the backend
self._mock_network_mapping_db_calls(None)
with mock.patch(nicira_method('get_lswitches'),
return_value=[]):
self._verify_get_nsx_switch_ids(None)

View File

@@ -379,7 +379,9 @@ class NvpSyncTestCase(base.BaseTestCase):
def _test_sync(self, exp_net_status,
exp_port_status, exp_router_status,
action_callback=None, sp=None):
neutron_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
neutron_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
neutron_port_id = self._get_tag_dict(
self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
@@ -540,7 +542,9 @@ class NvpSyncTestCase(base.BaseTestCase):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a network down to verify synchronization
q_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
q_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
q_net_data = self._plugin._get_network(ctx, q_net_id)
self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
@@ -558,7 +562,9 @@ class NvpSyncTestCase(base.BaseTestCase):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a network down to verify punctual synchronization
q_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
q_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
q_net_data = self._plugin.get_network(ctx, q_net_id)
self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])

View File

@@ -262,7 +262,7 @@ class TestNvplibL2Gateway(NvplibTestCase):
node_uuid = _uuid()
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
lswitch = nvplib.create_lswitch(self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
lport = nvplib.create_lport(self.fake_cluster,
@@ -294,6 +294,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id,
'fake-switch',
transport_zones_config)
@@ -309,6 +310,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id,
'*' * 50,
transport_zones_config)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
@@ -321,28 +323,36 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
network_id = _uuid()
main_lswitch = nvplib.create_lswitch(
self.fake_cluster, tenant_id, 'fake-switch',
transport_zones_config,
self.fake_cluster, network_id,
tenant_id, 'fake-switch', transport_zones_config,
tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
# Create secondary lswitch
nvplib.create_lswitch(
self.fake_cluster, tenant_id, 'fake-switch-2',
transport_zones_config,
neutron_net_id=main_lswitch['uuid'])
second_lswitch = nvplib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch-2', transport_zones_config)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
main_lswitch['uuid'])
network_id)
self.assertEqual(len(res_lswitch), 2)
self.assertEqual(res_lswitch[0]['uuid'],
main_lswitch['uuid'])
switch_1_tags = self._build_tag_dict(res_lswitch[0]['tags'])
switch_2_tags = self._build_tag_dict(res_lswitch[1]['tags'])
self.assertIn('multi_lswitch', switch_1_tags)
self.assertNotIn('multi_lswitch', switch_2_tags)
self.assertNotIn('quantum_net_id', switch_1_tags)
self.assertIn('quantum_net_id', switch_2_tags)
self.assertEqual(switch_2_tags['quantum_net_id'],
main_lswitch['uuid'])
switch_uuids = [ls['uuid'] for ls in res_lswitch]
self.assertIn(main_lswitch['uuid'], switch_uuids)
self.assertIn(second_lswitch['uuid'], switch_uuids)
for ls in res_lswitch:
if ls['uuid'] == main_lswitch['uuid']:
main_ls = ls
else:
second_ls = ls
main_ls_tags = self._build_tag_dict(main_ls['tags'])
second_ls_tags = self._build_tag_dict(second_ls['tags'])
self.assertIn('multi_lswitch', main_ls_tags)
self.assertNotIn('multi_lswitch', second_ls_tags)
self.assertIn('quantum_net_id', main_ls_tags)
self.assertIn('quantum_net_id', second_ls_tags)
self.assertEqual(main_ls_tags['quantum_net_id'],
network_id)
self.assertEqual(second_ls_tags['quantum_net_id'],
network_id)
def test_update_lswitch(self):
new_name = 'new-name'
@@ -350,6 +360,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
@@ -373,6 +384,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
@@ -933,6 +945,7 @@ class TestNvplibLogicalRouters(NvplibTestCase):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
@@ -1310,7 +1323,7 @@ class TestNvplibLogicalPorts(NvplibTestCase):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
tenant_id, 'fake-switch',
_uuid(), tenant_id, 'fake-switch',
transport_zones_config)
lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, neutron_port_id,
@@ -1349,7 +1362,7 @@ class TestNvplibLogicalPorts(NvplibTestCase):
neutron_port_id = 'whatever'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
lswitch = nvplib.create_lswitch(self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
lport = nvplib.get_port_by_neutron_tag(self.fake_cluster,
lswitch['uuid'],