ML2 Nexus VXLAN TunnelTypeDriver needs upgrade

At some point, the TunnelTypeDriver class was replaced by
ML2TunnelTypeDriver.  This initially caused CI errors, but a patch
came in which made the CI failures go away.  However, when stacking
devstack with a VXLAN config, another failure occurred during
start-up.  By upgrading to ML2TunnelTypeDriver, the failure
has gone away and VXLAN works as expected.
Changes were also made to remain backward compatible as far back as
the Newton branch.

Change-Id: I0419a64a0a223b165bcc8c376e6f1c209be64846
Closes-Bug: #1664339
This commit is contained in:
Carol Bouchard 2017-02-14 09:49:30 -05:00
parent d40f97c0df
commit 13533392ce
5 changed files with 65 additions and 29 deletions

View File

@ -16,6 +16,7 @@ from types import ModuleType
from distutils.version import StrictVersion
from neutron.plugins.ml2.drivers import type_tunnel
from neutron import version
@ -47,7 +48,9 @@ else:
setattr(constants, 'ATTR_NOT_SPECIFIED', getattr(attributes,
'ATTR_NOT_SPECIFIED'))
if NEUTRON_VERSION >= NEUTRON_OCATA_VERSION:
from neutron import context
from neutron_lib.api import extensions
from neutron_lib.db import model_base
from neutron_lib.plugins import directory
@ -55,8 +58,19 @@ if NEUTRON_VERSION >= NEUTRON_OCATA_VERSION:
get_plugin = directory.get_plugin
n_c_attr_names = dir(n_c)
HasProject = model_base.HasProject
VXLAN_TUNNEL_TYPE = type_tunnel.ML2TunnelTypeDriver
def get_context():
return context.Context()
def get_db_ref(context):
return context
def get_tunnel_session(context):
return context.session
else:
from neutron.api import extensions # noqa
from neutron.db import api as db_api
from neutron.db import model_base # noqa
from neutron.db import models_v2
from neutron import manager
@ -70,6 +84,17 @@ else:
HasProject = models_v2.HasTenant
setattr(constants, 'L3', getattr(svc_constants, 'L3_ROUTER_NAT'))
VXLAN_TUNNEL_TYPE = type_tunnel.TunnelTypeDriver
def get_context():
return None
def get_db_ref(context):
return db_api.get_session()
def get_tunnel_session(context):
return context
core_opts = base_config.core_opts
#extensions = extensions

View File

@ -30,7 +30,6 @@ from oslo_utils import excutils
from networking_cisco import backwards_compatibility as bc
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
@ -292,6 +291,7 @@ class CiscoNexusMechanismDriver(api.MechanismDriver):
self.timer = None
self.monitor_timeout = conf.cfg.CONF.ml2_cisco.switch_heartbeat_time
self.monitor_lock = threading.Lock()
self.context = bc.get_context()
LOG.info(_LI("CiscoNexusMechanismDriver: initialize() called "
"pid %(pid)d thid %(tid)d"), {'pid': self._ppid,
'tid': threading.current_thread().ident})
@ -1842,8 +1842,9 @@ class CiscoNexusMechanismDriver(api.MechanismDriver):
# Retrieve the dynamically allocated segment.
# Database has provider_segment dictionary key.
network_id = context.current['network_id']
db_ref = bc.get_db_ref(self.context)
dynamic_segment = segments_db.get_dynamic_segment(
db_api.get_session(), network_id, physnet)
db_ref, network_id, physnet)
# Have other drivers bind the VLAN dynamic segment.
if dynamic_segment:

View File

@ -28,11 +28,11 @@ from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_models_v2)
from networking_cisco._i18n import _, _LE, _LI, _LW
from networking_cisco import backwards_compatibility as bc
from neutron.db import api as db_api
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
from neutron_lib import exceptions as exc
LOG = log.getLogger(__name__)
@ -54,12 +54,13 @@ nexus_vxlan_opts = [
cfg.CONF.register_opts(nexus_vxlan_opts, "ml2_type_nexus_vxlan")
class NexusVxlanTypeDriver(type_tunnel.TunnelTypeDriver):
class NexusVxlanTypeDriver(bc.VXLAN_TUNNEL_TYPE):
def __init__(self):
super(NexusVxlanTypeDriver, self).__init__(
nexus_models_v2.NexusVxlanAllocation)
def _get_mcast_group_for_vni(self, session, vni):
def _get_mcast_group_for_vni(self, context, vni):
session = bc.get_tunnel_session(context)
mcast_grp = (session.query(nexus_models_v2.NexusMcastGroup).
filter_by(associated_vni=vni).first())
if not mcast_grp:
@ -155,12 +156,13 @@ class NexusVxlanTypeDriver(type_tunnel.TunnelTypeDriver):
session.flush()
return mcast_for_vni
def allocate_tenant_segment(self, session):
alloc = self.allocate_partially_specified_segment(session)
def allocate_tenant_segment(self, context):
alloc = self.allocate_partially_specified_segment(context)
if not alloc:
return
vni = alloc.vxlan_vni
mcast_group = self._get_mcast_group_for_vni(session, vni)
mcast_group = self._get_mcast_group_for_vni(
context, vni)
return {api.NETWORK_TYPE: const.TYPE_NEXUS_VXLAN,
api.PHYSICAL_NETWORK: mcast_group,
api.SEGMENTATION_ID: alloc.vxlan_vni}
@ -208,35 +210,37 @@ class NexusVxlanTypeDriver(type_tunnel.TunnelTypeDriver):
session.execute(nexus_models_v2.NexusVxlanAllocation.
__table__.insert(), bulk)
def reserve_provider_segment(self, session, segment):
def reserve_provider_segment(self, context, segment):
if self.is_partial_segment(segment):
alloc = self.allocate_partially_specified_segment(session)
alloc = self.allocate_partially_specified_segment(context)
if not alloc:
raise exc.NoNetworkAvailable
else:
segmentation_id = segment.get(api.SEGMENTATION_ID)
alloc = self.allocate_fully_specified_segment(
session, vxlan_vni=segmentation_id)
context, vxlan_vni=segmentation_id)
if not alloc:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
return {api.NETWORK_TYPE: p_const.TYPE_VXLAN,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.vxlan_vni}
def release_segment(self, session, segment):
def release_segment(self, context, segment):
vxlan_vni = segment[api.SEGMENTATION_ID]
inside = any(lo <= vxlan_vni <= hi for lo, hi in self.tunnel_ranges)
session = bc.get_tunnel_session(context)
with session.begin(subtransactions=True):
query = (session.query(nexus_models_v2.NexusVxlanAllocation).
query = (session.query(
nexus_models_v2.NexusVxlanAllocation).
filter_by(vxlan_vni=vxlan_vni))
if inside:
count = query.update({"allocated": False})
if count:
mcast_row = (
session.query(nexus_models_v2.NexusMcastGroup)
.filter_by(associated_vni=vxlan_vni).first())
session.query(nexus_models_v2.NexusMcastGroup)
.filter_by(associated_vni=vxlan_vni).first())
session.delete(mcast_row)
LOG.debug("Releasing vxlan tunnel %s to pool",
vxlan_vni)

View File

@ -368,6 +368,7 @@ class TestCiscoNexusBase(testlib_api.SqlTestCase):
'physnet')] = PHYSNET
mech_instance.driver.nexus_switches = (
mech_instance._nexus_switches)
mech_instance.context = bc.get_context()
mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver,
'__init__', new=new_nexus_init).start()

View File

@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import type_nexus_vxlan
import neutron.db.api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
@ -39,7 +39,7 @@ class NexusVxlanTypeTest(testlib_api.SqlTestCase):
self.driver.conf_mcast_ranges = MCAST_GROUP_RANGES
self.driver.tunnel_ranges = VNI_RANGES
self.driver.sync_allocations()
self.session = db.get_session()
self.context = bc.get_db_ref(bc.get_context())
def vni_in_range(self, vni):
# SegmentTypeDriver.allocate_partially_specified_segment allocates
@ -47,7 +47,7 @@ class NexusVxlanTypeTest(testlib_api.SqlTestCase):
return any(lower <= vni <= upper for (lower, upper) in VNI_RANGES)
def test_allocate_tenant_segment(self):
segment = self.driver.allocate_tenant_segment(self.session)
segment = self.driver.allocate_tenant_segment(self.context)
self.assertEqual(segment[api.NETWORK_TYPE], const.TYPE_NEXUS_VXLAN)
self.assertEqual(segment[api.PHYSICAL_NETWORK], '224.0.0.1')
self.assertTrue(self.vni_in_range(segment[api.SEGMENTATION_ID]))
@ -55,7 +55,8 @@ class NexusVxlanTypeTest(testlib_api.SqlTestCase):
def test_allocate_shared_mcast_group(self):
segments = []
for i in range(0, 6):
segments.append(self.driver.allocate_tenant_segment(self.session))
segments.append(self.driver.allocate_tenant_segment(
self.context))
self.assertEqual(segments[0][api.NETWORK_TYPE],
const.TYPE_NEXUS_VXLAN)
self.assertEqual(segments[0][api.PHYSICAL_NETWORK], '224.0.0.1')
@ -70,11 +71,13 @@ class NexusVxlanTypeTest(testlib_api.SqlTestCase):
segment = {api.NETWORK_TYPE: const.TYPE_NEXUS_VXLAN,
api.PHYSICAL_NETWORK: '224.0.0.1',
api.SEGMENTATION_ID: '5000'}
result = self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(self.session,
alloc.vxlan_vni)
result = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(
self.context,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(
self.context,
alloc.vxlan_vni)
self.assertTrue(alloc.allocated)
self.assertEqual(alloc.vxlan_vni, 5000)
self.assertEqual(mcast_group, '224.0.0.1')
@ -83,11 +86,13 @@ class NexusVxlanTypeTest(testlib_api.SqlTestCase):
segment = {api.NETWORK_TYPE: const.TYPE_NEXUS_VXLAN,
api.PHYSICAL_NETWORK: '224.0.0.1'}
result = self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_allocation(self.session,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(self.session,
alloc.vxlan_vni)
result = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(
self.context,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(
self.context,
alloc.vxlan_vni)
self.assertTrue(alloc.allocated)
self.assertTrue(self.vni_in_range(alloc.vxlan_vni))
self.assertEqual(mcast_group, '224.0.0.1')