Freezing DB table definitions for data migrations

A few data migrations were added in the following module:
gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/data_migrations.py

which reference tables defined in the corresponding DB models. However,
this opens up the possibility that those definitions might change via
migration scripts that get added after the data migration scripts in which
they are referenced. This leads to the situation that when the data
migration is run, sqlalchemy loads the newer table definition, but the DB
itself does not have the changes (defined in the schema migration that has not
yet run).

To get around this, instead of referencing the tables from the DB models,
the table definition must be added to the data migration script itself and
referenced. This freezes the table definition that is expected by the data
migration script and ensures that the data migration script will work.

This patch updates the aforementioned data migration script to add the relevant
table definitions (based on the commit at the time those migrations were added).

It also eliminates the use of DB mixin methods to retrieve resources (which
can change over time and might include new attributes not available to this
migration).

A sanity UT is added for the Security Groups migration which did not have
UT coverage. The other data migrations are already being tested in UTs, but
to ensure that the older models are loaded (and the newer model definitions are not
referenced) some more UTs can be added as follow up.

Going forward, each data migration script should be contained in the alembic
migration script along with the table definitions that it uses. For reference
see how it's done here:
625de54de3/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py

Change-Id: If7b74160a36b9946a7ab8de8c4c19fed2a87d850
This commit is contained in:
Sumit Naiksatam 2018-02-20 14:55:13 -08:00
parent 70731da6f2
commit 505f6737a9
2 changed files with 118 additions and 13 deletions

View File

@ -13,6 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
# Note: This module should be treated as legacy and should not be extended to
# add any new data migrations. New data migrations should be added
# directly to the alembic migration script along with the table definitions
# that are being referenced.
# For reference see how it's done here:
# https://github.com/openstack/neutron/blob/
# 625de54de3936b0da8760c3da76d2d315d05f94e/neutron/db/migration/
# alembic_migrations/versions/newton/contract/
# 3b935b28e7a0_migrate_to_pluggable_ipam.py
import netaddr
from aim.aim_lib import nat_strategy
@ -32,8 +42,47 @@ from sqlalchemy.orm import lazyload
from gbpservice.neutron.extensions import cisco_apic as ext
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import extension_db
# The following definitions have been taken from commit:
# 9b4b7276ad8a0f181c9be12ba5a0192432aa5027
# and are frozen for the data migration script that was included
# in this module. It should not be changed in this module.
# Frozen sa.Table snapshot of apic_aim_network_extensions, declaring the
# columns the migration code in this module reads (network_id,
# external_network_dn, nat_type). Querying this snapshot instead of the
# live DB model keeps the expected schema fixed even after later schema
# migrations alter the real table. Do not update this definition.
NetworkExtensionDb = sa.Table(
'apic_aim_network_extensions', sa.MetaData(),
sa.Column('network_id', sa.String(36), nullable=False),
sa.Column('external_network_dn', sa.String(1024)),
sa.Column('nat_type', sa.Enum('distributed', 'edge', '')))
# Frozen sa.Table snapshot of apic_aim_network_external_cidrs; one row
# per (network_id, cidr) pair. Used by _get_network_extn_db below to
# collect a network's external CIDRs. Do not update this definition.
NetworkExtensionCidrDb = sa.Table(
'apic_aim_network_external_cidrs', sa.MetaData(),
sa.Column('network_id', sa.String(36), nullable=False),
sa.Column('cidr', sa.String(64), nullable=False))
# Frozen sa.Table snapshot of apic_aim_address_scope_mappings, mapping a
# Neutron address scope to an AIM VRF (name + tenant_name) and recording
# whether the VRF is owned by the scope. Inserted into by
# _add_address_scope_mapping below. Do not update this definition.
AddressScopeMapping = sa.Table(
'apic_aim_address_scope_mappings', sa.MetaData(),
sa.Column('scope_id', sa.String(36)),
sa.Column('vrf_name', sa.String(64)),
sa.Column('vrf_tenant_name', sa.String(64)),
sa.Column('vrf_owned', sa.Boolean, nullable=False))
# The following definition has been taken from commit:
# f8b41855acbbb7e59a0bab439445c198fc6aa146
# and is frozen for the data migration script that was included
# in this module. It should not be changed in this module.
# Frozen sa.Table snapshot of apic_aim_network_mappings, mapping a
# Neutron network to its AIM BD, EPG and VRF identities. Written by
# _add_network_mapping and purged/rebuilt by do_ap_name_change. Do not
# update this definition.
NetworkMapping = sa.Table(
'apic_aim_network_mappings', sa.MetaData(),
sa.Column('network_id', sa.String(36), nullable=False),
sa.Column('bd_name', sa.String(64), nullable=True),
sa.Column('bd_tenant_name', sa.String(64), nullable=True),
sa.Column('epg_name', sa.String(64), nullable=True),
sa.Column('epg_tenant_name', sa.String(64), nullable=True),
sa.Column('epg_app_profile_name', sa.String(64), nullable=True),
sa.Column('vrf_name', sa.String(64), nullable=True),
sa.Column('vrf_tenant_name', sa.String(64), nullable=True))
class DefunctAddressScopeExtensionDb(model_base.BASEV2):
@ -49,11 +98,26 @@ class DefunctAddressScopeExtensionDb(model_base.BASEV2):
vrf_dn = sa.Column(sa.String(1024))
def _add_address_scope_mapping(session, scope_id, vrf, vrf_owned=True):
    """Insert one row into the frozen address-scope mapping table.

    Links the Neutron address scope *scope_id* to the AIM VRF given by
    *vrf* (its ``name`` and ``tenant_name`` attributes are recorded),
    flagging with *vrf_owned* whether the scope owns that VRF.
    """
    row = {
        'scope_id': scope_id,
        'vrf_name': vrf.name,
        'vrf_tenant_name': vrf.tenant_name,
        'vrf_owned': vrf_owned,
    }
    session.execute(AddressScopeMapping.insert().values(**row))
def _add_network_mapping(session, network_id, bd, epg, vrf, ext_net=None):
    """Insert one row into the frozen network mapping table.

    Records the AIM BD, EPG and VRF identities for the Neutron network
    *network_id*. When *ext_net* is supplied, no mapping row is written
    by this helper.
    """
    if ext_net:
        return
    row = {
        'network_id': network_id,
        'bd_name': bd.name,
        'bd_tenant_name': bd.tenant_name,
        'epg_name': epg.name,
        'epg_app_profile_name': epg.app_profile_name,
        'epg_tenant_name': epg.tenant_name,
        'vrf_name': vrf.name,
        'vrf_tenant_name': vrf.tenant_name,
    }
    session.execute(NetworkMapping.insert().values(**row))
def do_apic_aim_persist_migration(session):
alembic_util.msg(
"Starting data migration for apic_aim mechanism driver persistence.")
db_mixin = db.DbMixin()
aim = aim_manager.AimManager()
aim_ctx = aim_context.AimContext(session)
mapper = apic_mapper.APICNameMapper()
@ -83,7 +147,7 @@ def do_apic_aim_persist_migration(session):
vrf = vrfs[0]
vrf_owned = True
if vrf:
db_mixin._add_address_scope_mapping(
_add_address_scope_mapping(
session, scope_db.id, vrf, vrf_owned)
else:
alembic_util.warn(
@ -97,7 +161,7 @@ def do_apic_aim_persist_migration(session):
bd = None
epg = None
vrf = None
ext_db = (session.query(extension_db.NetworkExtensionDb).
ext_db = (session.query(NetworkExtensionDb).
filter_by(network_id=net_db.id).
one_or_none())
if ext_db and ext_db.external_network_dn:
@ -149,8 +213,7 @@ def do_apic_aim_persist_migration(session):
if vrfs:
vrf = vrfs[0]
if bd and epg and vrf:
db_mixin._add_network_mapping(
session, net_db.id, bd, epg, vrf)
_add_network_mapping(session, net_db.id, bd, epg, vrf)
elif not net_db.external:
alembic_util.warn(
"AIM BD, EPG or VRF not found for network: %s" % net_db)
@ -159,6 +222,24 @@ def do_apic_aim_persist_migration(session):
"Finished data migration for apic_aim mechanism driver persistence.")
def _get_network_extn_db(session, network_id):
    """Return the frozen extension attributes of a network.

    Reads the frozen snapshot tables (not the live DB models) and builds
    a dict keyed by the cisco_apic extension constants: the external
    network DN, the NAT type, and — only when an external network DN is
    present — the list of external CIDRs. Returns None when the network
    has no row in apic_aim_network_extensions.
    """
    row = (session.query(NetworkExtensionDb).filter_by(
        network_id=network_id).first())
    if not row:
        return None
    # Column order in the frozen table: network_id, dn, nat_type.
    _net_id, ext_net_dn, nat_type = row
    cidr_rows = (session.query(NetworkExtensionCidrDb).filter_by(
        network_id=network_id).all())
    result = {}
    if ext_net_dn is not None:
        result[ext.EXTERNAL_NETWORK] = ext_net_dn
    if nat_type is not None:
        result[ext.NAT_TYPE] = nat_type
    if result.get(ext.EXTERNAL_NETWORK):
        result[ext.EXTERNAL_CIDRS] = [cidr for _net, cidr in cidr_rows]
    return result
def do_ap_name_change(session, conf=None):
alembic_util.msg("Starting data migration for apic_aim ap name change.")
cfg = conf or CONF
@ -166,12 +247,10 @@ def do_ap_name_change(session, conf=None):
aim_ctx = aim_context.AimContext(session)
system_id = cfg.apic_system_id
alembic_util.msg("APIC System ID: %s" % system_id)
ext_mixin = extension_db.ExtensionDbMixin()
db_mixin = db.DbMixin()
with session.begin(subtransactions=True):
net_dbs = session.query(models_v2.Network).options(lazyload('*')).all()
for net_db in net_dbs:
ext_db = ext_mixin.get_network_extn_db(session, net_db.id)
ext_db = _get_network_extn_db(session, net_db.id)
if ext_db and ext_db.get(ext.EXTERNAL_NETWORK):
alembic_util.msg("Migrating external network: %s" % net_db)
# Its a managed external network.
@ -203,8 +282,8 @@ def do_ap_name_change(session, conf=None):
l3out.name,
extc.name)] = extc
vrfs = ns.read_vrfs(aim_ctx, ext_net)
session.query(db.NetworkMapping).filter(
db.NetworkMapping.network_id == net_db.id).delete()
session.execute(NetworkMapping.delete().where(
NetworkMapping.c.network_id == net_db.id))
for vrf in vrfs:
ns.disconnect_vrf(aim_ctx, ext_net, vrf)
ns.delete_external_network(aim_ctx, ext_net)
@ -226,7 +305,7 @@ def do_ap_name_change(session, conf=None):
epg = resource
elif isinstance(resource, aim_resource.VRF):
vrf = resource
db_mixin._add_network_mapping(session, net_db.id, bd, epg, vrf)
_add_network_mapping(session, net_db.id, bd, epg, vrf)
eid = (ext_net.tenant_name, ext_net.l3out_name, ext_net.name)
for vrf in vrfs:
if eid in clone_ext_nets:

View File

@ -3801,6 +3801,32 @@ class TestMigrations(ApicAimTestCase, db.DbMixin):
self.assertIsNone(aim.get(aim_ctx, r),
'Resource: %s still in AIM' % r)
def test_security_group_migration_sanity(self):
    """Sanity-check do_apic_aim_security_group_migration.

    Purge every security-group resource out of AIM, run the data
    migration, and verify the SG resources reappear.
    """
    aim_ctx = aim_context.AimContext(self.db_session)
    self._make_network(self.fmt, 'net1', True)
    sg_types = (aim_resource.SecurityGroup,
                aim_resource.SecurityGroupSubject,
                aim_resource.SecurityGroupRule)
    # Delete every SG-related resource from AIM, then confirm that
    # nothing of that type is left behind.
    for rtype in sg_types:
        for res in self.aim_mgr.find(aim_ctx, rtype):
            self.aim_mgr.delete(aim_ctx, res)
        self.assertEqual([], self.aim_mgr.find(aim_ctx, rtype))
    data_migrations.do_apic_aim_security_group_migration(self.db_session)
    # NOTE(review): assertIsNotNone is vacuous if find() always returns
    # a list; asserting a non-empty result would be a stronger check —
    # confirm against aim_mgr.find's contract.
    for rtype in sg_types:
        self.assertIsNotNone(self.aim_mgr.find(aim_ctx, rtype))
class TestPortBinding(ApicAimTestCase):
def test_bind_opflex_agent(self):