Switch to 'subquery' for 1-M relationships

This switches 1-M relationships to subquery eager loading, which
results in a higher constant number of queries per lookup but
eliminates the potential for cross-product row explosions when
multiple such collections are joined eagerly.

Closes-Bug: #1649317
Change-Id: I6952c48236153a8e2f2f155375b70573ddc2cf0f
Kevin Benton 2017-01-09 05:02:42 -08:00
parent df7de345fc
commit 3ffe006743
13 changed files with 43 additions and 32 deletions
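For context, the difference between the two loading strategies can be shown with a minimal SQLAlchemy sketch (hypothetical Parent/Child/Note models, not Neutron's): with lazy='joined' every eagerly loaded 1-M collection is pulled into the parent query through a LEFT OUTER JOIN, so two collections of sizes M and N yield M * N rows per parent, while lazy='subquery' fetches each collection with one extra SELECT and keeps the result at M + N rows.

# Minimal sketch of joined vs. subquery eager loading; the models below are
# illustrative only and are not part of Neutron.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Child(Base):
    __tablename__ = 'children'
    id = sa.Column(sa.Integer, primary_key=True)
    parent_id = sa.Column(sa.Integer, sa.ForeignKey('parents.id'))


class Note(Base):
    __tablename__ = 'notes'
    id = sa.Column(sa.Integer, primary_key=True)
    parent_id = sa.Column(sa.Integer, sa.ForeignKey('parents.id'))


class Parent(Base):
    __tablename__ = 'parents'
    id = sa.Column(sa.Integer, primary_key=True)
    # 'joined': the collection comes back in the parent SELECT via a LEFT
    # OUTER JOIN, so multiple 1-M collections multiply the returned rows.
    children = orm.relationship(Child, lazy='joined')
    # 'subquery': one additional SELECT per collection, wrapping the parent
    # query as a subquery; a constant number of extra queries, no row blowup.
    notes = orm.relationship(Note, lazy='subquery')


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = orm.sessionmaker(bind=engine)()
# Both collections are populated eagerly on this query; only the SQL emitted
# to load them differs between the two strategies.
parents = session.query(Parent).all()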


@@ -42,5 +42,5 @@ class ExtraDhcpOpt(model_base.BASEV2, model_base.HasId):
# eagerly load extra_dhcp_opts bindings
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
backref=orm.backref("dhcp_opts", lazy='subquery', cascade='delete'))
revises_on_change = ('ports', )


@@ -27,5 +27,5 @@ class AllowedAddressPair(model_base.BASEV2):
port = orm.relationship(
models_v2.Port,
backref=orm.backref("allowed_address_pairs",
lazy="joined", cascade="delete"))
lazy="subquery", cascade="delete"))
revises_on_change = ('port', )


@@ -60,7 +60,7 @@ class Router(standard_attr.HasStandardAttributes, model_base.BASEV2,
backref='router',
lazy='dynamic')
l3_agents = orm.relationship(
'Agent', lazy='joined', viewonly=True,
'Agent', lazy='subquery', viewonly=True,
secondary=rb_model.RouterL3AgentBinding.__table__)
api_collections = [l3.ROUTERS]
@@ -113,6 +113,6 @@ class RouterRoute(model_base.BASEV2, models_v2.Route):
router = orm.relationship(Router,
backref=orm.backref("route_list",
lazy='joined',
lazy='subquery',
cascade='delete'))
revises_on_change = ('router', )


@@ -36,10 +36,11 @@ class MeteringLabel(model_base.BASEV2,
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
description = sa.Column(sa.String(db_const.LONG_DESCRIPTION_FIELD_SIZE))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
cascade="delete", lazy="subquery")
routers = orm.relationship(
l3_models.Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
lazy='subquery',
uselist=True)
shared = sa.Column(sa.Boolean, default=False, server_default=sql.false())


@@ -86,7 +86,7 @@ class SecurityGroupRule(standard_attr.HasStandardAttributes, model_base.BASEV2,
remote_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete', lazy='joined'),
backref=orm.backref('rules', cascade='all,delete', lazy='subquery'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,


@@ -49,7 +49,7 @@ class NetworkSegment(standard_attr.HasStandardAttributes,
nullable=True)
network = orm.relationship(models_v2.Network,
backref=orm.backref("segments",
lazy='joined',
lazy='subquery',
cascade='delete'))
api_collections = [segment.SEGMENTS]
@@ -71,6 +71,6 @@ class SegmentHostMapping(model_base.BASEV2):
# SQLAlchemy to eagerly load this association
network_segment = orm.relationship(
NetworkSegment, backref=orm.backref("segment_host_mapping",
lazy='joined',
lazy='subquery',
cascade='delete'))
revises_on_change = ('network_segment', )


@@ -33,7 +33,7 @@ class SubnetServiceType(model_base.BASEV2):
length=db_const.DEVICE_OWNER_FIELD_SIZE))
subnet = orm.relationship(models_v2.Subnet,
backref=orm.backref('service_types',
lazy='joined',
lazy='subquery',
cascade='all, delete-orphan',
uselist=True))
__table_args__ = (


@@ -27,5 +27,5 @@ class Tag(model_base.BASEV2):
tag = sa.Column(sa.String(60), nullable=False, primary_key=True)
standard_attr = orm.relationship(
'StandardAttribute',
backref=orm.backref('tags', lazy='joined', viewonly=True))
backref=orm.backref('tags', lazy='subquery', viewonly=True))
revises_on_change = ('standard_attr', )


@@ -78,7 +78,7 @@ class Port(standard_attr.HasStandardAttributes, model_base.BASEV2,
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='joined',
fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='subquery',
cascade='all, delete-orphan',
order_by=(IPAllocation.ip_address,
IPAllocation.subnet_id))
@@ -163,18 +163,18 @@ class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2,
revises_on_change = ('networks', )
allocation_pools = orm.relationship(IPAllocationPool,
backref='subnet',
lazy="joined",
lazy="subquery",
cascade='delete')
enable_dhcp = sa.Column(sa.Boolean())
dns_nameservers = orm.relationship(DNSNameServer,
backref='subnet',
cascade='all, delete, delete-orphan',
order_by=DNSNameServer.order,
lazy='joined')
lazy='subquery')
routes = orm.relationship(SubnetRoute,
backref='subnet',
cascade='all, delete, delete-orphan',
lazy='joined')
lazy='subquery')
ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
constants.DHCPV6_STATEFUL,
constants.DHCPV6_STATELESS,
@@ -227,7 +227,7 @@ class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2,
prefixes = orm.relationship(SubnetPoolPrefix,
backref='subnetpools',
cascade='all, delete, delete-orphan',
lazy='joined')
lazy='subquery')
api_collections = [attr.SUBNETPOOLS]
@@ -248,6 +248,6 @@ class Network(standard_attr.HasStandardAttributes, model_base.BASEV2,
cascade='all, delete, delete-orphan')
availability_zone_hints = sa.Column(sa.String(255))
dhcp_agents = orm.relationship(
'Agent', lazy='joined', viewonly=True,
'Agent', lazy='subquery', viewonly=True,
secondary=ndab_model.NetworkDhcpAgentBinding.__table__)
api_collections = [attr.NETWORKS]


@@ -28,7 +28,7 @@ class QosPolicy(standard_attr.HasStandardAttributes, model_base.BASEV2,
__tablename__ = 'qos_policies'
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC,
backref='qos_policy', lazy='joined',
backref='qos_policy', lazy='subquery',
cascade='all, delete, delete-orphan')
api_collections = ['policies']


@@ -85,7 +85,8 @@ class PortBindingLevel(model_base.BASEV2):
# eagerly load port bindings
port = orm.relationship(
models_v2.Port,
backref=orm.backref("binding_levels", lazy='joined', cascade='delete'))
backref=orm.backref("binding_levels", lazy='subquery',
cascade='delete'))
class DistributedPortBinding(model_base.BASEV2):
@@ -119,5 +120,5 @@ class DistributedPortBinding(model_base.BASEV2):
port = orm.relationship(
models_v2.Port,
backref=orm.backref("distributed_port_binding",
lazy='joined',
lazy='subquery',
cascade='delete'))


@@ -43,7 +43,7 @@ class Trunk(standard_attr.HasStandardAttributes, model_base.BASEV2,
cascade='delete'))
sub_ports = sa.orm.relationship(
'SubPort', lazy='joined', uselist=True, cascade="all, delete-orphan")
'SubPort', lazy='subquery', uselist=True, cascade="all, delete-orphan")
api_collections = ['trunks']


@@ -60,7 +60,6 @@ class L3HATestFramework(testlib_api.SqlTestCase):
def setUp(self):
super(L3HATestFramework, self).setUp()
self.admin_ctx = context.get_admin_context()
self.setup_coreplugin('ml2')
self.core_plugin = directory.get_plugin()
notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
@@ -75,6 +74,12 @@ class L3HATestFramework(testlib_api.SqlTestCase):
self.agent2 = helpers.register_l3_agent(
'host_2', constants.L3_AGENT_MODE_DVR_SNAT)
@property
def admin_ctx(self):
# Property generates a new session on each reference so different
# API calls don't share a session with possible stale objects
return context.get_admin_context()
def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
ctx=None, admin_state_up=True):
if ctx is None:
@@ -536,7 +541,7 @@ class L3HATestCase(L3HATestFramework):
router['tenant_id'])
network2 = self.plugin.get_ha_network(self.admin_ctx,
router2['tenant_id'])
self.assertEqual(network, network2)
self.assertEqual(network.network_id, network2.network_id)
self._migrate_router(router['id'], False)
self.assertIsNotNone(
@@ -554,9 +559,10 @@ class L3HATestCase(L3HATestFramework):
self.assertNotEqual(ha0, ha1)
def test_add_ha_port_subtransactions_blocked(self):
with self.admin_ctx.session.begin():
ctx = self.admin_ctx
with ctx.session.begin():
self.assertRaises(RuntimeError, self.plugin.add_ha_port,
self.admin_ctx, 'id', 'id', 'id')
ctx, 'id', 'id', 'id')
def test_add_ha_port_binding_failure_rolls_back_port(self):
router = self._create_router()
@@ -724,12 +730,13 @@ class L3HATestCase(L3HATestFramework):
router[n_const.HA_ROUTER_STATE_KEY])
def test_sync_ha_router_info_ha_interface_port_concurrently_deleted(self):
ctx = self.admin_ctx
router1 = self._create_router()
router2 = self._create_router()
# retrieve all router ha port bindings
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router1['id'], router2['id']])
ctx, [router1['id'], router2['id']])
self.assertEqual(4, len(bindings))
routers = self.plugin.get_ha_sync_data_for_host(
@@ -737,7 +744,7 @@ class L3HATestCase(L3HATestFramework):
self.assertEqual(2, len(routers))
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router1['id'], router2['id']],
ctx, [router1['id'], router2['id']],
self.agent1['host'])
self.assertEqual(2, len(bindings))
@@ -748,7 +755,7 @@ class L3HATestCase(L3HATestFramework):
self.plugin, "get_ha_router_port_bindings",
return_value=[bindings[0], fake_binding]):
routers = self.plugin.get_ha_sync_data_for_host(
self.admin_ctx, self.agent1['host'], self.agent1)
ctx, self.agent1['host'], self.agent1)
self.assertEqual(1, len(routers))
self.assertIsNotNone(routers[0].get(constants.HA_INTERFACE_KEY))
@@ -779,12 +786,13 @@ class L3HATestCase(L3HATestFramework):
def test_set_router_states_handles_concurrently_deleted_router(self):
router1 = self._create_router()
router2 = self._create_router()
ctx = self.admin_ctx
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router1['id'], router2['id']])
ctx, [router1['id'], router2['id']])
self.plugin.delete_router(self.admin_ctx, router1['id'])
self.plugin._set_router_states(
self.admin_ctx, bindings, {router1['id']: 'active',
router2['id']: 'active'})
ctx, bindings, {router1['id']: 'active',
router2['id']: 'active'})
routers = self.plugin.get_ha_sync_data_for_host(
self.admin_ctx, self.agent1['host'], self.agent1)
self.assertEqual('active', routers[0][n_const.HA_ROUTER_STATE_KEY])
@@ -1033,10 +1041,11 @@ class L3HAModeDbTestCase(L3HATestFramework):
self.plugin.add_router_interface(self.admin_ctx,
router['id'],
interface_info)
ctx = self.admin_ctx
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, router_ids=[router['id']],
ctx, router_ids=[router['id']],
host=self.agent2['host'])
self.plugin._set_router_states(self.admin_ctx, bindings,
self.plugin._set_router_states(ctx, bindings,
{router['id']: 'active'})
callback = l3_rpc.L3RpcCallback()
callback._l3plugin = self.plugin
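A note on the test framework change above: admin_ctx becomes a property that builds a fresh admin context, and therefore a fresh database session, on every access, so separate API calls in a test no longer share a session holding possibly stale objects; tests that need one session across several statements first capture it in a local variable (ctx = self.admin_ctx). A minimal, self-contained sketch of that pattern, with FakeContext standing in for context.get_admin_context():

# Hypothetical stand-ins for Neutron's context and session objects; only the
# fresh-object-per-property-access behaviour is illustrated here.
import itertools

_session_ids = itertools.count(1)


class FakeContext(object):
    def __init__(self):
        # Each context carries its own "session" identifier.
        self.session = next(_session_ids)


class ExampleTestFramework(object):
    @property
    def admin_ctx(self):
        # New context (and session) on every reference, so two API calls
        # made with self.admin_ctx never observe each other's stale objects.
        return FakeContext()


tests = ExampleTestFramework()
assert tests.admin_ctx.session != tests.admin_ctx.session
# When a single session must span several calls, capture the context once:
ctx = tests.admin_ctx
assert ctx.session == ctx.session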