Check if pool update is needed in reference driver
Commit 6ed8f45fdf
had some negative impact on concurrent ip allocations. To make the ipam driver aware of subnet updates (mostly for third-party drivers) the ipam driver is always called with allocation pools even if the pools are not changed. The current way of handling that event is deleting old pools and creating new pools. But at scale it may cause issues, because of this: - deleting allocation pools removes availability ranges by foreign key; - any ip allocation modifies an availability range; These events concurrently modify availability range records, causing deadlocks. This fix prevents deleting and recreating pools and availability ranges in cases where allocation pools are not changed. So it eliminates the negative impact on concurrency added by always calling the ipam driver on subnet update. This fix aims to provide a backportable solution to be used with 6ed8f45fdf
. A complete solution that eliminates concurrent modifications in the availability range table is expected to be delivered with ticket #1543094, but it will not be backportable because of the scope of the change. Change-Id: I29e03a79c34b150a822697f7b556ed168a57c064 Related-Bug: #1534625 Closes-Bug: #1572474
This commit is contained in:
parent
213881e587
commit
310074b2d4
|
@ -370,11 +370,20 @@ class NeutronDbSubnet(ipam_base.Subnet):
|
|||
subnet_id=self.subnet_manager.neutron_id,
|
||||
ip_address=address)
|
||||
|
||||
def _no_pool_changes(self, session, pools):
    """Check if pool updates in db are required.

    :param session: database session used to fetch the stored pools
    :param pools: list of netaddr.IPRange objects requested by the
        subnet update
    :returns: True when the requested pools are the same as the pools
        already stored for this subnet, so the expensive
        delete/recreate cycle can be skipped
    """
    db_pools = self.subnet_manager.list_pools(session)
    iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip)
                     for pool in db_pools]
    # Compare as sorted sequences: the database does not guarantee row
    # order, and the same pools listed in a different order are still
    # unchanged, so they must not trigger a delete/recreate.
    return sorted(pools) == sorted(iprange_pools)
|
||||
|
||||
def update_allocation_pools(self, pools, cidr):
    """Replace this subnet's allocation pools with *pools*.

    Pools have already been validated in the subnet request object
    which was sent to the subnet pool driver, so no further validation
    is required here. When the requested pools match what is already
    stored, this is a no-op: skipping the delete/recreate cycle avoids
    needless churn on the availability-range records.
    """
    db_session = self._context.session
    if not self._no_pool_changes(db_session, pools):
        self.subnet_manager.delete_allocation_pools(db_session)
        self.create_allocation_pools(
            self.subnet_manager, db_session, pools, cidr)
        self._pools = pools
|
||||
|
|
|
@ -21,6 +21,7 @@ from neutron_lib import exceptions as n_exc
|
|||
from neutron.common import constants as n_const
|
||||
from neutron import context
|
||||
from neutron.db import api as ndb_api
|
||||
from neutron.ipam.drivers.neutrondb_ipam import db_models
|
||||
from neutron.ipam.drivers.neutrondb_ipam import driver
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
from neutron.ipam import requests as ipam_req
|
||||
|
@ -460,3 +461,43 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
ipam_subnet._allocate_specific_ip(self.ctx.session, ip)
|
||||
|
||||
self.assertRaises(ipam_exc.IPAllocationFailed, go)
|
||||
|
||||
def test_update_allocation_pools_with_no_pool_change(self):
    """Unchanged allocation pools must not be deleted and recreated."""
    cidr = '10.0.0.0/24'
    ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
    delete_mock = mock.Mock()
    create_mock = mock.Mock()
    ipam_subnet.subnet_manager.delete_allocation_pools = delete_mock
    ipam_subnet.create_allocation_pools = create_mock
    # The default pool for a /24 subnet: updating with the pools the
    # subnet already has must be a no-op.
    unchanged_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')]
    ipam_subnet.update_allocation_pools(unchanged_pools, cidr)
    self.assertFalse(delete_mock.called)
    self.assertFalse(create_mock.called)
|
||||
|
||||
def _test__no_pool_changes(self, new_pools):
    """Run _no_pool_changes() against two fixed pools stored in the db.

    :param new_pools: list of netaddr.IPRange objects to compare
        against the stored pools
    :returns: the value returned by NeutronDbSubnet._no_pool_changes()
    """
    # Use 'subnet_id' rather than 'id' to avoid shadowing the builtin.
    subnet_id = 'some-id'
    ipam_subnet = driver.NeutronDbSubnet(subnet_id, self.ctx)
    db_pools = [
        db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                     first_ip='192.168.10.20',
                                     last_ip='192.168.10.41'),
        db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                     first_ip='192.168.10.50',
                                     last_ip='192.168.10.60')]

    ipam_subnet.subnet_manager.list_pools = mock.Mock(
        return_value=db_pools)
    return ipam_subnet._no_pool_changes(self.ctx.session, new_pools)
|
||||
|
||||
def test__no_pool_changes_negative(self):
    """Pools differing from the stored ones are reported as changed."""
    changed_pool_sets = [
        # single pool covering a different range
        [netaddr.IPRange('192.168.10.2', '192.168.10.254')],
        # only one of the two stored pools
        [netaddr.IPRange('192.168.10.20', '192.168.10.41')],
        # second pool shifted by one address
        [netaddr.IPRange('192.168.10.20', '192.168.10.41'),
         netaddr.IPRange('192.168.10.51', '192.168.10.60')],
    ]
    for new_pools in changed_pool_sets:
        self.assertFalse(self._test__no_pool_changes(new_pools))
|
||||
|
||||
def test__no_pool_changes_positive(self):
    """Pools identical to the stored ones are reported as unchanged."""
    unchanged_pools = [
        netaddr.IPRange('192.168.10.20', '192.168.10.41'),
        netaddr.IPRange('192.168.10.50', '192.168.10.60'),
    ]
    self.assertTrue(self._test__no_pool_changes(unchanged_pools))
|
||||
|
|
Loading…
Reference in New Issue