Always call ipam driver on subnet update
Backport contains two commits: -6ed8f45fdf
had some negative impact on concurrency; -310074b2d4
fixes that negative impact; COMMIT 1: Previously the ipam driver was not called on subnet update if allocation_pools are not in the request. Changing it to call the ipam driver each time a subnet update is requested. If subnet_update is called without allocation_pools, then the old allocation pools are passed to the ipam driver. Contains a bit of refactoring to make that happen: - validate_allocation_pools is already called during the update subnet workflow in the update_subnet method, so just removing it; - reworked the update_db_subnet workflow; the previous workflow was: call driver allocation -> make local allocation -> rollback driver allocation in except block if local allocation failed. New workflow: make local allocation -> call driver allocation. By changing the order of execution we eliminate the need for rollback in this method, since a failure in local allocation is rolled back by the database transaction rollback. - make a copy of the incoming subnet dict; _update_subnet_allocation_pools from ipam_backend_mixin removes 'allocation_pools' from subnet_dict, so create an unchanged copy to pass it to the ipam driver COMMIT 2: Check if pool update is needed in reference driver Commit 6ed8f45fdf
had some negative impact on concurrent ip allocations. To make the ipam driver aware of subnet updates (mostly for third-party drivers) the ipam driver is always called with allocation pools even if the pools are not changed. The current way of handling that event is deleting the old pools and creating new pools. But at scale it may cause issues, because of this: - deleting allocation pools removes availability ranges by foreign key; - any ip allocation modifies an availability range; These events concurrently modify availability range records, causing deadlocks. This fix prevents deleting and recreating pools and availability ranges in cases where allocation pools are not changed. So it eliminates the negative impact on concurrency added by always calling the ipam driver on subnet update. This fix aims to provide a backportable solution to be used with 6ed8f45fdf
. A complete solution that eliminates concurrent modifications in the availability range table is expected to be delivered with ticket #1543094, but it will not be backportable because of the scope of the change. Change-Id: Ie4bd85d2ff2ab39bf803c5ef6c6ead151dbb74d7 Closes-Bug: #1534625 Closes-Bug: #1572474 (cherry picked from commit 6ed8f45fdf
) (cherry picked from commit 7a586844ca4454c72876ec3084e4477ce521ece7)
This commit is contained in:
parent
6d1037cc30
commit
bf631b226d
|
@ -13,6 +13,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
|
||||
import netaddr
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_log import log as logging
|
||||
|
@ -137,9 +139,6 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
return allocated
|
||||
|
||||
def _ipam_update_allocation_pools(self, context, ipam_driver, subnet):
|
||||
self.validate_allocation_pools(subnet['allocation_pools'],
|
||||
subnet['cidr'])
|
||||
|
||||
factory = ipam_driver.get_subnet_request_factory()
|
||||
subnet_request = factory.get_request(context, subnet, None)
|
||||
|
||||
|
@ -358,22 +357,22 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
port['fixed_ips'])
|
||||
|
||||
def update_db_subnet(self, context, id, s, old_pools):
    """Update a subnet in the db and notify the ipam driver.

    The local (db) update runs first; if it fails, the surrounding
    database transaction rolls everything back, so no explicit
    rollback of the ipam driver allocation is needed here (this is
    why the old try/except rollback block was removed).

    :param context: neutron api request context
    :param id: id of the subnet being updated
    :param s: dict with the requested subnet changes
    :param old_pools: allocation pools currently stored for the subnet
    :returns: tuple of (updated subnet, dict of applied changes)
    """
    # 'allocation_pools' is removed from 's' in
    # _update_subnet_allocation_pools (ipam_backend_mixin),
    # so create an unchanged copy for the ipam driver.
    subnet_copy = copy.deepcopy(s)

    subnet, changes = super(IpamPluggableBackend, self).update_db_subnet(
        context, id, s, old_pools)

    ipam_driver = driver.Pool.get_instance(None, context)
    # Set old allocation pools if no new pools are provided by user.
    # Passing old pools allows to call ipam driver on each subnet update
    # even if allocation pools are not changed. So custom ipam drivers
    # are able to track other fields changes on subnet update.
    if 'allocation_pools' not in subnet_copy:
        subnet_copy['allocation_pools'] = old_pools
    self._ipam_update_allocation_pools(context, ipam_driver, subnet_copy)

    return subnet, changes
|
||||
|
||||
def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
|
||||
|
|
|
@ -370,11 +370,20 @@ class NeutronDbSubnet(ipam_base.Subnet):
|
|||
subnet_id=self.subnet_manager.neutron_id,
|
||||
ip_address=address)
|
||||
|
||||
def _no_pool_changes(self, session, pools):
    """Check if pool updates in db are required.

    Compares the requested ``pools`` (netaddr.IPRange list) with the
    allocation pools currently stored for this subnet and returns
    True when they are equal, i.e. the delete/recreate of pools (and
    of the availability ranges hanging off them) can be skipped.
    """
    db_pools = self.subnet_manager.list_pools(session)
    iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip)
                     for pool in db_pools]
    return pools == iprange_pools
|
||||
|
||||
def update_allocation_pools(self, pools, cidr):
    """Replace this subnet's allocation pools with ``pools``.

    Pools have already been validated in the subnet request object
    which was sent to the subnet pool driver. Further validation
    should not be required.

    Deleting pools cascades to the availability ranges (removed by
    foreign key), and concurrent ip allocations modify those same
    ranges, which can deadlock at scale — so skip the delete and
    recreate entirely when the requested pools are unchanged.
    """
    session = self._context.session
    if self._no_pool_changes(session, pools):
        return
    self.subnet_manager.delete_allocation_pools(session)
    self.create_allocation_pools(self.subnet_manager, session, pools, cidr)
    self._pools = pools
|
||||
|
|
|
@ -62,9 +62,11 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
|
|||
self.tenant_id = uuidutils.generate_uuid()
|
||||
self.subnet_id = uuidutils.generate_uuid()
|
||||
|
||||
def _prepare_mocks(self, address_factory=None):
|
||||
def _prepare_mocks(self, address_factory=None, subnet_factory=None):
|
||||
if address_factory is None:
|
||||
address_factory = ipam_req.AddressRequestFactory
|
||||
if subnet_factory is None:
|
||||
subnet_factory = ipam_req.SubnetRequestFactory
|
||||
|
||||
mocks = {
|
||||
'driver': mock.Mock(),
|
||||
|
@ -79,7 +81,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
|
|||
mocks['driver'].get_subnet.return_value = mocks['subnet']
|
||||
mocks['driver'].allocate_subnet.return_value = mocks['subnet']
|
||||
mocks['driver'].get_subnet_request_factory.return_value = (
|
||||
ipam_req.SubnetRequestFactory)
|
||||
subnet_factory)
|
||||
mocks['driver'].get_address_request_factory.return_value = (
|
||||
address_factory)
|
||||
mocks['subnet'].get_details.return_value = mocks['subnet_request']
|
||||
|
@ -90,8 +92,10 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
|
|||
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
|
||||
return mocks
|
||||
|
||||
def _prepare_mocks_with_pool_mock(self, pool_mock, address_factory=None,
                                  subnet_factory=None):
    """Build the standard mock set and wire the mocked ipam driver
    into the (patched) driver.Pool.get_instance entry point.

    ``subnet_factory`` defaults to None so existing callers that pass
    only ``pool_mock``/``address_factory`` keep working.
    """
    mocks = self._prepare_mocks(address_factory=address_factory,
                                subnet_factory=subnet_factory)
    pool_mock.get_instance.return_value = mocks['driver']
    return mocks
|
||||
|
||||
|
@ -582,3 +586,52 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
|
|||
ip_dict)
|
||||
# Verify incoming port dict is not changed ('id' is not added to it)
|
||||
self.assertIsNone(port_dict['port'].get('id'))
|
||||
|
||||
def _test_update_db_subnet(self, pool_mock, subnet, expected_subnet,
                           old_pools):
    """Run update_db_subnet and verify the subnet dict handed to the
    subnet request factory equals ``expected_subnet``.
    """
    subnet_factory = mock.Mock()
    context = mock.Mock()

    mocks = self._prepare_mocks_with_pool_mock(
        pool_mock, subnet_factory=subnet_factory)

    mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
    mocks['ipam'].update_db_subnet(context, id, subnet, old_pools)

    # The ipam driver must be asked for its request factory, and the
    # factory must receive the (possibly pool-augmented) subnet dict.
    mocks['driver'].get_subnet_request_factory.assert_called_once_with()
    subnet_factory.get_request.assert_called_once_with(context,
                                                      expected_subnet,
                                                      None)
|
||||
|
||||
@mock.patch('neutron.ipam.driver.Pool')
def test_update_db_subnet_unchanged_pools(self, pool_mock):
    """If the update request carries no allocation_pools, the old
    pools must be added to the subnet dict passed to the request
    factory, so the ipam driver always sees the effective pools.
    """
    old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')]
    subnet = {'id': uuidutils.generate_uuid(),
              'network_id': uuidutils.generate_uuid(),
              'cidr': '192.1.1.0/24',
              'ipv6_address_mode': None,
              'ipv6_ra_mode': None}
    subnet_with_pools = subnet.copy()
    subnet_with_pools['allocation_pools'] = old_pools
    self._test_update_db_subnet(pool_mock, subnet, subnet_with_pools,
                                old_pools)
|
||||
|
||||
@mock.patch('neutron.ipam.driver.Pool')
def test_update_db_subnet_new_pools(self, pool_mock):
    """New pools supplied in the request must reach the request
    factory unchanged, i.e. not be overwritten by the old pools.
    """
    old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')]
    subnet = {'id': uuidutils.generate_uuid(),
              'network_id': uuidutils.generate_uuid(),
              'cidr': '192.1.1.0/24',
              'allocation_pools': [
                  netaddr.IPRange('192.1.1.10', '192.1.1.254')],
              'ipv6_address_mode': None,
              'ipv6_ra_mode': None}
    # Make a copy of subnet for validation, since update_db_subnet
    # mutates the incoming subnet dict.
    expected_subnet = subnet.copy()
    self._test_update_db_subnet(pool_mock, subnet, expected_subnet,
                                old_pools)
|
||||
|
|
|
@ -21,6 +21,7 @@ from neutron.common import constants
|
|||
from neutron.common import exceptions as n_exc
|
||||
from neutron import context
|
||||
from neutron.db import api as ndb_api
|
||||
from neutron.ipam.drivers.neutrondb_ipam import db_models
|
||||
from neutron.ipam.drivers.neutrondb_ipam import driver
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
from neutron.ipam import requests as ipam_req
|
||||
|
@ -460,3 +461,43 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
ipam_subnet._allocate_specific_ip(self.ctx.session, ip)
|
||||
|
||||
self.assertRaises(ipam_exc.IPAllocationFailed, go)
|
||||
|
||||
def test_update_allocation_pools_with_no_pool_change(self):
    """Allocation pool recreation must not happen when the requested
    pools match the pools already stored for the subnet.
    """
    cidr = '10.0.0.0/24'
    ipam_subnet = self._create_and_allocate_ipam_subnet(
        cidr)[0]
    ipam_subnet.subnet_manager.delete_allocation_pools = mock.Mock()
    ipam_subnet.create_allocation_pools = mock.Mock()
    # The default pool for 10.0.0.0/24 is exactly this range, so the
    # update is a no-op and neither delete nor create may fire.
    alloc_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')]
    ipam_subnet.update_allocation_pools(alloc_pools, cidr)
    self.assertFalse(
        ipam_subnet.subnet_manager.delete_allocation_pools.called)
    self.assertFalse(ipam_subnet.create_allocation_pools.called)
|
||||
|
||||
def _test__no_pool_changes(self, new_pools):
    """Feed two fixed stored pools to _no_pool_changes via a mocked
    subnet_manager and return whether ``new_pools`` matches them.
    """
    # Renamed from 'id' to avoid shadowing the builtin.
    subnet_id = 'some-id'
    ipam_subnet = driver.NeutronDbSubnet(subnet_id, self.ctx)
    pools = [db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                          first_ip='192.168.10.20',
                                          last_ip='192.168.10.41'),
             db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                          first_ip='192.168.10.50',
                                          last_ip='192.168.10.60')]

    ipam_subnet.subnet_manager.list_pools = mock.Mock(return_value=pools)
    return ipam_subnet._no_pool_changes(self.ctx.session, new_pools)
|
||||
|
||||
def test__no_pool_changes_negative(self):
    """Differing range, pool count, or boundary must report a change."""
    pool_list = [[netaddr.IPRange('192.168.10.2', '192.168.10.254')],
                 [netaddr.IPRange('192.168.10.20', '192.168.10.41')],
                 [netaddr.IPRange('192.168.10.20', '192.168.10.41'),
                  netaddr.IPRange('192.168.10.51', '192.168.10.60')]]
    for pools in pool_list:
        self.assertFalse(self._test__no_pool_changes(pools))
|
||||
|
||||
def test__no_pool_changes_positive(self):
    """Pools identical to the stored ones must report no change."""
    pools = [netaddr.IPRange('192.168.10.20', '192.168.10.41'),
             netaddr.IPRange('192.168.10.50', '192.168.10.60')]
    self.assertTrue(self._test__no_pool_changes(pools))
|
||||
|
|
Loading…
Reference in New Issue