Ensure DVR is re-established for members on cascade deletion

Traffic to members that have FIPs gets centralized when they
are part of a load balancer. However, when the load balancer gets
deleted, the traffic should be distributed again (if DVR is
enabled). To achieve that, this patch also handles the cascade
deletion case.

Closes-Bug: #2025637
Change-Id: Ie4b44c9f15fc9e33a68f9aacd766590b974c63fd
Luis Tomas Bolivar 2023-06-23 18:22:46 +02:00
parent ede9b19309
commit 20997b185f
4 changed files with 37 additions and 4 deletions
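For orientation before the hunks below, here is a minimal, self-contained sketch of the flow this change introduces. The classes are simplified stand-ins, not the real OvnProviderHelper; only the method names member_delete and handle_member_dvr and the member-deleted action mirror the patch.

# Simplified stand-ins (not the real helper) illustrating the
# cascade-deletion flow this patch adds: each member of the deleted
# load balancer is removed and then re-run through the member-DVR
# handling so FIP traffic stops being centralized.


class SketchHelper:
    def member_delete(self, member):
        print('deleting member %s' % member['id'])

    def handle_member_dvr(self, info):
        # Re-establish DVR for the member's FIP, i.e. decentralize
        # the traffic that was centralized while the LB existed.
        print('decentralizing traffic for %s' % info['id'])

    def lb_delete(self, lb, cascade=False):
        if cascade:
            for member in lb['members']:
                self.member_delete(member)
                info = dict(member, action='member_deleted')
                self.handle_member_dvr(info)


helper = SketchHelper()
helper.lb_delete(
    {'members': [{'id': 'member-uuid', 'address': '10.0.0.5',
                  'pool_id': 'pool-uuid', 'subnet_id': 'subnet-uuid'}]},
    cascade=True)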

@@ -422,6 +422,19 @@ class OvnProviderDriver(driver_base.ProviderDriver):
                            'info': request_info}
                 request_list.append(request)
+
+                # NOTE(mjozefcz): If LB has FIP on VIP
+                # and member had FIP we can decentralize
+                # the traffic now.
+                request_info = {'id': member_id,
+                                'address': member_ip,
+                                'pool_id': pool_id,
+                                'action': ovn_const.REQ_INFO_MEMBER_DELETED}
+                if len(member_info) == 4:
+                    request_info['subnet_id'] = subnet_id
+                request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR,
+                           'info': request_info}
+                request_list.append(request)
 
         for request in request_list:
             self._ovn_helper.add_request(request)
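A hedged, standalone restatement of what the hunk above enqueues per deleted member. The constant values here are illustrative stand-ins; the real ones live in ovn_octavia_provider.common.constants (ovn_const).

# Standalone restatement of the request built above; constant values
# are assumed stand-ins, not copied from ovn_const.
REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr'
REQ_INFO_MEMBER_DELETED = 'member_deleted'


def build_member_dvr_request(member_id, member_ip, pool_id,
                             subnet_id=None):
    request_info = {'id': member_id,
                    'address': member_ip,
                    'pool_id': pool_id,
                    'action': REQ_INFO_MEMBER_DELETED}
    # The subnet id is only attached when the member entry carried
    # four '_'-separated fields (mirrors the len(member_info) == 4
    # check above).
    if subnet_id is not None:
        request_info['subnet_id'] = subnet_id
    return {'type': REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info}


print(build_member_dvr_request('member-uuid', '10.0.0.5', 'pool-uuid',
                               'subnet-uuid'))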

@@ -1219,6 +1219,25 @@ class OvnProviderHelper():
             if value and len(value.split(',')) > 0:
                 for mem_info in value.split(','):
                     member_subnets.append(mem_info.split('_')[3])
+                    member_id = mem_info.split("_")[1]
+                    member_ip = mem_info.split('_')[2].split(":")[0]
+                    member_port = mem_info.split('_')[2].split(":")[1]
+                    member_subnet = mem_info.split("_")[3]
+                    member = {
+                        'id': member_id,
+                        'address': member_ip,
+                        'protocol_port': member_port,
+                        'pool_id': pool_id,
+                        'subnet_id': member_subnet}
+                    self.member_delete(member)
+                    member_info = {
+                        'id': member_id,
+                        'address': member_ip,
+                        'pool_id': pool_id,
+                        'subnet_id': member_subnet,
+                        'action': ovn_const.REQ_INFO_MEMBER_DELETED}
+                    self.handle_member_dvr(member_info)
             status[constants.MEMBERS].append({
                 constants.ID: mem_info.split('_')[1],
                 constants.PROVISIONING_STATUS:
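The splits above imply a '_'-separated member entry of the form member_<id>_<address>:<port>_<subnet_id>; only the field positions are taken from the code, and the 'member' prefix in the example string is an assumption. A small sketch of that parsing:

# Parsing sketch for one pool-member entry from the LB's external_ids.
# The example string format is inferred from the split indexes above;
# treat the 'member' prefix as an assumption.
mem_info = 'member_member-uuid_10.0.0.5:80_subnet-uuid'

member_id = mem_info.split('_')[1]
member_ip = mem_info.split('_')[2].split(':')[0]
member_port = mem_info.split('_')[2].split(':')[1]
member_subnet = mem_info.split('_')[3]

assert (member_id, member_ip, member_port, member_subnet) == (
    'member-uuid', '10.0.0.5', '80', 'subnet-uuid')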

@@ -413,7 +413,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
     def test_member_batch_update(self):
         self.driver.member_batch_update(self.pool_id,
                                         [self.ref_member, self.update_member])
-        self.assertEqual(self.mock_add_request.call_count, 3)
+        self.assertEqual(self.mock_add_request.call_count, 4)
 
     def test_member_batch_update_member_delete(self):
         info_md = {
@@ -428,7 +428,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
         expected = [
             mock.call(expected_dict_md)]
         self.driver.member_batch_update(self.pool_id, [])
-        self.assertEqual(self.mock_add_request.call_count, 1)
+        self.assertEqual(self.mock_add_request.call_count, 2)
         self.mock_add_request.assert_has_calls(expected)
 
     def test_member_batch_update_no_members(self):
@@ -457,7 +457,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
     def test_member_batch_update_unset_admin_state_up(self):
         self.ref_member.admin_state_up = data_models.UnsetType()
         self.driver.member_batch_update(self.pool_id, [self.ref_member])
-        self.assertEqual(self.mock_add_request.call_count, 2)
+        self.assertEqual(self.mock_add_request.call_count, 3)
 
     def test_member_batch_update_toggle_admin_state_up(self):
         info_mu = {
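All the count bumps above share one cause: each deleted (or replaced) member now enqueues one extra REQ_TYPE_HANDLE_MEMBER_DVR request alongside its member-delete request. A self-contained illustration with a mocked queue; enqueue_member_delete is a hypothetical stand-in, not the real driver method.

# Why the expected call counts grew by one: every member deletion now
# enqueues a second, DVR-handling request.
from unittest import mock


def enqueue_member_delete(add_request, member_id):
    add_request({'type': 'member_delete', 'info': {'id': member_id}})
    # New with this patch: follow-up request to re-establish DVR.
    add_request({'type': 'handle_member_dvr', 'info': {'id': member_id}})


add_request = mock.Mock()
enqueue_member_delete(add_request, 'member-uuid')
assert add_request.call_count == 2  # one request before the patch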

@@ -960,8 +960,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
             self.ovn_lb.uuid)
         del_port.assert_called_once_with('foo_port')
 
+    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
     @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
-    def test_lb_delete_port_exception(self, del_port):
+    def test_lb_delete_port_exception(self, del_port, net_cli):
        del_port.side_effect = [Exception]
        status = self.helper.lb_delete(self.ovn_lb)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
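The new net_cli argument lands last because stacked mock.patch decorators inject mocks bottom-up: the innermost patch (delete_port) becomes the first mock argument, the newly added outer patch the second. A quick standalone check; the Helper class and patched names below are hypothetical stand-ins.

# Stacked mock.patch decorators inject mocks bottom-up.
from unittest import mock


class Helper:
    def delete_port(self, port_id):
        pass


@mock.patch('builtins.print')               # outer patch -> second arg
@mock.patch.object(Helper, 'delete_port')   # inner patch -> first arg
def check(del_port, fake_print):
    assert isinstance(del_port, mock.MagicMock)
    assert isinstance(fake_print, mock.MagicMock)


check()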