Update pool upon HM deletion request

When an HM is deleted, the Octavia API sets the provisioning_status
of the related pool to PENDING_UPDATE and waits for the new status
once the HM deletion finishes on the provider. When multiple pools
are attached to an LB, the OVN provider sent this status update for
the first pool it found, leaving the pool actually related to the HM
stuck in PENDING_UPDATE.

This patch ensures that the status update sent by the OVN provider
references the correct pool id.

Closes-Bug: 2024912
Change-Id: Ie5d01ce291409383558b3dd7c4d2fe91fd657255
Fernando Royo 2023-06-23 17:08:38 +02:00
parent 382ddb0329
commit ed02dba2bc
2 changed files with 47 additions and 20 deletions
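
For context: a provider reports the outcome of each operation back to
Octavia as a status dict keyed by object type, and the API only moves
a pool out of PENDING_UPDATE when an entry carrying that pool's id
arrives. A minimal sketch of the expected shape for an HM deletion
(ids are hypothetical; the real dict is assembled at the end of
hm_delete in the diff below):

    # Minimal shape of the provider's status update for an HM deletion.
    # Octavia matches pools by 'id'; if this names the wrong pool, the
    # pool left in PENDING_UPDATE is never unblocked.
    status = {
        'pools': [{'id': 'pool-of-the-deleted-hm',
                   'provisioning_status': 'ACTIVE'}],
        'healthmonitors': [{'id': 'hm-1',
                            'operating_status': 'NO_MONITOR',
                            'provisioning_status': 'DELETED'}],
    }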


@@ -2833,7 +2833,7 @@ class OvnProviderHelper():
     def hm_delete(self, info):
         hm_id = info[constants.ID]
-        pool_id_related = info[constants.POOL_ID]
+        pool_id = info[constants.POOL_ID]
         status = {
             constants.HEALTHMONITORS: [
@@ -2852,17 +2852,15 @@ class OvnProviderHelper():
         # the LB should have this info. Also in order to delete the hm port
         # used for health checks we need to get all subnets from the members
         # on the pool
-        pool_id = None
         pool_listeners = []
         member_subnets = []
         for k, v in ovn_lb.external_ids.items():
-            if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
+            if self._get_pool_key(pool_id) == k:
                 members = self._extract_member_info(ovn_lb.external_ids[k])
                 member_subnets = list(
                     set([mb_subnet
                          for (mb_ip, mb_port, mb_subnet, mb_id) in members])
                 )
-                pool_id = k.split('_')[1]
                 pool_listeners = self._get_pool_listeners(
                     ovn_lb, self._get_pool_key(pool_id))
                 break
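
The one-line change in the loop above is the heart of the fix: the old
code matched any external_ids key containing the pool prefix and took
the first hit, while the new code matches only the exact key of the
pool named in the deletion request. A standalone sketch, assuming keys
of the form 'pool_<pool_id>' as the new test below builds them
('pool_%s' % self.pool_id and 'pool_fake'), with plain concatenation
standing in for self._get_pool_key():

    POOL_PREFIX = 'pool_'  # stand-in for ovn_const.LB_EXT_IDS_POOL_PREFIX

    external_ids = {
        'pool_fake': 'member-line',  # unrelated pool on the same LB
        'pool_1111': 'member-line',  # pool of the deleted HM
    }

    def old_lookup():
        # Old behaviour: the first key containing the prefix wins, so
        # with several pools the status may name an unrelated pool.
        for k in external_ids:
            if POOL_PREFIX in k:
                return k.split('_')[1]

    def new_lookup(pool_id):
        # Fixed behaviour: only the exact key of the HM's pool matches.
        for k in external_ids:
            if POOL_PREFIX + pool_id == k:
                return pool_id

    print(old_lookup())        # fake
    print(new_lookup('1111'))  # 1111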
@@ -2873,7 +2871,7 @@ class OvnProviderHelper():
         hms_key = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_HMS_KEY, [])

         # Update status for members in the pool related to HM
-        member_status = self._update_member_statuses(ovn_lb, pool_id_related,
+        member_status = self._update_member_statuses(ovn_lb, pool_id,
                                                      constants.ACTIVE,
                                                      constants.NO_MONITOR)
@@ -2883,7 +2881,7 @@ class OvnProviderHelper():
             hms_key.remove(hm_id)
         self._clean_ip_port_mappings(ovn_lb, ovn_const.LB_EXT_IDS_POOL_PREFIX +
-                                     str(pool_id_related))
+                                     str(pool_id))

         commands = []
         for lbhc in lbhcs:
@@ -2916,26 +2914,22 @@ class OvnProviderHelper():
             constants.LOADBALANCERS: [
                 {constants.ID: ovn_lb.name,
                  constants.PROVISIONING_STATUS: constants.ACTIVE}],
+            constants.POOLS: [
+                {constants.ID: pool_id,
+                 constants.PROVISIONING_STATUS: constants.ACTIVE}],
             constants.HEALTHMONITORS: [
                 {constants.ID: info[constants.ID],
                  constants.OPERATING_STATUS: constants.NO_MONITOR,
                  constants.PROVISIONING_STATUS: constants.DELETED}]}
-        if pool_id:
-            status[constants.POOLS] = [
-                {constants.ID: pool_id,
-                 constants.PROVISIONING_STATUS: constants.ACTIVE}]
-            if member_status:
-                status[constants.MEMBERS] = member_status
+        if member_status:
+            status[constants.MEMBERS] = member_status

-            status[constants.LISTENERS] = []
-            for listener in pool_listeners:
-                status[constants.LISTENERS].append(
-                    {constants.ID: listener,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE})
-        else:
-            LOG.warning('Pool not found for load balancer %s, status '
-                        'update will have incomplete data', ovn_lb.name)
+        status[constants.LISTENERS] = []
+        for listener in pool_listeners:
+            status[constants.LISTENERS].append(
+                {constants.ID: listener,
+                 constants.PROVISIONING_STATUS: constants.ACTIVE})

         return status

     def _get_lbs_on_hm_event(self, row):
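
With pool_id taken directly from the deletion request, the pool entry
no longer depends on rediscovering the pool in external_ids, so the
old if pool_id/else fallback and its warning become dead weight. A
condensed, standalone sketch of the resulting assembly (plain strings
stand in for the constants module; not the provider's actual code):

    def build_status(lb_name, hm_id, pool_id, pool_listeners,
                     member_status):
        # The pool entry is unconditional: the id comes from the request.
        status = {
            'loadbalancers': [{'id': lb_name,
                               'provisioning_status': 'ACTIVE'}],
            'pools': [{'id': pool_id, 'provisioning_status': 'ACTIVE'}],
            'healthmonitors': [{'id': hm_id,
                                'operating_status': 'NO_MONITOR',
                                'provisioning_status': 'DELETED'}]}
        if member_status:
            status['members'] = member_status
        # All listeners attached to the pool are reported ACTIVE again.
        status['listeners'] = [{'id': listener,
                                'provisioning_status': 'ACTIVE'}
                               for listener in pool_listeners]
        return status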


@@ -4058,6 +4058,39 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
         self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
             expected_destroy_calls)

+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port')
+    def test_hm_delete_multiples_pools_sharing_members(self, del_hm_port):
+        self._get_pool_listeners.stop()
+        pool_key = 'pool_%s' % self.pool_id
+        self.ovn_hm_lb.external_ids[pool_key] = self.member_line
+        self.ovn_hm_lb.external_ids['pool_fake'] = self.member_line
+        self.helper.ovn_nbdb_api.db_list_rows.return_value.\
+            execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]]
+        status = self.helper.hm_delete(self.health_monitor)
+        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
+                         constants.DELETED)
+        self.assertEqual(status['healthmonitors'][0]['operating_status'],
+                         constants.NO_MONITOR)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['id'], self.pool_id)
+        self.assertEqual(status['listeners'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        expected_remove_calls = [
+            mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
+                      self.ovn_hm.uuid),
+            mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
+                      'external_ids', ovn_const.LB_EXT_IDS_HMS_KEY)]
+        expected_destroy_calls = [
+            mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
+        del_hm_port.assert_called_once_with(self.member_subnet_id)
+        self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
+            expected_remove_calls)
+        self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
+            expected_destroy_calls)
+
     @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port')
     def test_hm_delete_without_members_in_pool(self, del_hm_port):
         self._get_pool_listeners.stop()