Fix update member action

Upon receipt of a member update request, certain attribute checks
are performed; these may raise an error and prevent the update from
completing successfully.

As per [1], only the "admin_state_up" attribute holds significance
in enabling or disabling members on the ovn-provider side during
an update operation.

This patch removes the other checks, which are deemed unnecessary.

[1] https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-member-detail,list-pools-detail,create-pool-detail,batch-update-members-detail#update-a-member
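
For illustration (VIP/member addresses borrowed from the unit tests
below, not part of the patch): disabling a member via admin_state_up
now simply drops its backend from the OVN Load_Balancer 'vips' column
instead of raising a subnet validation error:

    # enabled:  {'10.22.33.4:80': '192.168.2.149:1010'}
    # disabled: {}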

Closes-Bug: 2017127
Change-Id: I388284968e27e0ad8ec7bb0a522aa2925b560146
Fernando Royo 2023-04-20 13:04:33 +02:00
parent 220d8c8581
commit 8beeeb9112
6 changed files with 96 additions and 152 deletions


@@ -329,25 +329,6 @@ class OvnProviderDriver(driver_base.ProviderDriver):
self._ovn_helper.add_request(request)
def member_update(self, old_member, new_member):
# NOTE(froyo): OVN provider allow to create member without param
# subnet_id, in that case the driver search it according to the
# pool_id, but it is not propagated to Octavia. In this case, if
# the member is updated, Octavia send the object without subnet_id.
subnet_id = old_member.subnet_id
if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id):
subnet_id, subnet_cidr = self._ovn_helper._get_subnet_from_pool(
old_member.pool_id)
if not (subnet_id and
self._ovn_helper._check_ip_in_subnet(new_member.address,
subnet_cidr)):
msg = _('Subnet is required, or Loadbalancer associated with '
'Pool must have a subnet, for Member update '
'with OVN Provider Driver if it is not the same as '
'LB VIP subnet')
raise driver_exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
# Validate monitoring options if present
self._check_member_monitor_options(new_member)
if new_member.address and self._ip_version_differs(new_member):
@@ -356,7 +337,6 @@ class OvnProviderDriver(driver_base.ProviderDriver):
'address': old_member.address,
'protocol_port': old_member.protocol_port,
'pool_id': old_member.pool_id,
'subnet_id': subnet_id,
'old_admin_state_up': old_member.admin_state_up}
if not isinstance(new_member.admin_state_up, o_datamodels.UnsetType):
request_info['admin_state_up'] = new_member.admin_state_up
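
Since the +/- markers of the hunks were lost in this rendering, here is
the resulting driver-side method reconstructed as a condensed sketch
(only fields visible in the hunks above are shown; other keys elided):

def member_update(self, old_member, new_member):
    # Only the monitor-option and IP-version checks remain; the subnet
    # lookup/validation removed in the hunk above is gone.
    self._check_member_monitor_options(new_member)
    if new_member.address and self._ip_version_differs(new_member):
        ...  # raises as before (elided in the hunk above)
    request_info = {'address': old_member.address,
                    'protocol_port': old_member.protocol_port,
                    'pool_id': old_member.pool_id,
                    'old_admin_state_up': old_member.admin_state_up}
    # admin_state_up is only forwarded when it is actually set
    if not isinstance(new_member.admin_state_up, o_datamodels.UnsetType):
        request_info['admin_state_up'] = new_member.admin_state_up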


@@ -909,7 +909,10 @@ class OvnProviderHelper():
return vip_port
return None
def _frame_vip_ips(self, lb_external_ids):
def _is_member_offline(self, ovn_lb, member_id):
return constants.OFFLINE == self._find_member_status(ovn_lb, member_id)
def _frame_vip_ips(self, ovn_lb, lb_external_ids):
vip_ips = {}
# If load balancer is disabled, return
if lb_external_ids.get('enabled') == 'False':
@@ -933,27 +936,29 @@ class OvnProviderHelper():
ips = []
for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
lb_external_ids[pool_id]):
if netaddr.IPNetwork(mb_ip).version == 6:
ips.append(f'[{mb_ip}]:{mb_port}')
else:
ips.append(f'{mb_ip}:{mb_port}')
if not self._is_member_offline(ovn_lb, mb_id):
if netaddr.IPNetwork(mb_ip).version == 6:
ips.append(f'[{mb_ip}]:{mb_port}')
else:
ips.append(f'{mb_ip}:{mb_port}')
if netaddr.IPNetwork(lb_vip).version == 6:
lb_vip = f'[{lb_vip}]'
vip_ips[lb_vip + ':' + vip_port] = ','.join(ips)
if ips:
if netaddr.IPNetwork(lb_vip).version == 6:
lb_vip = f'[{lb_vip}]'
vip_ips[lb_vip + ':' + vip_port] = ','.join(ips)
if vip_fip:
if netaddr.IPNetwork(vip_fip).version == 6:
vip_fip = f'[{vip_fip}]'
vip_ips[vip_fip + ':' + vip_port] = ','.join(ips)
if vip_fip:
if netaddr.IPNetwork(vip_fip).version == 6:
vip_fip = f'[{vip_fip}]'
vip_ips[vip_fip + ':' + vip_port] = ','.join(ips)
return vip_ips
def _refresh_lb_vips(self, ovn_lb_uuid, lb_external_ids):
vip_ips = self._frame_vip_ips(lb_external_ids)
return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb_uuid,
def _refresh_lb_vips(self, ovn_lb, lb_external_ids):
vip_ips = self._frame_vip_ips(ovn_lb, lb_external_ids)
return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
'vips'),
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb_uuid,
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('vips', vip_ips))]
def _is_listener_in_lb(self, lb):
@@ -1324,7 +1329,7 @@ class OvnProviderHelper():
('external_ids', enable_info))
)
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid,
self._refresh_lb_vips(ovn_lb,
ovn_lb.external_ids))
self._execute_commands(commands)
if lb_enabled:
@@ -1373,7 +1378,7 @@ class OvnProviderHelper():
'Load_Balancer', ovn_lb.uuid,
('protocol',
str(listener[constants.PROTOCOL]).lower())))
commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
commands.extend(self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener")
@@ -1444,7 +1449,7 @@ class OvnProviderHelper():
# has pending delete operation.
if not lb_to_delete:
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of listener")
@@ -1550,7 +1555,7 @@ class OvnProviderHelper():
('external_ids', l_key_to_add)))
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener")
@@ -1650,7 +1655,7 @@ class OvnProviderHelper():
'external_ids', (pool_key)))
del external_ids[pool_key]
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._refresh_lb_vips(ovn_lb, external_ids))
# Remove Pool from Listener if it is associated
for key, value in ovn_lb.external_ids.items():
if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and
@@ -1751,7 +1756,7 @@ class OvnProviderHelper():
('external_ids', p_key_to_add)))
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
if pool[constants.ADMIN_STATE_UP]:
operating_status = constants.ONLINE
@@ -1867,7 +1872,7 @@ class OvnProviderHelper():
('external_ids', pool_data)))
external_ids[pool_key] = pool_data[pool_key]
commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
commands.extend(self._refresh_lb_vips(ovn_lb, external_ids))
# Note (froyo): commands are now splitted to separate atomic process,
# leaving outside the not mandatory ones to allow add_member
# finish correctly
@@ -1988,7 +1993,7 @@ class OvnProviderHelper():
('external_ids', pool_data)))
external_ids[pool_key] = ",".join(existing_members)
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
self._update_lb_to_ls_association(
ovn_lb, subnet_id=member.get(constants.SUBNET_ID),
@@ -2050,25 +2055,6 @@ class OvnProviderHelper():
return status
def _update_member(self, member, ovn_lb, pool_key):
commands = []
external_ids = copy.deepcopy(ovn_lb.external_ids)
existing_members = external_ids[pool_key].split(",")
member_info = self._get_member_info(member)
for mem in existing_members:
if (member_info.split('_')[1] == mem.split('_')[1] and
mem != member_info):
existing_members.remove(mem)
existing_members.append(member_info)
pool_data = {pool_key: ",".join(existing_members)}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', pool_data)))
external_ids[pool_key] = ",".join(existing_members)
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._execute_commands(commands)
def member_update(self, member):
pool_listeners = []
try:
@@ -2085,15 +2071,14 @@ class OvnProviderHelper():
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
self._update_member(member, ovn_lb, pool_key)
last_status = self._find_member_status(
ovn_lb, member[constants.ID])
if constants.ADMIN_STATE_UP in member:
if member[constants.ADMIN_STATE_UP]:
# if HM exists trust on neutron:member_status
# as the last status valid for the member
if ovn_lb.health_check:
# search status of member_uuid
last_status = self._find_member_status(
ovn_lb, member[constants.ID])
member_status[constants.OPERATING_STATUS] = last_status
else:
member_status[constants.OPERATING_STATUS] = (
@@ -2101,6 +2086,29 @@ class OvnProviderHelper():
else:
member_status[constants.OPERATING_STATUS] = (
constants.OFFLINE)
if constants.OPERATING_STATUS in member_status:
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
member_status[constants.OPERATING_STATUS])
# NOTE(froyo): If we are toggling from/to OFFLINE due to an
# admin_state_up change, in that case we should update vips
if (
last_status != constants.OFFLINE and
member_status[constants.OPERATING_STATUS] ==
constants.OFFLINE
) or (
last_status == constants.OFFLINE and
member_status[constants.OPERATING_STATUS] !=
constants.OFFLINE
):
commands = []
commands.extend(self._refresh_lb_vips(ovn_lb,
ovn_lb.external_ids))
self._execute_commands(commands)
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "update of member")
status = {
@@ -2114,12 +2122,6 @@ class OvnProviderHelper():
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
if constants.OPERATING_STATUS in member_status:
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
member_status[constants.OPERATING_STATUS])
listener_status = []
for listener in pool_listeners:
listener_status.append(
@@ -2253,7 +2255,7 @@ class OvnProviderHelper():
'Load_Balancer_Health_Check', lbhc.uuid))
break
commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
commands.extend(self._refresh_lb_vips(ovn_lb, external_ids))
self._execute_commands(commands)
def handle_member_dvr(self, info):
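
To make the new status gate concrete, a minimal standalone sketch of
the lookup that _is_member_offline/_frame_vip_ips now rely on (key name
per the neutron:member_status NOTE in the diff and the unit-test
fixtures below; the standalone function name here is hypothetical):

import json

def is_member_offline(external_ids, member_id):
    # 'neutron:member_status' holds a JSON map of member_id -> status;
    # members marked OFFLINE are skipped when framing the vips column.
    statuses = json.loads(external_ids.get('neutron:member_status', '{}'))
    return statuses.get(member_id) == 'OFFLINE'

# Example with a fixture shaped like the unit tests below:
ext_ids = {'neutron:member_status': '{"member-uuid-1": "OFFLINE"}'}
assert is_member_offline(ext_ids, 'member-uuid-1')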


@@ -609,8 +609,6 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
field='external_ids')
p_members = ""
for m in p.members:
if not m.admin_state_up:
continue
m_info = 'member_' + m.member_id + '_' + m.address
m_info += ":" + str(m.protocol_port)
m_info += "_" + str(m.subnet_id)
@@ -635,7 +633,10 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
external_ids[ovn_const.LB_EXT_IDS_HMS_KEY] = \
jsonutils.dumps([p.healthmonitor.healthmonitor_id])
else:
member_status[m.member_id] = o_constants.NO_MONITOR
if m.admin_state_up:
member_status[m.member_id] = o_constants.NO_MONITOR
else:
member_status[m.member_id] = o_constants.OFFLINE
pool_key = 'pool_' + p.pool_id
if not p.admin_state_up:
pool_key += ':D'
@@ -866,13 +867,14 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
return m
def _update_member_and_validate(self, lb_data, pool_id, member_address,
remove_subnet_id=False):
remove_subnet_id=False,
admin_state_up=True):
pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id)
member = self._get_pool_member(pool, member_address)
self._o_driver_lib.update_loadbalancer_status.reset_mock()
old_member = copy.deepcopy(member)
member.admin_state_up = admin_state_up
# NOTE(froyo): In order to test update of member without passing the
# subnet_id parameter of the member, just to cover the case when a new
# member has been created without passing that argument


@@ -107,6 +107,8 @@ class TestOvnOctaviaProviderDriver(ovn_base.TestOvnOctaviaBase):
lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_TCP_id, "10.0.0.10")
self._update_member_and_validate(lb_data, pool_TCP_id, "10.0.0.10",
admin_state_up=False)
self._create_member_and_validate(
lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.11')
@@ -116,6 +118,8 @@ class TestOvnOctaviaProviderDriver(ovn_base.TestOvnOctaviaBase):
lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_UDP_id, "10.0.0.10")
self._update_member_and_validate(lb_data, pool_UDP_id, "10.0.0.10",
admin_state_up=False)
self._create_member_and_validate(
lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.11')
@@ -125,6 +129,8 @@ class TestOvnOctaviaProviderDriver(ovn_base.TestOvnOctaviaBase):
lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_SCTP_id, "10.0.0.10")
self._update_member_and_validate(lb_data, pool_SCTP_id, "10.0.0.10",
admin_state_up=False)
self._create_member_and_validate(
lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.11')


@@ -366,8 +366,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
'protocol_port': self.ref_member.protocol_port,
'pool_id': self.ref_member.pool_id,
'admin_state_up': self.update_member.admin_state_up,
'old_admin_state_up': self.ref_member.admin_state_up,
'subnet_id': self.ref_member.subnet_id}
'old_admin_state_up': self.ref_member.admin_state_up}
expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE,
'info': info}
self.driver.member_update(self.ref_member, self.update_member)
@@ -381,8 +380,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
'protocol_port': self.ref_member.protocol_port,
'pool_id': self.ref_member.pool_id,
'admin_state_up': self.update_member.admin_state_up,
'old_admin_state_up': self.ref_member.admin_state_up,
'subnet_id': self.ref_member.subnet_id}
'old_admin_state_up': self.ref_member.admin_state_up}
expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE,
'info': info}
member = copy.copy(self.ref_member)
@@ -398,8 +396,7 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
'address': self.ref_member.address,
'protocol_port': self.ref_member.protocol_port,
'pool_id': self.ref_member.pool_id,
'old_admin_state_up': self.ref_member.admin_state_up,
'subnet_id': self.ref_member.subnet_id}
'old_admin_state_up': self.ref_member.admin_state_up}
expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE,
'info': info}
member = copy.copy(self.ref_member)
@@ -407,15 +404,6 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
self.driver.member_update(member, self.update_member)
self.mock_add_request.assert_called_once_with(expected_dict)
def test_member_update_missing_subnet_id_differs_from_lb_vip(self):
self.driver._ovn_helper._get_subnet_from_pool.return_value = (
self.ref_member.subnet_id, '198.52.100.0/24')
self.driver._ovn_helper._check_ip_in_subnet.return_value = False
self.ref_member.subnet_id = data_models.UnsetType()
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.member_update, self.ref_member,
self.update_member)
@mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs')
def test_member_update_no_ip_addr(self, mock_ip_differs):
self.update_member.address = None


@@ -1029,7 +1029,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.OFFLINE)
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'enabled': 'False'}))
@@ -1045,7 +1045,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'enabled': 'True'}))
@@ -1058,7 +1058,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'enabled': 'True'}))
@@ -1079,10 +1079,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
refresh_vips.assert_has_calls([
mock.call(self.ovn_lb.uuid, self.ovn_lb.external_ids),
mock.call(self.ovn_lb, self.ovn_lb.external_ids),
mock.ANY,
mock.ANY,
mock.call(udp_lb.uuid, udp_lb.external_ids)],
mock.call(udp_lb, udp_lb.external_ids)],
any_order=False)
self.helper.ovn_nbdb_api.db_set.assert_has_calls([
mock.call('Load_Balancer', self.ovn_lb.uuid,
@@ -1113,7 +1113,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.external_ids.update({
'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id})
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
expected_calls = [
mock.call(
'Load_Balancer', self.ovn_lb.uuid,
@@ -1137,7 +1137,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.listener['admin_state_up'] = True
status = self.helper.listener_create(self.listener)
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
expected_calls = [
mock.call(
'Load_Balancer', self.ovn_lb.uuid,
@@ -1243,7 +1243,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.external_ids.update(
{'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id})
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
def test_listener_update_listener_disabled(self, refresh_vips):
@@ -1265,7 +1265,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.external_ids.update(
{'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id})
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
# As it is marked disabled, a second call should not try and remove it
self.helper.ovn_nbdb_api.db_remove.reset_mock()
status = self.helper.listener_update(self.listener)
@@ -1285,7 +1285,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
constants.ACTIVE)
self.helper.ovn_nbdb_api.db_remove.assert_not_called()
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
def test_listener_update_no_admin_state_up_or_default_pool_id(
@ -1345,7 +1345,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'external_ids', 'listener_%s' % self.listener_id)
self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id)
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
self.ovn_lb, self.ovn_lb.external_ids)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
def test_listener_delete_ovn_lb_not_empty(self, lb_empty):
@@ -1915,54 +1915,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['listeners'][0]['id'],
'listener1')
@mock.patch.object(ovn_helper.OvnProviderHelper, '_update_member')
def test_member_update_exception(self, mock_update_member):
mock_update_member.side_effect = [RuntimeError]
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_member_status')
def test_member_update_exception(self, mock_find_member_status):
mock_find_member_status.side_effect = [TypeError]
status = self.helper.member_update(self.member)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
def test_member_update_new_member_line(self):
old_member_line = (
'member_%s_%s:%s' %
(self.member_id, self.member_address,
self.member_port))
new_member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_port, self.member_subnet_id))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: old_member_line})
self.helper.member_update(self.member)
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {
'pool_%s' % self.pool_id: new_member_line}))]
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
expected_calls)
def test_member_update_new_port(self):
new_port = 11
member_line = ('member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
new_port, self.member_subnet_id))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: member_line})
self.helper.member_update(self.member)
new_member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_port, self.member_subnet_id))
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {
'pool_%s' % self.pool_id: new_member_line})),
mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {
'10.22.33.4:80': '192.168.2.149:1010',
'123.123.123.123:80': '192.168.2.149:1010'}))]
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
expected_calls)
@mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
'_refresh_lb_vips')
def test_member_delete(self, mock_vip_command):
@@ -3588,20 +3547,27 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.pool_id)
def test__frame_lb_vips(self):
ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids)
expected = {'10.22.33.4:80': '192.168.2.149:1010',
'123.123.123.123:80': '192.168.2.149:1010'}
self.assertEqual(expected, ret)
def test__frame_lb_vips_member_offline(self):
self.ovn_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \
'{"%s": "%s"}' % (self.member_id, constants.OFFLINE)
ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids)
expected = {}
self.assertEqual(expected, ret)
def test__frame_lb_vips_no_vip_fip(self):
self.ovn_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids)
expected = {'10.22.33.4:80': '192.168.2.149:1010'}
self.assertEqual(expected, ret)
def test__frame_lb_vips_disabled(self):
self.ovn_lb.external_ids['enabled'] = 'False'
ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids)
self.assertEqual({}, ret)
def test__frame_lb_vips_ipv6(self):
@@ -3615,7 +3581,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '2002::',
'pool_%s' % self.pool_id: self.member_line,
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids)
expected = {'[2002::]:80': '[2001:db8::1]:1010',
'[fc00::]:80': '[2001:db8::1]:1010'}
self.assertEqual(expected, ret)