Update KLB .spec.provider when required

During the refactoring to allow proper Events creation for Services,
we've lost the ability to update a KLB's .spec.provider when the
configuration is changed to use another provider. This commit fixes it
by making sure that a mismatch between .spec.provider and the
configured provider triggers the .spec update.

Change-Id: Ic93a105b4a67b8dff15bf0e80b13f4743ffce7ff
Closes-Bug: 1960311
This commit is contained in:
Michał Dulko 2022-02-08 12:39:34 +01:00
parent c624887103
commit b9f68b40fa
4 changed files with 29 additions and 38 deletions

View File

@ -838,12 +838,16 @@ class LBaaSv2Driver(base.LBaaSDriver):
interval=_LB_STS_POLL_FAST_INTERVAL):
lbaas = clients.get_loadbalancer_client()
status = 'PENDING_DELETE'
for remaining in self._provisioning_timer(timeout, interval):
try:
lbaas.get_load_balancer(loadbalancer['id'])
lb = lbaas.get_load_balancer(loadbalancer['id'])
status = lb.provisioning_status
except os_exc.NotFoundException:
return
raise k_exc.LoadBalancerNotReady(loadbalancer['id'], status)
def _provisioning_timer(self, timeout,
interval=_LB_STS_POLL_FAST_INTERVAL):
# REVISIT(ivc): consider integrating with Retry

View File

@ -269,7 +269,12 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
def _has_lbaas_spec_changes(self, service, loadbalancer_crd):
    """Return whether the Service diverged from the KLB CRD spec.

    A change in IP, ports, timeouts or — since bug 1960311 — the
    configured load balancer provider must trigger a .spec update.
    """
    # NOTE: the diff rendering had duplicated the pre-change return tail
    # (without the provider check) after this expression; only the
    # post-change version is kept so the block is syntactically valid.
    return (self._has_ip_changes(service, loadbalancer_crd) or
            utils.has_port_changes(service, loadbalancer_crd) or
            self._has_timeout_changes(service, loadbalancer_crd) or
            self._has_provider_changes(loadbalancer_crd))
def _has_provider_changes(self, loadbalancer_crd):
return (self._lb_provider and
loadbalancer_crd['spec'].get('provider') != self._lb_provider)
def _has_ip_changes(self, service, loadbalancer_crd):
link = utils.get_res_link(service)

View File

@ -876,30 +876,10 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
return False
def _ensure_release_lbaas(self, loadbalancer_crd):
    """Release the load balancer backing the KLB CRD and reset its status.

    The diff rendering had concatenated the removed retry-loop body
    (exponential_sleep / KuryrLBReleaseTimeout handling) with the added
    simplified body, duplicating the release call; only the post-change
    body is kept: release once, clean the CRD status by its unique name.
    """
    self._drv_lbaas.release_loadbalancer(
        loadbalancer_crd['status'].get('loadbalancer'))
    utils.clean_lb_crd_status(
        utils.get_res_unique_name(loadbalancer_crd))
    # NOTE(ltomasbo): give some extra time to ensure the Load
    # Balancer VIP is also released
    time.sleep(1)

View File

@ -213,15 +213,17 @@ class TestServiceHandler(test_base.TestCase):
for has_ip_changes in (True, False):
for has_port_changes in (True, False):
for has_timeout_ in (True, False):
m_handler._has_ip_changes.return_value = has_ip_changes
m_port_changes.return_value = has_port_changes
m_handler._has_timeout_changes.return_value = has_timeout_
ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes(
m_handler, service, lbaas_spec)
self.assertEqual(
has_ip_changes or has_port_changes or has_timeout_,
ret)
for timeout in (True, False):
for provider in (True, False):
m_handler._has_ip_changes.return_value = has_ip_changes
m_port_changes.return_value = has_port_changes
m_handler._has_timeout_changes.return_value = timeout
m_handler._has_provider_changes.return_value = provider
ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes(
m_handler, service, lbaas_spec)
self.assertEqual(
has_ip_changes or has_port_changes or timeout
or provider, ret)
def test_has_ip_changes(self):
m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)