Make completed Pods' Ports reusable

When a Pod has status Completed it has finished its
job, so Kuryr is expected to recycle the Neutron Ports
associated with it so that they can be reused by other
Pods. However, Kuryr was treating Completed Pods as not
scheduled and skipping the event. This commit fixes the
issue by recycling the Ports of Completed Pods before
checking whether the Pod is Scheduled.
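
For context, here is a minimal sketch of the reordered checks in
VIFHandler.on_present after this change (simplified from the diff
below; error handling, finalizer management and the multi-VIF
plumbing are elided):

    def on_present(self, pod, *args, **kwargs):
        # Host-networking Pods never get Neutron Ports.
        if driver_utils.is_host_network(pod):
            return

        # Completed Pods are handled before the "scheduled" check, so
        # their Neutron Ports are released through on_finalize()
        # instead of the event being silently skipped.
        if utils.is_pod_completed(pod):
            self.on_finalize(pod)
            return

        # Only Pods that were never scheduled are skipped here.
        if not self._is_pod_scheduled(pod):
            return

        # ... the usual KuryrPort/VIF handling continues below ...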

Closes-Bug: #1945680
Change-Id: I23e7901fb016d59fa76762d1f24fee47974e72df
Maysa Macedo 2021-09-30 16:33:09 +00:00
parent 2d303be3d4
commit 85a4cd14f3
5 changed files with 64 additions and 46 deletions


@@ -146,7 +146,8 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
raise
return
if 'deletionTimestamp' not in pod['metadata']:
if ('deletionTimestamp' not in pod['metadata'] and
not utils.is_pod_completed(pod)):
# NOTE(gryf): Ignore deleting KuryrPort, since most likely it was
# removed manually, while we need vifs for corresponding pod
# object which apparently is still running.


@@ -41,8 +41,17 @@ class VIFHandler(k8s_base.ResourceEventHandler):
OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "pods")
def on_present(self, pod, *args, **kwargs):
if (driver_utils.is_host_network(pod) or
not self._is_pod_scheduled(pod)):
if driver_utils.is_host_network(pod):
return
pod_name = pod['metadata']['name']
if utils.is_pod_completed(pod):
LOG.debug("Pod %s has completed execution, "
"removing the vifs", pod_name)
self.on_finalize(pod)
return
if not self._is_pod_scheduled(pod):
# REVISIT(ivc): consider an additional configurable check that
# would allow skipping pods to enable heterogeneous environments
# where certain pods/namespaces/nodes can be managed by other
@@ -53,7 +62,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# subsequent updates of the pod, add_finalizer will ignore this if
# finalizer exists.
k8s = clients.get_kubernetes_client()
try:
if not k8s.add_finalizer(pod, constants.POD_FINALIZER):
# NOTE(gryf) It might happen that pod will be deleted even
@@ -64,15 +72,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
raise
kp = driver_utils.get_kuryrport(pod)
if self._is_pod_completed(pod):
if kp:
LOG.debug("Pod has completed execution, removing the vifs")
self.on_finalize(pod)
else:
LOG.debug("Pod has completed execution, no KuryrPort found."
" Skipping")
return
LOG.debug("Got KuryrPort: %r", kp)
if not kp:
try:
@@ -83,7 +82,7 @@ class VIFHandler(k8s_base.ResourceEventHandler):
LOG.warning('Namespace %s is being terminated, ignoring Pod '
'%s in that namespace.',
pod['metadata']['namespace'],
pod['metadata']['name'])
pod_name)
return
except k_exc.K8sClientException as ex:
LOG.exception("Kubernetes Client Exception creating "
@@ -145,15 +144,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
except KeyError:
return False
@staticmethod
def _is_pod_completed(pod):
try:
return (pod['status']['phase'] in
(constants.K8S_POD_STATUS_SUCCEEDED,
constants.K8S_POD_STATUS_FAILED))
except KeyError:
return False
def _add_kuryrport_crd(self, pod, vifs=None):
LOG.debug('Adding CRD %s', pod["metadata"]["name"])


@@ -79,14 +79,12 @@ class TestVIFHandler(test_base.TestCase):
self._release_vif = self._handler._drv_vif_pool.release_vif
self._activate_vif = self._handler._drv_vif_pool.activate_vif
self._is_pod_scheduled = self._handler._is_pod_scheduled
self._is_pod_completed = self._handler._is_pod_completed
self._request_additional_vifs = \
self._multi_vif_drv.request_additional_vifs
self._request_vif.return_value = self._vif
self._request_additional_vifs.return_value = self._additioan_vifs
self._is_pod_scheduled.return_value = True
self._is_pod_completed.return_value = False
self._get_project.return_value = self._project_id
self._get_subnets.return_value = self._subnets
self._get_security_groups.return_value = self._security_groups
@@ -108,17 +106,6 @@ class TestVIFHandler(test_base.TestCase):
self.assertFalse(h_vif.VIFHandler._is_pod_scheduled({'spec': {},
'status': {}}))
def test_is_pod_completed_pending(self):
self.assertFalse(h_vif.VIFHandler._is_pod_completed(self._pod))
def test_is_pod_completed_succeeded(self):
self.assertTrue(h_vif.VIFHandler._is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_SUCCEEDED}}))
def test_is_pod_completed_failed(self):
self.assertTrue(h_vif.VIFHandler._is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_FAILED}}))
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
@@ -137,39 +124,58 @@ class TestVIFHandler(test_base.TestCase):
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.utils.is_pod_completed')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_not_scheduled(self, m_get_kuryrport, m_host_network,
m_get_k8s_client):
m_get_k8s_client, m_is_pod_completed):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
self._is_pod_scheduled.return_value = False
m_is_pod_completed.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_not_called()
m_get_kuryrport.assert_not_called()
k8s.add_finalizer.assert_called()
m_get_kuryrport.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.utils.is_pod_completed')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_on_completed_without_annotation(self, m_get_kuryrport,
m_get_k8s_client):
self._is_pod_completed.return_value = True
def test_on_present_on_completed_without_kuryrport(self, m_get_kuryrport,
m_get_k8s_client,
m_is_pod_completed):
m_is_pod_completed.return_value = True
m_get_kuryrport.return_value = None
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
self._handler.on_finalize.assert_not_called()
self._handler.on_finalize.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.utils.is_pod_completed')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_on_completed_with_kuryrport(self, m_get_kuryrport,
m_get_k8s_client,
m_is_pod_completed):
m_is_pod_completed.return_value = True
m_get_kuryrport.return_value = mock.MagicMock()
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._handler.on_finalize.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()


@@ -488,3 +488,15 @@ class TestUtils(test_base.TestCase):
sub = utils.get_subnet_id(**filters)
m_net.subnets.assert_called_with(**filters)
self.assertIsNone(sub)
def test_is_pod_completed_pending(self):
self.assertFalse(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_PENDING}}))
def test_is_pod_completed_succeeded(self):
self.assertTrue(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_SUCCEEDED}}))
def test_is_pod_completed_failed(self):
self.assertTrue(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_FAILED}}))


@@ -646,3 +646,12 @@ def get_kuryrloadbalancer(name, namespace):
f'{name}')
except exceptions.K8sResourceNotFound:
return {}
def is_pod_completed(pod):
try:
return (pod['status']['phase'] in
(constants.K8S_POD_STATUS_SUCCEEDED,
constants.K8S_POD_STATUS_FAILED))
except KeyError:
return False
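
For reference, a small usage sketch of the new helper (the pod dicts are
minimal hypothetical examples; only status.phase is inspected, and the
constants are the ones exercised by the tests above):

    from kuryr_kubernetes import constants
    from kuryr_kubernetes import utils

    # Succeeded and Failed phases count as completed.
    assert utils.is_pod_completed(
        {'status': {'phase': constants.K8S_POD_STATUS_SUCCEEDED}})
    assert utils.is_pod_completed(
        {'status': {'phase': constants.K8S_POD_STATUS_FAILED}})

    # Anything else, including a Pod with no phase set yet, is not completed.
    assert not utils.is_pod_completed(
        {'status': {'phase': constants.K8S_POD_STATUS_PENDING}})
    assert not utils.is_pod_completed({'status': {}})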