diff --git a/kuryr_tempest_plugin/config.py b/kuryr_tempest_plugin/config.py
index 70769cea..05f5ec70 100644
--- a/kuryr_tempest_plugin/config.py
+++ b/kuryr_tempest_plugin/config.py
@@ -107,6 +107,8 @@ kuryr_k8s_opts = [
                 " number LB members"),
     cfg.BoolOpt("enable_reconciliation", default=False,
                 help="Whether or not reconciliation is enabled"),
+    cfg.BoolOpt("enable_listener_reconciliation", default=False,
+                help="Whether or not listener reconciliation is enabled"),
     cfg.IntOpt("lb_reconcile_timeout", default=600,
                help="The max time (in seconds) it should take for LB "
                     "reconciliation. It doesn't include the LB build time."),
diff --git a/kuryr_tempest_plugin/tests/scenario/base.py b/kuryr_tempest_plugin/tests/scenario/base.py
index 9d53c5a8..e7746780 100644
--- a/kuryr_tempest_plugin/tests/scenario/base.py
+++ b/kuryr_tempest_plugin/tests/scenario/base.py
@@ -1598,3 +1598,75 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
         for container in pod.status.container_statuses:
             containers[pod_name][container.name] = container.restart_count
         return containers
+
+
+class BaseReconciliationScenarioTest(BaseKuryrScenarioTest):
+
+    credentials = ['admin', 'primary', ['lb_admin', 'load-balancer_admin']]
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseReconciliationScenarioTest, cls).skip_checks()
+        if not CONF.kuryr_kubernetes.service_tests_enabled:
+            raise cls.skipException("Service tests are not enabled")
+        if not CONF.kuryr_kubernetes.enable_reconciliation:
+            raise cls.skipException("Reconciliation is not enabled")
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseReconciliationScenarioTest, cls).setup_clients()
+        cls.lbaas = cls.os_roles_lb_admin.load_balancer_v2.LoadbalancerClient()
+        cls.lsnr = cls.os_roles_lb_admin.load_balancer_v2.ListenerClient()
+
+    def check_for_resource_reconciliation(self, service_name, svc_pods,
+                                          resource, resource_id,
+                                          show_resource, namespace='default'):
+        LOG.debug("Waiting for %s to be completely gone", resource)
+        start = time.time()
+        while time.time() - start < consts.LB_TIMEOUT:
+            try:
+                time.sleep(30)
+                show_resource(resource_id)
+            except lib_exc.NotFound:
+                LOG.debug("%s successfully deleted", resource)
+                break
+        else:
+            msg = ("Timed out waiting for %s to be completely"
+                   " deleted" % resource_id)
+            raise lib_exc.TimeoutException(msg)
+        start = time.time()
+        timeout = consts.LB_RECONCILE_TIMEOUT + consts.LB_TIMEOUT
+        # (digitalsimboja) We need to add both timeouts to wait for the time
+        # for both rebuilding and reconciliation of the KuryrLoadBalancer CRD
+        while time.time() - start < timeout:
+            try:
+                time.sleep(60)
+                LOG.debug("Checking for %s reconciliation", resource)
+                status = self.get_kuryr_loadbalancer_crds(service_name,
+                                                          namespace).get(
+                    'status', {})
+                if resource == consts.LISTENER:
+                    listeners = status.get(resource, [])
+                    if not listeners:
+                        continue
+                    new_resource_id = listeners[0].get('id')
+                else:
+                    new_resource_id = status.get(resource, {}).get('id')
+                new_lb_members = status.get('members', [])
+                if (new_resource_id == resource_id or new_resource_id is None
+                        or len(svc_pods) != len(new_lb_members)):
+                    continue
+                else:
+                    self.assertNotEqual(new_resource_id, resource_id)
+                    self.assertEqual(len(svc_pods), len(new_lb_members))
+                    break
+            except kubernetes.client.rest.ApiException:
+                continue
+        else:
+            msg = ("Timed out waiting for the %s reconciliation" % resource)
+            raise lib_exc.TimeoutException(msg)
+        LOG.info("%s successfully reconciled", resource)
+        # if there is connectivity now, that means the KuryrLoadBalancer CRD
+        # resource is reconciled
+        self.check_service_internal_connectivity(service_name=service_name,
+                                                 namespace=namespace)
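Note on the helper above: both wait loops in check_for_resource_reconciliation rely on Python's while/else, where the else suite runs only if the loop condition expires without ever reaching break; that is what turns a silent timeout into a TimeoutException. Below is a minimal standalone sketch of the same pattern, with illustrative names (wait_until, predicate) that are not part of the plugin:

import time


def wait_until(predicate, timeout, interval):
    # Poll until predicate() returns a truthy value. The else clause below
    # runs only when the while condition expires without hitting `break`.
    start = time.time()
    while time.time() - start < timeout:
        if predicate():
            break
        time.sleep(interval)
    else:
        raise RuntimeError("condition not met within %s seconds" % timeout)


# Example usage: wait up to 5 seconds for a short deadline to pass.
deadline = time.time() + 1
wait_until(lambda: time.time() > deadline, timeout=5, interval=0.2)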
diff --git a/kuryr_tempest_plugin/tests/scenario/consts.py b/kuryr_tempest_plugin/tests/scenario/consts.py
index 98929798..2b010e83 100644
--- a/kuryr_tempest_plugin/tests/scenario/consts.py
+++ b/kuryr_tempest_plugin/tests/scenario/consts.py
@@ -23,9 +23,13 @@ POD_CHECK_TIMEOUT = 240
 POD_CHECK_SLEEP_TIME = 5
 NP_CHECK_SLEEP_TIME = 10
 NS_TIMEOUT = 600
+LB_TIMEOUT = 1200
+LB_RECONCILE_TIMEOUT = 600
 REPETITIONS_PER_BACKEND = 10
 KURYR_RESOURCE_CHECK_TIMEOUT = 300
 KURYR_PORT_CRD_PLURAL = 'kuryrports'
 KURYR_LOAD_BALANCER_CRD_PLURAL = 'kuryrloadbalancers'
 KURYR_NETWORK_POLICY_CRD_PLURAL = 'kuryrnetworkpolicies'
 K8s_ANNOTATION_PROJECT = 'openstack.org/kuryr-project'
+LOADBALANCER = 'loadbalancer'
+LISTENER = 'listeners'
diff --git a/kuryr_tempest_plugin/tests/scenario/test_service.py b/kuryr_tempest_plugin/tests/scenario/test_service.py
index 92920d32..cbe6d600 100644
--- a/kuryr_tempest_plugin/tests/scenario/test_service.py
+++ b/kuryr_tempest_plugin/tests/scenario/test_service.py
@@ -261,27 +261,14 @@ class TestDeployment(base.BaseKuryrScenarioTest):
             self.check_lb_members(pool_id, 0)
 
 
-class TestLoadBalancerReconciliationScenario(base.BaseKuryrScenarioTest):
-
-    credentials = ['admin', 'primary', ['lb_admin', 'load-balancer_admin']]
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestLoadBalancerReconciliationScenario, cls).skip_checks()
-        if not CONF.kuryr_kubernetes.service_tests_enabled:
-            raise cls.skipException("Service tests are not enabled")
-        if not CONF.kuryr_kubernetes.enable_reconciliation:
-            raise cls.skipException("Reconciliation is not enabled")
-
-    @classmethod
-    def setup_clients(cls):
-        super(TestLoadBalancerReconciliationScenario, cls).setup_clients()
-        cls.lbaas = cls.os_roles_lb_admin.load_balancer_v2.LoadbalancerClient()
+class TestLoadBalancerReconciliationScenario(
+        base.BaseReconciliationScenarioTest):
 
     @decorators.idempotent_id('da9bd886-e895-4869-b356-228c92a4da7f')
     def test_loadbalancers_reconcilation(self):
-        service_name = "kuryr-reconciliation-demo"
+        service_name = data_utils.rand_name(prefix='kuryr-loadbalancer')
         namespace = "default"
+        resource = consts.LOADBALANCER
         _, svc_pods = self.create_setup_for_service_test(
             service_name=service_name)
         self.check_service_internal_connectivity(service_name=service_name)
@@ -290,59 +277,55 @@ class TestLoadBalancerReconciliationScenario(base.BaseKuryrScenarioTest):
         try:
             klb_crd_id = self.get_kuryr_loadbalancer_crds(service_name,
                                                           namespace).get(
-                                                              'status',
-                                                              {}).get(
-                                                                  'loadbalancer',
-                                                                  {}).get('id')
+                'status', {}).get(
+                    'loadbalancer',
+                    {}).get('id')
         except kubernetes.client.rest.ApiException:
-            raise lib_exc.ServerFault
+            raise lib_exc.ServerFault()
         # NOTE(digitalsimboja): We need to await for DELETE to
         # complete on Octavia
         self.lbaas.delete_loadbalancer(klb_crd_id, cascade=True)
         LOG.debug("Waiting for loadbalancer to be completely gone")
-        start = time.time()
-        while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
-            try:
-                time.sleep(30)
-                self.lbaas.show_loadbalancer(klb_crd_id)
-            except lib_exc.NotFound:
-                LOG.debug("LoadBalancer sucessfully deleted")
-                break
-        else:
-            msg = ("Timed Out waiting for loadbalancer %s to be completely"
-                   " deleted" % klb_crd_id)
-            raise lib_exc.TimeoutException(msg)
-        start = time.time()
-        timeout = CONF.kuryr_kubernetes.lb_reconcile_timeout + \
-            CONF.kuryr_kubernetes.lb_build_timeout
-        # We need to add both timeouts to wait for the time for both rebuilding
-        # and reconciliation of the LoadBalancer
-        while time.time() - start < timeout:
-            try:
-                time.sleep(60)
-                LOG.debug("Checking for LoadBalancers Reconciliation")
-                status = self.get_kuryr_loadbalancer_crds(service_name,
-                                                          namespace).get(
-                    'status', {})
-                new_lb_id = status.get('loadbalancer', {}).get('id')
-                new_lb_members = status.get('members', [])
-                if (new_lb_id == klb_crd_id or new_lb_id is None or
-                        len(svc_pods) != len(new_lb_members)):
-                    continue
-                else:
-                    self.assertNotEqual(new_lb_id, klb_crd_id)
-                    self.assertEqual(len(svc_pods), len(new_lb_members))
-                    break
-            except kubernetes.client.rest.ApiException:
-                continue
-        else:
-            msg = ('Timed out waiting for LoadBalancer %s reconciliation' %
-                   klb_crd_id)
-            raise lib_exc.TimeoutException(msg)
-        LOG.info("LoadBalancer successfully reconciled")
-        # if there is a connectivity now, that means the LoadBalancer
-        # is reconciled
+
+        self.check_for_resource_reconciliation(service_name, svc_pods,
+                                               resource, klb_crd_id,
+                                               self.lbaas.show_loadbalancer,
+                                               namespace)
+
+
+class TestListenerReconciliationScenario(base.BaseReconciliationScenarioTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestListenerReconciliationScenario, cls).skip_checks()
+        if not CONF.kuryr_kubernetes.enable_listener_reconciliation:
+            raise cls.skipException("Listener reconciliation is not enabled")
+
+    @decorators.idempotent_id('da9bd886-e895-4869-b356-230c92a5da8c')
+    def test_listeners_reconcilation(self):
+        service_name = data_utils.rand_name(
+            prefix='kuryr-loadbalancer-listener')
+        namespace = "default"
+        resource = consts.LISTENER
+        _, svc_pods = self.create_setup_for_service_test(
+            service_name=service_name)
         self.check_service_internal_connectivity(service_name=service_name)
+        # if there is connectivity, the service is functional
+        LOG.info("Retrieving the Listener ID from KuryrLoadBalancer CRD")
+        try:
+            klb_lsnr_id = self.get_kuryr_loadbalancer_crds(service_name,
+                                                           namespace).get(
+                'status', {}).get(
+                    'listeners',
+                    [])[0].get('id')
+        except kubernetes.client.rest.ApiException:
+            raise lib_exc.ServerFault()
+        self.lsnr.delete_listener(klb_lsnr_id)
+        LOG.debug("Waiting for listener to be completely gone")
+        self.check_for_resource_reconciliation(service_name, svc_pods,
+                                               resource, klb_lsnr_id,
+                                               self.lsnr.show_listener,
+                                               namespace)
 
 
 class TestServiceWithNotReadyEndpoints(base.BaseKuryrScenarioTest):
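For reference, both scenarios navigate the KuryrLoadBalancer CRD status with chained .get() calls: the loadbalancer entry is a single object, while listeners and members are lists, which is why check_for_resource_reconciliation special-cases consts.LISTENER. A rough sketch of that status shape with made-up placeholder IDs (the real CRD, populated by kuryr-kubernetes, carries more fields):

# Placeholder values only; not a real KuryrLoadBalancer status.
status = {
    'loadbalancer': {'id': 'lb-uuid'},
    'listeners': [{'id': 'listener-uuid'}],
    'members': [{'id': 'member-uuid-1'}, {'id': 'member-uuid-2'}],
}

# The same lookups the tests above perform.
lb_id = status.get('loadbalancer', {}).get('id')
lsnr_id = status.get('listeners', [])[0].get('id')
member_count = len(status.get('members', []))
assert (lb_id, lsnr_id, member_count) == ('lb-uuid', 'listener-uuid', 2)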