Fixing service connectivity testing

Service connectivity to a ClusterIP service should be tested
from a pod in the same namespace

Depends-On: I35f3be8fc16d71d0d2ac01c5451d398dd631f118

Change-Id: Ife099f1d05c0cae954119f0ef4616c95076f83e7
This commit is contained in:
Genadi Chereshnya 2019-02-12 15:18:40 +02:00 committed by Luis Tomas Bolivar
parent bfb6c8f931
commit 54220e17fa
3 changed files with 68 additions and 39 deletions

View File

@ -183,6 +183,15 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
except kubernetes.client.rest.ApiException:
return None
@classmethod
def get_host_ip_for_pod(cls, pod_name, namespace="default"):
try:
pod = cls.k8s_client.CoreV1Api().read_namespaced_pod(pod_name,
namespace)
return pod.status.host_ip
except kubernetes.client.rest.ApiException:
return None
@classmethod
def get_pod_status(cls, pod_name, namespace="default"):
try:
@ -506,18 +515,29 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
if get_ip:
cls.service_ip = cls.get_service_ip(
service_name, spec_type=spec_type, namespace=namespace)
cls.verify_lbaas_endpoints_configured(service_name, pod_num)
# This is already waiting for endpoint annotations to be made by
# Kuryr
cls.verify_lbaas_endpoints_configured(service_name, pod_num,
namespace)
cls.service_name = service_name
cls.wait_service_status(cls.service_ip,
CONF.kuryr_kubernetes.lb_build_timeout,
protocol, port, num_of_back_ends=pod_num)
actual_be = cls.wait_ep_members_status(
cls.service_name, namespace,
CONF.kuryr_kubernetes.lb_build_timeout)
if pod_num != actual_be:
LOG.error("Actual EP backend num(%d) != pod_num(%d)",
actual_be, pod_num)
raise lib_exc.ServerFault()
if spec_type != 'ClusterIP':
# FIXME(ltomasbo): adding workaround to use the clusterIP to
# check service status as there are some issues with the FIPs
# and OVN gates
clusterip_ip = cls.get_service_ip(service_name,
spec_type="ClusterIP",
namespace=namespace)
cls.wait_service_status(clusterip_ip,
CONF.kuryr_kubernetes.lb_build_timeout,
protocol, port,
num_of_back_ends=pod_num)
actual_be = cls.wait_ep_members_status(
cls.service_name, namespace,
CONF.kuryr_kubernetes.lb_build_timeout)
if pod_num != actual_be:
LOG.error("Actual EP backend num(%d) != pod_num(%d)",
actual_be, pod_num)
raise lib_exc.ServerFault()
if cleanup:
cls.addClassResourceCleanup(cls.delete_service, service_name,
@ -641,17 +661,20 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
self._run_and_assert(req, pred)
def assert_backend_amount_from_pod(self, url, amount, pod):
def assert_backend_amount_from_pod(self, url, amount, pod,
namespace_name='default'):
def req():
status_prefix = '\nkuryr-tempest-plugin-curl-http_code:"'
cmd = ['/usr/bin/curl', '-Ss', '-w',
status_prefix + '%{http_code}"\n', url]
stdout, stderr = self.exec_command_in_pod(pod, cmd, stderr=True)
stdout, stderr = self.exec_command_in_pod(pod, cmd,
namespace=namespace_name,
stderr=True)
# check if the curl command succeeded
if stderr:
LOG.error('Failed to curl the service at {}. '
'Err: {}'.format(url, stderr))
time.sleep(5)
time.sleep(10)
return
try:
delimiter = stdout.rfind(status_prefix)
@ -678,7 +701,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
self._run_and_assert(req, pred)
def _run_and_assert(self, fn, predicate, retry_repetitions=20):
def _run_and_assert(self, fn, predicate, retry_repetitions=100):
resps = [fn() for _ in range(retry_repetitions)]
predicate(self, resps)
@ -687,7 +710,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
namespace='default'):
cls._verify_endpoints_annotation(
ep_name=ep_name, ann_string=K8S_ANNOTATION_LBAAS_STATE,
poll_interval=10, pod_num=pod_num)
poll_interval=5, namespace=namespace, pod_num=pod_num)
@classmethod
def _verify_endpoints_annotation(cls, ep_name, ann_string,

View File

@ -202,10 +202,12 @@ class TestNamespaceScenario(base.BaseKuryrScenarioTest):
namespace=ns2_name)
# Wait for services to be ready
self.wait_service_status(svc_ns1_ip,
CONF.kuryr_kubernetes.lb_build_timeout)
self.wait_service_status(svc_ns2_ip,
CONF.kuryr_kubernetes.lb_build_timeout)
self.assert_backend_amount_from_pod(
'http://{}'.format(svc_ns1_ip), 1, pod_ns1_name,
namespace_name=ns1_name)
self.assert_backend_amount_from_pod(
'http://{}'.format(svc_ns2_ip), 1, pod_ns2_name,
namespace_name=ns2_name)
pod_nsdefault_name, pod_nsdefault = self.create_pod(
labels={"app": 'pod-label'}, namespace='default')
@ -237,24 +239,25 @@ class TestNamespaceScenario(base.BaseKuryrScenarioTest):
subnet_ns2_name)
@decorators.idempotent_id('bddd5441-1244-429d-a125-b53ddfb132a9')
def test_host_to_namespace_connectivity(self):
# Create namespace and pod and service in that namespace
def test_host_to_namespace_pod_connectivity(self):
# Create namespace and pod in that namespace
namespace_name, namespace = self.create_namespace()
self.addCleanup(self.delete_namespace, namespace_name)
# Check host to namespace pod and service connectivity
# Check host to namespace pod and pod to host connectivity
pod_name, pod = self.create_pod(labels={"app": 'pod-label'},
namespace=namespace_name)
pod_ip = self.get_pod_ip(pod_name, namespace=namespace_name)
svc_name, _ = self.create_service(pod_label=pod.metadata.labels,
namespace=namespace_name)
service_ip = self.get_service_ip(service_name=svc_name,
namespace=namespace_name)
self.wait_service_status(service_ip,
CONF.kuryr_kubernetes.lb_build_timeout)
# Check connectivity to pod and service in the namespace
host_ip_of_pod = self.get_host_ip_for_pod(
pod_name, namespace=namespace_name)
# Check connectivity to pod in the namespace from host pod resides on
self.ping_ip_address(pod_ip)
resp = requests.get("http://{dst_ip}".format(dst_ip=service_ip))
self.assertEqual(resp.status_code, 200)
# check connectivity from Pod to host pod resides on
cmd = [
"/bin/sh", "-c", "ping -c 4 {dst_ip}>/dev/null ; echo $?".format(
dst_ip=host_ip_of_pod)]
self.assertEqual(self.exec_command_in_pod(
pod_name, cmd, namespace_name), '0')
def _delete_namespace_resources(self, namespace, net_crd, subnet):
# Check resources are deleted

View File

@ -36,11 +36,6 @@ class TestServiceScenario(base.BaseKuryrScenarioTest):
super(TestServiceScenario, cls).resource_setup()
cls.create_setup_for_service_test()
@decorators.idempotent_id('bddf5441-1244-449d-a125-b5fdcfc1a1a9')
def test_service_curl(self):
LOG.info("Trying to curl the service IP %s" % self.service_ip)
self.assert_backend_amount(self.service_ip, self.pod_num)
@decorators.idempotent_id('bddf5441-1244-449d-a125-b5fdcfa1a7a9')
def test_pod_service_curl(self):
pod_name, pod = self.create_pod()
@ -103,7 +98,15 @@ class TestUdpServiceScenario(base.BaseKuryrScenarioTest):
@decorators.idempotent_id('bddf5441-1244-449d-a125-b5fda1670781')
def test_service_udp_ping(self):
self.create_setup_for_service_test(protocol="UDP", port=90,
# NOTE(ltomasbo): Using LoadBalancer type to avoid namespace isolation
# restrictions as this test targets svc udp testing and not the
# isolation
self.create_setup_for_service_test(spec_type="LoadBalancer",
protocol="UDP", port=90,
target_port=9090)
self.assert_backend_amount(self.service_ip, self.pod_num,
# NOTE(ltomasbo): Ensure usage of svc clusterIP IP instead of the FIP
# as the focus of this test is not to check FIP connectivity.
clusterip_svc_ip = self.get_service_ip(self.service_name,
spec_type='ClusterIP')
self.assert_backend_amount(clusterip_svc_ip, self.pod_num,
server_port=90, protocol="UDP")