Merge "LVM plugin for k8s basic test"

Jenkins 2016-09-16 16:41:12 +00:00 committed by Gerrit Code Review
commit eb08681a89
13 changed files with 446 additions and 125 deletions

View File

@@ -70,8 +70,10 @@ def k8scluster(revert_snapshot, request, config,
kube_settings = getattr(request.instance, 'kube_settings',
settings.DEFAULT_CUSTOM_YAML)
LOG.info('Kube settings are {}'.format(kube_settings))
k8s_actions.install_k8s(custom_yaml=kube_settings)
k8s_actions.install_k8s(
custom_yaml=kube_settings,
lvm_config=underlay.config_lvm)
hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)
else:

View File

@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from fuel_ccp_tests.helpers import ext
@@ -150,7 +151,7 @@ def snapshot(request, hardware):
@pytest.fixture(scope="function")
def underlay(revert_snapshot, config, hardware):
def underlay(request, revert_snapshot, config, hardware):
"""Fixture that should provide SSH access to underlay objects.
Input data:
@@ -179,12 +180,18 @@ def underlay(revert_snapshot, config, hardware):
config.underlay.ssh = hardware.get_ssh_data(
roles=config.underlay.roles)
underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
if not config.underlay.lvm:
underlay.enable_lvm(hardware.lvm_storages())
config.underlay.lvm = underlay.config_lvm
hardware.create_snapshot(ext.SNAPSHOT.underlay)
else:
# 1. hardware environment created and powered on
# 2. config.underlay.ssh contains SSH access to provisioned nodes
# (can be passed from external config with TESTS_CONFIGS variable)
pass
underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
return underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
return underlay
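The fixture persists the LVM layout across reverts: on a fresh deployment it provisions LVM and stores the mapping in config.underlay.lvm before the snapshot is taken, while a reverted run reads the same mapping back from the stored config. A minimal sketch of that round trip, with a hypothetical 'slave-0' node:

# Fresh run: probe LVM disks, provision them, persist the mapping.
underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
underlay.enable_lvm(hardware.lvm_storages())  # e.g. {'slave-0': {'id': 'virtio-...'}}
config.underlay.lvm = underlay.config_lvm     # saved together with the snapshot

# Reverted run: EnvironmentManagerEmpty.lvm_storages() simply returns
# config.underlay.lvm, so nothing is re-provisioned.
lvm_config = hardware.lvm_storages()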

View File

@@ -77,6 +77,39 @@ class EnvironmentManager(object):
)
self.__devops_config = conf
def lvm_storages(self):
"""Returns a dict object of lvm storages in current environment
returned data example:
{
"master": {
"id": "virtio-bff72959d1a54cb19d08"
},
"slave-0": {
"id": "virtio-5e33affc8fe44503839f"
},
"slave-1": {
"id": "virtio-10b6a262f1ec4341a1ba"
},
}
:rtype: dict
"""
result = {}
for node in self.k8s_nodes:
lvm = [dev for dev in node.disk_devices if dev.volume.name == 'lvm']
if not lvm:
continue
lvm = lvm[0]
result[node.name] = {}
result_node = result[node.name]
result_node['id'] = "{bus}-{serial}".format(
bus=lvm.bus,
serial=lvm.volume.serial[:20])
LOG.info("Got disk-id '{}' for node '{}'".format(
result_node['id'], node.name))
return result
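The id built here, the device bus plus the first 20 characters of the volume serial, matches the udev /dev/disk/by-id naming for virtio disks, so enable_lvm() can resolve it straight to a device path. An illustration using the example data above (env stands for an EnvironmentManager instance):

storages = env.lvm_storages()
# {'slave-0': {'id': 'virtio-5e33affc8fe44503839f'}}
device = '/dev/disk/by-id/{}'.format(storages['slave-0']['id'])
# '/dev/disk/by-id/virtio-5e33affc8fe44503839f'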
@property
def _d_env_name(self):
"""Get environment name from fuel devops config

View File

@@ -32,6 +32,26 @@ class EnvironmentManagerEmpty(object):
"""
self.__config = config
def lvm_storages(self):
"""Returns data of lvm_storages on nodes in environment
It's expected that data of self.__config.lvm_storages will be
like this:
{
"node1": {
"device": "vdb"
},
"node2": {
"device": "vdb"
},
"node3": {
"device": "vdb"
},
}
:rtype: dict
"""
return self.__config.underlay.lvm
def get_ssh_data(self, roles=None):
raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
"Please provide SSH details in config.underlay.ssh")

View File

@@ -26,6 +26,33 @@ class K8sNode(K8sBaseResource):
def name(self):
return self.metadata.name
@property
def labels(self):
return self.metadata.labels
@labels.setter
def labels(self, labels):
current_labels = {
label: None for label in self.labels
}
current_labels.update(labels)
self.add_labels(labels=current_labels)
def add_labels(self, labels):
if not isinstance(labels, dict):
raise TypeError("labels must be a dict!")
body = {
"metadata":
{
"labels": labels
}
}
self._add_details(self._manager.update(body=body, name=self.name))
def remove_labels(self, list_labels):
labels = {label: None for label in list_labels}
self.add_labels(labels=labels)
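add_labels() relies on the merge semantics of the Kubernetes PATCH call behind update(): keys with a value are set, keys mapped to None become null in JSON and are removed. A sketch of the patch bodies the three label methods produce, assuming a node currently labeled {'role': 'minion'}:

node.add_labels({'lvm': 'on'})
# body: {'metadata': {'labels': {'lvm': 'on'}}}
node.remove_labels(['lvm'])
# body: {'metadata': {'labels': {'lvm': None}}}   -> label removed
node.labels = {'lvm': 'on'}
# the setter nulls every current label first, then applies the new dict:
# body: {'metadata': {'labels': {'role': None, 'lvm': 'on'}}}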
class K8sNodeManager(K8sBaseManager):
"""docstring for ClassName"""
@@ -49,3 +76,6 @@ class K8sNodeManager(K8sBaseManager):
def _deletecollection(self, **kwargs):
return self.api.deletecollection_namespaced_node(**kwargs)
def update(self, body, name, **kwargs):
return self.api.patch_namespaced_node(body=body, name=name, **kwargs)

View File

@@ -39,9 +39,45 @@ class K8SManager(object):
self._api_client = None
super(K8SManager, self).__init__()
def mark_lvm_nodes(self, lvm_config):
if lvm_config:
lvm_mark = {"lvm": "on"}
# Get node IPs
lvm_nodes_ips = [self.__underlay.host_by_node_name(node_name)
for node_name in lvm_config]
# Get only those K8sNodes which have IPs from lvm_nodes_ips
lvm_nodes = [
node for node in self.api.nodes.list()
if any(
ip.address in lvm_nodes_ips for ip in node.addresses)]
for node in lvm_nodes:
node.add_labels(lvm_mark)
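The 'lvm=on' mark applied here is the other half of the nodeSelector used by the LVM test pod (see test_create_nginx_with_lvm below): only nodes whose disks were provisioned get the label, so the scheduler keeps LVM-backed pods off unprepared nodes. With hypothetical addresses:

# lvm_config is keyed by underlay node name, e.g.:
lvm_config = {'slave-0': {'id': 'virtio-5e33affc8fe44503839f'}}
# host_by_node_name('slave-0') -> '10.10.0.3' (hypothetical); the K8sNode
# whose .addresses contain that IP ends up labeled {'lvm': 'on'}.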
def upload_lvm_plugin(self, node_name):
LOG.info("Uploading LVM plugin to node '{}'".format(node_name))
if self.__underlay:
with self.__underlay.remote(node_name=node_name) as remote:
remote.upload(settings.LVM_PLUGIN_PATH, '/tmp/')
with remote.get_sudo(remote):
remote.check_call(
'mkdir -p {}'.format(settings.LVM_PLUGIN_DIR),
verbose=True
)
remote.check_call(
"mv /tmp/{} {}".format(settings.LVM_FILENAME,
settings.LVM_PLUGIN_DIR),
verbose=True
)
remote.check_call(
"chmod +x {}/{}".format(settings.LVM_PLUGIN_DIR,
settings.LVM_FILENAME),
verbose=True
)
def install_k8s(self, custom_yaml=None, env_var=None,
k8s_admin_ip=None, k8s_slave_ips=None,
expected_ec=None, verbose=True):
expected_ec=None, verbose=True, lvm_config=None):
"""Action to deploy k8s by fuel-ccp-installer script
Additional steps:
@@ -64,6 +100,11 @@ class K8SManager(object):
k8s_slave_ips = [self.__underlay.host_by_node_name(k8s_node)
for k8s_node in k8s_nodes]
if lvm_config:
LOG.info("uploading LVM plugin for k8s")
for node_name in lvm_config:
self.upload_lvm_plugin(node_name)
environment_variables = {
"SLAVE_IPS": " ".join(k8s_slave_ips),
"ADMIN_IP": k8s_admin_ip,
@@ -113,6 +154,8 @@ class K8SManager(object):
self.__config.k8s.kube_host = k8s_admin_ip
self.mark_lvm_nodes(lvm_config)
return result
@property
@@ -169,6 +212,79 @@ class K8SManager(object):
'"{phase}" phase'.format(
pod_name=pod_name, phase=phase))
def check_pod_create(self, body, timeout=300, interval=5):
"""Check creating sample pod
:param body: dict, pod spec
:rtype: V1Pod
"""
LOG.info("Creating pod in k8s cluster")
LOG.debug(
"POD spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
LOG.debug("Timeout for creation is set to {}".format(timeout))
LOG.debug("Checking interval is set to {}".format(interval))
pod = self.api.pods.create(body=body)
pod.wait_running(timeout=timeout, interval=interval)
LOG.info("Pod '{}' is created".format(pod.metadata.name))
return self.api.pods.get(name=pod.metadata.name)
def wait_pod_deleted(self, podname, timeout=60, interval=5):
helpers.wait(
lambda: podname not in [pod.name for pod in self.api.pods.list()],
timeout=timeout,
interval=interval,
timeout_msg="Pod deletion timeout reached!"
)
def check_pod_delete(self, k8s_pod, timeout=300, interval=5):
"""Deleting pod from k8s
:param k8s_pod: fuel_ccp_tests.managers.k8s.pods.K8sPod
"""
LOG.info("Deleting pod '{}'".format(k8s_pod.name))
LOG.debug("Pod status:\n{}".format(k8s_pod.status))
LOG.debug("Timeout for deletion is set to {}".format(timeout))
LOG.debug("Checking interval is set to {}".format(interval))
self.api.pods.delete(body=k8s_pod, name=k8s_pod.name)
self.wait_pod_deleted(k8s_pod.name, timeout, interval)
LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
def check_service_create(self, body):
"""Check creating k8s service
:param body: dict, service spec
:rtype: K8sService object
"""
LOG.info("Creating service in k8s cluster")
LOG.debug(
"Service spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
service = self.api.services.create(body=body)
LOG.info("Service '{}' is created".format(service.metadata.name))
return self.api.services.get(name=service.metadata.name)
def check_ds_create(self, body):
"""Check creating k8s DaemonSet
:param body: dict, DaemonSet spec
:rtype: K8sDaemonSet object
"""
LOG.info("Creating DaemonSet in k8s cluster")
LOG.debug(
"DaemonSet spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
ds = self.api.daemonsets.create(body=body)
LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
return self.api.daemonsets.get(name=ds.metadata.name)
def create_objects(self, path):
if isinstance(path, str):
path = [path]

View File

@@ -12,9 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers import helpers
from devops.helpers import ssh_client
from paramiko import rsakey
from fuel_ccp_tests import logger
LOG = logger.logger
class UnderlaySSHManager(object):
"""Keep the list of SSH access credentials to Underlay nodes.
@@ -58,6 +63,7 @@ class UnderlaySSHManager(object):
or by a hostname.
"""
config_ssh = None
config_lvm = None
def __init__(self, config_ssh):
"""Read config.underlay.ssh object
@@ -67,6 +73,9 @@
if self.config_ssh is None:
self.config_ssh = []
if self.config_lvm is None:
self.config_lvm = {}
self.add_config_ssh(config_ssh)
def add_config_ssh(self, config_ssh):
@@ -156,6 +165,42 @@ class UnderlaySSHManager(object):
names.append(ssh['node_name'])
return names
def enable_lvm(self, lvmconfig):
"""Method for enabling lvm oh hosts in environment
:param lvmconfig: dict with ids or device' names of lvm storage
:raises: devops.error.DevopsCalledProcessError,
devops.error.TimeoutError, AssertionError, ValueError
"""
def get_actions(lvm_id):
return [
"systemctl enable lvm2-lvmetad.service",
"systemctl enable lvm2-lvmetad.socket",
"systemctl start lvm2-lvmetad.service",
"systemctl start lvm2-lvmetad.socket",
"pvcreate {} && pvs".format(lvm_id),
"vgcreate default {} && vgs".format(lvm_id),
"lvcreate -L 1G -T default/pool && lvs",
]
lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
for node_name in self.node_names():
lvm = lvmconfig.get(node_name, None)
if not lvm:
continue
if 'id' in lvm:
lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
elif 'device' in lvm:
lvmdevice = '/dev/{}'.format(lvm['device'])
else:
raise ValueError("Unknown LVM device type")
self.apt_install_package(
packages=lvmpackages, node_name=node_name, verbose=True)
for command in get_actions(lvmdevice):
self.sudo_check_call(command, node_name=node_name,
verbose=True)
self.config_lvm = dict(lvmconfig)
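The command sequence lines up with the flexVolume options used by the test pod below: the volume group is named 'default' and a thin pool 'pool' is created inside it, from which the plugin later carves per-pod volumes. A sketch of that correspondence (how the driver consumes each option is an assumption here):

# After enable_lvm(), each LVM node has:
#   pvcreate /dev/disk/by-id/<id>     -> physical volume
#   vgcreate default <device>         -> volume group 'default'
#   lvcreate -L 1G -T default/pool    -> thin pool 'pool'
# which the pod references through its flexVolume options:
flex_options = {
    'volumeID': 'vol1',        # logical volume expected in the pool
    'size': '100m',            # requested volume size
    'pool': 'pool',            # thin pool created above
    'volumeGroup': 'default',  # VG created above
}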
def host_by_node_name(self, node_name, address_pool=None):
ssh_data = self.__ssh_data(node_name=node_name,
address_pool=address_pool)
@@ -189,13 +234,15 @@ class UnderlaySSHManager(object):
"""Execute command on the node_name/host and check for exit code
:type cmd: str
:type node_name: str
:type host: str
:type verbose: bool
:type timeout: int
:type error_info: str
:type expected: list
:type raise_on_err: bool
:rtype: list stdout
:raises: DevopsCalledProcessError
:raises: devops.error.DevopsCalledProcessError
"""
remote = self.remote(node_name=node_name, host=host,
address_pool=address_pool)
@@ -204,6 +251,38 @@ class UnderlaySSHManager(object):
error_info=error_info, expected=expected,
raise_on_err=raise_on_err)
def apt_install_package(self, packages=None, node_name=None, host=None,
**kwargs):
"""Method to install packages on ubuntu nodes
:type packages: list
:type node_name: str
:type host: str
:raises: devops.error.DevopsCalledProcessError,
devops.error.TimeoutError, AssertionError, ValueError
Other params of check_call and sudo_check_call are allowed
"""
expected = kwargs.pop('expected', None)
if not packages or not isinstance(packages, list):
raise ValueError("packages list should be provided!")
install = "apt-get install -y {}".format(" ".join(packages))
# Should wait until other 'apt' jobs are finished
pgrep_expected = [0, 1]
pgrep_command = "pgrep -a -f apt"
helpers.wait(
lambda: (self.check_call(
pgrep_command, expected=pgrep_expected, host=host,
node_name=node_name, **kwargs).exit_code == 1
), interval=30, timeout=1200,
timeout_msg="Timeout reached while waiting for apt lock"
)
# Install packages
self.sudo_check_call("apt-get update", node_name=node_name, host=host,
**kwargs)
self.sudo_check_call(install, expected=expected, node_name=node_name,
host=host, **kwargs)
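This helper is what enable_lvm() uses to pull in lvm2 and friends; the pgrep loop waits for exit code 1 (no 'apt' process found) so the install does not race a concurrent apt run for the dpkg lock. Usage as in enable_lvm(), node name hypothetical:

underlay.apt_install_package(
    packages=['lvm2', 'liblvm2-dev', 'thin-provisioning-tools'],
    node_name='slave-0', verbose=True)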
def sudo_check_call(
self, cmd,
node_name=None, host=None, address_pool=None,
@@ -213,13 +292,15 @@ class UnderlaySSHManager(object):
"""Execute command with sudo on node_name/host and check for exit code
:type cmd: str
:type node_name: str
:type host: str
:type verbose: bool
:type timeout: int
:type error_info: str
:type expected: list
:type raise_on_err: bool
:rtype: list stdout
:raises: DevopsCalledProcessError
:raises: devops.error.DevopsCalledProcessError
"""
remote = self.remote(node_name=node_name, host=host,
address_pool=address_pool)

View File

@@ -163,8 +163,20 @@ NETCHECKER_SERVER_DIR = os.environ.get(
NETCHECKER_AGENT_DIR = os.environ.get(
'NETCHECKER_AGENT_DIR', os.path.join(os.getcwd(), 'mcp-netchecker-agent')
)
# Settings for AppController testing
# AC_ZIP_URL is used to get the link to a zip archive with AppController; by
# default it is built from AC_REPO (GitHub link to the AppController project)
# and AC_COMMIT (a specific commit or 'master'). For now you should provide
# AC_REPO (with or without AC_COMMIT) to pass the AppController tests.
AC_COMMIT = os.environ.get("AC_COMMIT", "master")
AC_REPO = os.environ.get("AC_REPO", "")
AC_ZIP_URL = os.environ.get(
"AC_ZIP_URL", "{repo}/archive/{commit}.zip".format(
repo=AC_REPO, commit=AC_COMMIT))
LVM_PLUGIN_DIRNAME = os.environ.get("LVM_PLUGIN_DIRNAME", 'mirantis.com~lvm')
LVM_PLUGIN_DIR = os.path.join(
'/usr/libexec/kubernetes/kubelet-plugins/volume/exec', LVM_PLUGIN_DIRNAME)
LVM_PLUGIN_PATH = os.environ.get("LVM_PLUGIN_PATH", "~/lvm")
LVM_FILENAME = os.path.basename(LVM_PLUGIN_PATH)
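kubelet discovers FlexVolume drivers under .../volume/exec/<vendor>~<driver>/<driver>, with '~' in the directory name standing in for '/' in the driver name a pod spec uses. With the defaults above, the uploaded binary therefore answers to the driver name 'mirantis.com/lvm', exactly what the test pod requests:

import os

plugin_dir = os.path.join(
    '/usr/libexec/kubernetes/kubelet-plugins/volume/exec', 'mirantis.com~lvm')
plugin_path = os.path.join(plugin_dir, os.path.basename('~/lvm'))
# /usr/libexec/kubernetes/kubelet-plugins/volume/exec/mirantis.com~lvm/lvm
# addressed from pod specs as driver: 'mirantis.com/lvm'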

View File

@@ -55,6 +55,8 @@ underlay_opts = [
ct.Cfg('nameservers', ct.JSONList(),
help="IP addresses of DNS servers",
default=[]),
ct.Cfg('lvm', ct.JSONDict(),
help="LVM settings for Underlay"),
]
# TODO(ddmitriev): remove these variables from settings.py

View File

@@ -0,0 +1,101 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from fuel_ccp_tests import logger
from fuel_ccp_tests.helpers import ext
LOG = logger.logger
@pytest.mark.component
class TestLVMPluginUsage(object):
"""Tests using k8s' LVM plugin.
Required environment variables to use:
CONF_PATH=./fuel-ccp-tests/templates/default-with-storage.yaml
LVM_PLUGIN_PATH=/path/to/lvm/plugin
To create basic pod node label 'lvm=on' is required on any k8s node.
"""
def check_lvm_exists(self, remote, lvm_name):
LOG.info("Check if lvm storage exists")
cmd = "lvs | grep -w {}".format(lvm_name)
with remote.get_sudo(remote):
remote.check_call(command=cmd, verbose=True,
timeout=120)
@pytest.mark.nginx_with_lvm
@pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
def test_create_nginx_with_lvm(self, underlay, k8scluster):
"""Test creating pod with LVM plugin
Scenario:
1. Create nginx pod with LVM plugin usage on top of k8s.
2. Ensure that volume is created.
3. Delete pod.
4. Ensure volume persists.
"""
lvm_volume_group = "default"
lvm_volume_name = "vol1"
nginx = {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "nginx"
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx",
"volumeMounts": [
{
"mountPath": "/data",
"name": "test"
}
],
"ports": [
{
"containerPort": 80
}
]
}
],
"volumes": [
{
"name": "test",
"flexVolume": {
"driver": "mirantis.com/lvm",
"fsType": "ext4",
"options": {
"volumeID": lvm_volume_name,
"size": "100m",
"pool": "pool",
"volumeGroup": lvm_volume_group,
}
}
}
],
"nodeSelector": {
"lvm": "on"
}
}
}
pod = k8scluster.check_pod_create(body=nginx)
remote = underlay.remote(host=pod.status.host_ip)
self.check_lvm_exists(remote, lvm_volume_name)
k8scluster.check_pod_delete(pod)
self.check_lvm_exists(remote, lvm_volume_name)

View File

@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from devops.helpers.helpers import wait
from fuel_ccp_tests import logger
LOG = logger.logger
@@ -57,84 +53,6 @@ class SystemBaseTest(object):
for node_name in underlay.node_names():
self.required_images_exists(node_name, underlay, required_images)
def check_pod_create(self, body, k8sclient, timeout=300, interval=5):
"""Check creating sample pod
:param k8s_pod: V1Pod
:param k8sclient: K8sCluster
:rtype: V1Pod
"""
LOG.info("Creating pod in k8s cluster")
LOG.debug(
"POD spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
LOG.debug("Timeout for creation is set to {}".format(timeout))
LOG.debug("Checking interval is set to {}".format(interval))
pod = k8sclient.pods.create(body=body)
pod.wait_running(timeout=300, interval=5)
LOG.info("Pod '{}' is created".format(pod.metadata.name))
return k8sclient.pods.get(name=pod.metadata.name)
@staticmethod
def wait_pod_deleted(k8sclient, podname, timeout=60, interval=5):
wait(
lambda: podname not in [pod.name for pod in k8sclient.pods.list()],
timeout=timeout,
interval=interval,
timeout_msg="Pod deletion timeout reached!"
)
@staticmethod
def check_pod_delete(k8s_pod, k8sclient, timeout=300, interval=5):
"""Deleting pod from k8s
:param k8s_pod: fuel_ccp_tests.managers.k8s.nodes.K8sNode
:param k8sclient: fuel_ccp_tests.managers.k8s.cluster.K8sCluster
"""
LOG.info("Deleting pod '{}'".format(k8s_pod.name))
LOG.debug("Pod status:\n{}".format(k8s_pod.status))
LOG.debug("Timeout for deletion is set to {}".format(timeout))
LOG.debug("Checking interval is set to {}".format(interval))
k8sclient.pods.delete(body=k8s_pod, name=k8s_pod.name)
SystemBaseTest.wait_pod_deleted(k8sclient, k8s_pod.name, timeout,
interval)
LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
@staticmethod
def check_service_create(body, k8sclient):
"""Check creating k8s service
:param body: dict, service spec
:param k8sclient: K8sCluster object
:rtype: K8sService object
"""
LOG.info("Creating service in k8s cluster")
LOG.debug(
"Service spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
service = k8sclient.services.create(body=body)
LOG.info("Service '{}' is created".format(service.metadata.name))
return k8sclient.services.get(name=service.metadata.name)
@staticmethod
def check_ds_create(body, k8sclient):
"""Check creating k8s DaemonSet
:param body: dict, DaemonSet spec
:param k8sclient: K8sCluster object
:rtype: K8sDaemonSet object
"""
LOG.info("Creating DaemonSet in k8s cluster")
LOG.debug(
"DaemonSet spec to create:\n{}".format(
yaml.dump(body, default_flow_style=False))
)
ds = k8sclient.daemonsets.create(body=body)
LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
return k8sclient.daemonsets.get(name=ds.metadata.name)
def check_number_kube_nodes(self, underlay, k8sclient):
"""Check number of slaves"""
LOG.info("Check number of nodes")

View File

@@ -101,9 +101,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest,
self.calico_ipip_exists(underlay)
self.check_etcd_health(underlay)
nginx = self.get_nginx_spec()
pod = self.check_pod_create(body=nginx, k8sclient=k8sclient)
pod = k8s_actions.check_pod_create(body=nginx)
self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip)
self.check_pod_delete(pod, k8sclient)
k8s_actions.check_pod_delete(pod)
@pytest.mark.k8s_installed_with_etcd_on_host
@pytest.mark.snapshot_needed
@@ -143,9 +143,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest,
self.calico_ipip_exists(underlay)
self.check_etcd_health(underlay)
nginx = self.get_nginx_spec()
pod = self.check_pod_create(body=nginx, k8sclient=k8sclient)
pod = k8s_actions.check_pod_create(body=nginx)
self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip)
self.check_pod_delete(pod, k8sclient)
k8s_actions.check_pod_delete(pod)
@pytest.mark.k8s_installed_with_etcd_in_container
@pytest.mark.snapshot_needed
@@ -185,9 +185,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest,
self.calico_ipip_exists(underlay)
self.check_etcd_health(underlay)
nginx = self.get_nginx_spec()
pod = self.check_pod_create(body=nginx, k8sclient=k8sclient)
pod = k8s_actions.check_pod_create(body=nginx)
self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip)
self.check_pod_delete(pod, k8sclient)
k8s_actions.check_pod_delete(pod)
@pytest.mark.k8s_installed_with_ready_ssh_keys
@pytest.mark.snapshot_needed
@@ -221,9 +221,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest,
self.calico_ipip_exists(underlay)
self.check_etcd_health(underlay)
nginx = self.get_nginx_spec()
pod = self.check_pod_create(body=nginx, k8sclient=k8sclient)
pod = k8s_actions.check_pod_create(body=nginx)
self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip)
self.check_pod_delete(pod, k8sclient)
k8s_actions.check_pod_delete(pod)
@pytest.mark.fuel_ccp_installer_idempotency

View File

@@ -12,20 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import pytest
import random
import requests
import time
import os
import yaml
from devops.helpers.helpers import wait, wait_pass
from k8sclient.client.rest import ApiException
from devops.helpers import helpers
from k8sclient.client import rest
from base_test import SystemBaseTest
import base_test
from fuel_ccp_tests.helpers import ext
from fuel_ccp_tests import logger
from fuel_ccp_tests import settings
from fuel_ccp_tests.helpers import ext
LOG = logger.logger
@@ -55,7 +55,8 @@ class TestFuelCCPNetCheckerMixin:
@pytest.mark.usefixtures("check_netchecker_files")
@pytest.mark.system
class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
class TestFuelCCPNetChecker(base_test.SystemBaseTest,
TestFuelCCPNetCheckerMixin):
"""Test class for network connectivity verification in k8s"""
@staticmethod
@@ -71,7 +72,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
@staticmethod
def wait_ds_running(k8sclient, dsname, timeout=60, interval=5):
wait(
helpers.wait(
lambda: TestFuelCCPNetChecker.get_ds_status(k8sclient, dsname),
timeout=timeout, interval=interval)
@@ -90,40 +91,38 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
'docker push {0}/netchecker/{1}:latest'.format(registry, stype),
node_name='master')
def start_netchecker_server(self, k8sclient):
def start_netchecker_server(self, k8s):
with open(self.pod_yaml_file) as pod_conf:
for pod_spec in yaml.load_all(pod_conf):
try:
if k8sclient.pods.get(name=pod_spec['metadata']['name']):
if k8s.api.pods.get(name=pod_spec['metadata']['name']):
LOG.debug('Network checker server pod {} is '
'already running! Skipping resource creation'
'.'.format(pod_spec['metadata']['name']))
continue
except ApiException as e:
except rest.ApiException as e:
if e.status == 404:
self.check_pod_create(body=pod_spec,
k8sclient=k8sclient)
k8s.check_pod_create(body=pod_spec)
else:
raise e
with open(self.svc_yaml_file) as svc_conf:
for svc_spec in yaml.load_all(svc_conf):
try:
if k8sclient.services.get(
if k8s.api.services.get(
name=svc_spec['metadata']['name']):
LOG.debug('Network checker server service {} '
'already exists! Skipping resource creation'
'.'.format(svc_spec['metadata']['name']))
continue
except ApiException as e:
except rest.ApiException as e:
if e.status == 404:
self.check_service_create(body=svc_spec,
k8sclient=k8sclient)
k8s.check_service_create(body=svc_spec)
else:
raise e
def start_netchecker_agent(self, underlay, k8sclient):
def start_netchecker_agent(self, underlay, k8s):
# TODO(apanchenko): use python API client here when it has the
# TODO(apanchenko): needed functionality (able to work with labels)
underlay.sudo_check_call(
@@ -133,10 +132,9 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
with open(self.ds_yaml_file) as ds_conf:
for daemon_set_spec in yaml.load_all(ds_conf):
self.check_ds_create(body=daemon_set_spec,
k8sclient=k8sclient)
self.wait_ds_running(
k8sclient,
k8s.check_ds_create(body=daemon_set_spec)
TestFuelCCPNetChecker.wait_ds_running(
k8s,
dsname=daemon_set_spec['metadata']['name'])
@staticmethod
@@ -147,7 +145,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
@staticmethod
def wait_netchecker_running(kube_host_ip, timeout=60, interval=5):
wait_pass(
helpers.wait_pass(
lambda: TestFuelCCPNetChecker.get_netchecker_status(kube_host_ip),
timeout=timeout, interval=interval)
@@ -216,6 +214,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
# STEP #1
show_step(1)
k8sclient = k8scluster.api
assert k8sclient.nodes.list() is not None, "Cannot get nodes list"
# STEP #2
show_step(2)
@@ -257,12 +256,12 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
# STEP #9
show_step(9)
self.start_netchecker_server(k8sclient=k8sclient)
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
self.start_netchecker_server(k8s=k8scluster)
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
# STEP #10
show_step(10)
self.start_netchecker_agent(underlay, k8sclient)
self.start_netchecker_agent(underlay, k8scluster)
# STEP #11
# currently agents need some time to start reporting to the server
@@ -286,13 +285,13 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
underlay.sudo_check_call(
'kubectl delete pod/netchecker-server',
node_name='master')
self.wait_pod_deleted(k8sclient, 'netchecker-server')
k8scluster.wait_pod_deleted('netchecker-server')
self.block_traffic_on_slave(underlay, target_slave)
# start netchecker-server
self.start_netchecker_server(k8sclient=k8sclient)
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
self.start_netchecker_server(k8s=k8scluster)
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
# STEP #13
show_step(13)