From 0e023bcd21ba16268c61e46b50646c3289982874 Mon Sep 17 00:00:00 2001
From: Sergey Lebedev
Date: Tue, 2 Aug 2016 18:18:40 +0300
Subject: [PATCH] LVM plugin for k8s basic test

We have a plugin for LVM usage in k8s; this patch adds a basic test
that uses it. The test creates an environment (which should have extra
storage), installs the software required to use LVM on the nodes,
uploads the plugin to each node and then creates an nginx pod that
uses the plugin for its storage.

Sample command to run the test:

ENV_NAME="lvm_usage"\
CONF_PATH=fuel_ccp_tests/templates/default-with-storage.yaml\
IMAGE_PATH=/path/to/image LVM_PLUGIN_PATH=/path/to/plugin/binary\
DEPLOY_SCRIPT=/path/to/fuel-ccp-installer/utils/jenkins/kargo_deploy.sh\
py.test -s fuel_ccp_tests/tests/component/k8s/test_k8s_lvm_plugin_usage.py

Change-Id: I0a31324e445902774f35b6cf2af1d9d43e74f53e
---
 fuel_ccp_tests/fixtures/k8s_fixtures.py      |   4 +-
 fuel_ccp_tests/fixtures/underlay_fixtures.py |  13 +-
 fuel_ccp_tests/managers/envmanager_devops.py |  33 +++++
 fuel_ccp_tests/managers/envmanager_empty.py  |  20 +++
 fuel_ccp_tests/managers/k8s/nodes.py         |  30 +++++
 fuel_ccp_tests/managers/k8smanager.py        | 118 +++++++++++++++++-
 .../managers/underlay_ssh_manager.py         |  85 ++++++++++++-
 fuel_ccp_tests/settings.py                   |  12 ++
 fuel_ccp_tests/settings_oslo.py              |   2 +
 .../k8s/test_k8s_lvm_plugin_usage.py         | 101 +++++++++++++++
 fuel_ccp_tests/tests/system/base_test.py     |  82 ------------
 .../tests/system/test_ccp_install_k8s.py     |  16 +--
 .../tests/system/test_netchecker.py          |  55 ++++----
 13 files changed, 446 insertions(+), 125 deletions(-)
 create mode 100644 fuel_ccp_tests/tests/component/k8s/test_k8s_lvm_plugin_usage.py

diff --git a/fuel_ccp_tests/fixtures/k8s_fixtures.py b/fuel_ccp_tests/fixtures/k8s_fixtures.py
index acf7e59..897a762 100644
--- a/fuel_ccp_tests/fixtures/k8s_fixtures.py
+++ b/fuel_ccp_tests/fixtures/k8s_fixtures.py
@@ -70,8 +70,10 @@ def k8scluster(revert_snapshot, request, config,
         kube_settings = getattr(request.instance, 'kube_settings',
                                 settings.DEFAULT_CUSTOM_YAML)
         LOG.info('Kube settings are {}'.format(kube_settings))
-        k8s_actions.install_k8s(custom_yaml=kube_settings)
+        k8s_actions.install_k8s(
+            custom_yaml=kube_settings,
+            lvm_config=underlay.config_lvm)
         hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)

     else:
diff --git a/fuel_ccp_tests/fixtures/underlay_fixtures.py b/fuel_ccp_tests/fixtures/underlay_fixtures.py
index a1fa813..20660c7 100644
--- a/fuel_ccp_tests/fixtures/underlay_fixtures.py
+++ b/fuel_ccp_tests/fixtures/underlay_fixtures.py
@@ -11,6 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+
 import pytest

 from fuel_ccp_tests.helpers import ext
@@ -150,7 +151,7 @@ def snapshot(request, hardware):


 @pytest.fixture(scope="function")
-def underlay(revert_snapshot, config, hardware):
+def underlay(request, revert_snapshot, config, hardware):
     """Fixture that should provide SSH access to underlay objects.

     Input data:
@@ -179,12 +180,18 @@ def underlay(revert_snapshot, config, hardware):
             config.underlay.ssh = hardware.get_ssh_data(
                 roles=config.underlay.roles)

+        underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+
+        if not config.underlay.lvm:
+            underlay.enable_lvm(hardware.lvm_storages())
+            config.underlay.lvm = underlay.config_lvm
+
         hardware.create_snapshot(ext.SNAPSHOT.underlay)

     else:
         # 1. hardware environment created and powered on
         # 2. config.underlay.ssh contains SSH access to provisioned nodes
         #    (can be passed from external config with TESTS_CONFIGS variable)
-        pass
+        underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)

-    return underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+    return underlay
diff --git a/fuel_ccp_tests/managers/envmanager_devops.py b/fuel_ccp_tests/managers/envmanager_devops.py
index f784af3..28174fe 100644
--- a/fuel_ccp_tests/managers/envmanager_devops.py
+++ b/fuel_ccp_tests/managers/envmanager_devops.py
@@ -77,6 +77,39 @@ class EnvironmentManager(object):
         )
         self.__devops_config = conf

+    def lvm_storages(self):
+        """Returns a dict of LVM storages in the current environment
+
+        returned data example:
+            {
+                "master": {
+                    "id": "virtio-bff72959d1a54cb19d08"
+                },
+                "slave-0": {
+                    "id": "virtio-5e33affc8fe44503839f"
+                },
+                "slave-1": {
+                    "id": "virtio-10b6a262f1ec4341a1ba"
+                },
+            }
+
+        :rtype: dict
+        """
+        result = {}
+        for node in self.k8s_nodes:
+            lvm = [x for x in node.disk_devices if x.volume.name == 'lvm']
+            if not lvm:
+                continue
+            lvm = lvm[0]
+            result[node.name] = {}
+            result_node = result[node.name]
+            result_node['id'] = "{bus}-{serial}".format(
+                bus=lvm.bus,
+                serial=lvm.volume.serial[:20])
+            LOG.info("Got disk-id '{}' for node '{}'".format(
+                result_node['id'], node.name))
+        return result
+
     @property
     def _d_env_name(self):
         """Get environment name from fuel devops config
diff --git a/fuel_ccp_tests/managers/envmanager_empty.py b/fuel_ccp_tests/managers/envmanager_empty.py
index 0aae265..232d604 100644
--- a/fuel_ccp_tests/managers/envmanager_empty.py
+++ b/fuel_ccp_tests/managers/envmanager_empty.py
@@ -32,6 +32,26 @@ class EnvironmentManagerEmpty(object):
         """
         self.__config = config

+    def lvm_storages(self):
+        """Returns LVM storage data for nodes in the environment
+
+        It's expected that the data in self.__config.underlay.lvm
+        will look like this:
+            {
+                "node1": {
+                    "device": "vdb"
+                },
+                "node2": {
+                    "device": "vdb"
+                },
+                "node3": {
+                    "device": "vdb"
+                },
+            }
+        :rtype: dict
+        """
+        return self.__config.underlay.lvm
+
     def get_ssh_data(self, roles=None):
         raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
" "Please provide SSH details in config.underlay.ssh") diff --git a/fuel_ccp_tests/managers/k8s/nodes.py b/fuel_ccp_tests/managers/k8s/nodes.py index 6b8b204..7610230 100644 --- a/fuel_ccp_tests/managers/k8s/nodes.py +++ b/fuel_ccp_tests/managers/k8s/nodes.py @@ -26,6 +26,33 @@ class K8sNode(K8sBaseResource): def name(self): return self.metadata.name + @property + def labels(self): + return self.metadata.labels + + @labels.setter + def labels(self, labels): + current_labels = { + label: None for label in self.labels + } + current_labels.update(labels) + self.add_labels(labels=current_labels) + + def add_labels(self, labels): + if not isinstance(labels, dict): + raise TypeError("labels must be a dict!") + body = { + "metadata": + { + "labels": labels + } + } + self._add_details(self._manager.update(body=body, name=self.name)) + + def remove_labels(self, list_labels): + labels = {label: None for label in list_labels} + self.add_labels(labels=labels) + class K8sNodeManager(K8sBaseManager): """docstring for ClassName""" @@ -49,3 +76,6 @@ class K8sNodeManager(K8sBaseManager): def _deletecollection(self, **kwargs): return self.api.deletecollection_namespaced_node(**kwargs) + + def update(self, body, name, **kwargs): + return self.api.patch_namespaced_node(body=body, name=name, **kwargs) diff --git a/fuel_ccp_tests/managers/k8smanager.py b/fuel_ccp_tests/managers/k8smanager.py index 9dc5135..81c9629 100644 --- a/fuel_ccp_tests/managers/k8smanager.py +++ b/fuel_ccp_tests/managers/k8smanager.py @@ -39,9 +39,45 @@ class K8SManager(object): self._api_client = None super(K8SManager, self).__init__() + def mark_lvm_nodes(self, lvm_config): + if lvm_config: + lvm_mark = {"lvm": "on"} + # Get nodes ips + lvm_nodes_ips = [self.__underlay.host_by_node_name(node_name) + for node_name in lvm_config] + # Get only those K8sNodes, which has ips from lvm_nodes_ips + lvm_nodes = [ + node for node in self.api.nodes.list() + if any( + ip.address in lvm_nodes_ips for ip in node.addresses)] + + for node in lvm_nodes: + node.add_labels(lvm_mark) + + def upload_lvm_plugin(self, node_name): + LOG.info("Uploading LVM plugin to node '{}'".format(node_name)) + if self.__underlay: + with self.__underlay.remote(node_name=node_name) as remote: + remote.upload(settings.LVM_PLUGIN_PATH, '/tmp/') + with remote.get_sudo(remote): + remote.check_call( + 'mkdir -p {}'.format(settings.LVM_PLUGIN_DIR), + verbose=True + ) + remote.check_call( + "mv /tmp/{} {}".format(settings.LVM_FILENAME, + settings.LVM_PLUGIN_DIR), + verbose=True + ) + remote.check_call( + "chmod +x {}/{}".format(settings.LVM_PLUGIN_DIR, + settings.LVM_FILENAME), + verbose=True + ) + def install_k8s(self, custom_yaml=None, env_var=None, k8s_admin_ip=None, k8s_slave_ips=None, - expected_ec=None, verbose=True): + expected_ec=None, verbose=True, lvm_config=None): """Action to deploy k8s by fuel-ccp-installer script Additional steps: @@ -64,6 +100,11 @@ class K8SManager(object): k8s_slave_ips = [self.__underlay.host_by_node_name(k8s_node) for k8s_node in k8s_nodes] + if lvm_config: + LOG.info("uploading LVM plugin for k8s") + for node_name in lvm_config: + self.upload_lvm_plugin(node_name) + environment_variables = { "SLAVE_IPS": " ".join(k8s_slave_ips), "ADMIN_IP": k8s_admin_ip, @@ -113,6 +154,8 @@ class K8SManager(object): self.__config.k8s.kube_host = k8s_admin_ip + self.mark_lvm_nodes(lvm_config) + return result @property @@ -169,6 +212,79 @@ class K8SManager(object): '"{phase}" phase'.format( pod_name=pod_name, phase=phase)) + def check_pod_create(self, body, 
+                         timeout=300, interval=5):
+        """Check creating sample pod
+
+        :param body: dict, pod spec to create
+        :param timeout: int, seconds to wait for the pod to reach Running
+        :rtype: K8sPod
+        """
+        LOG.info("Creating pod in k8s cluster")
+        LOG.debug(
+            "POD spec to create:\n{}".format(
+                yaml.dump(body, default_flow_style=False))
+        )
+        LOG.debug("Timeout for creation is set to {}".format(timeout))
+        LOG.debug("Checking interval is set to {}".format(interval))
+        pod = self.api.pods.create(body=body)
+        pod.wait_running(timeout=timeout, interval=interval)
+        LOG.info("Pod '{}' is created".format(pod.metadata.name))
+        return self.api.pods.get(name=pod.metadata.name)
+
+    def wait_pod_deleted(self, podname, timeout=60, interval=5):
+        helpers.wait(
+            lambda: podname not in [pod.name for pod in self.api.pods.list()],
+            timeout=timeout,
+            interval=interval,
+            timeout_msg="Pod deletion timeout reached!"
+        )
+
+    def check_pod_delete(self, k8s_pod, timeout=300, interval=5):
+        """Deleting pod from k8s
+
+        :param k8s_pod: fuel_ccp_tests.managers.k8s.pods.K8sPod
+        :param timeout: int, seconds to wait for the pod to disappear
+        """
+        LOG.info("Deleting pod '{}'".format(k8s_pod.name))
+        LOG.debug("Pod status:\n{}".format(k8s_pod.status))
+        LOG.debug("Timeout for deletion is set to {}".format(timeout))
+        LOG.debug("Checking interval is set to {}".format(interval))
+        self.api.pods.delete(body=k8s_pod, name=k8s_pod.name)
+        self.wait_pod_deleted(k8s_pod.name, timeout, interval)
+        LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
+
+    def check_service_create(self, body):
+        """Check creating k8s service
+
+        :param body: dict, service spec
+        :returns: created service, re-read from the cluster
+        :rtype: K8sService object
+        """
+        LOG.info("Creating service in k8s cluster")
+        LOG.debug(
+            "Service spec to create:\n{}".format(
+                yaml.dump(body, default_flow_style=False))
+        )
+        service = self.api.services.create(body=body)
+        LOG.info("Service '{}' is created".format(service.metadata.name))
+        return self.api.services.get(name=service.metadata.name)
+
+    def check_ds_create(self, body):
+        """Check creating k8s DaemonSet
+
+        :param body: dict, DaemonSet spec
+        :returns: created DaemonSet, re-read from the cluster
+        :rtype: K8sDaemonSet object
+        """
+        LOG.info("Creating DaemonSet in k8s cluster")
+        LOG.debug(
+            "DaemonSet spec to create:\n{}".format(
+                yaml.dump(body, default_flow_style=False))
+        )
+        ds = self.api.daemonsets.create(body=body)
+        LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
+        return self.api.daemonsets.get(name=ds.metadata.name)
+
     def create_objects(self, path):
         if isinstance(path, str):
             path = [path]
diff --git a/fuel_ccp_tests/managers/underlay_ssh_manager.py b/fuel_ccp_tests/managers/underlay_ssh_manager.py
index cbbe854..a6c3639 100644
--- a/fuel_ccp_tests/managers/underlay_ssh_manager.py
+++ b/fuel_ccp_tests/managers/underlay_ssh_manager.py
@@ -12,9 +12,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.

+from devops.helpers import helpers
 from devops.helpers import ssh_client
 from paramiko import rsakey

+from fuel_ccp_tests import logger
+
+LOG = logger.logger
+

 class UnderlaySSHManager(object):
     """Keep the list of SSH access credentials to Underlay nodes.
@@ -58,6 +63,7 @@ class UnderlaySSHManager(object):
        or by a hostname.
""" config_ssh = None + config_lvm = None def __init__(self, config_ssh): """Read config.underlay.ssh object @@ -67,6 +73,9 @@ class UnderlaySSHManager(object): if self.config_ssh is None: self.config_ssh = [] + if self.config_lvm is None: + self.config_lvm = {} + self.add_config_ssh(config_ssh) def add_config_ssh(self, config_ssh): @@ -156,6 +165,42 @@ class UnderlaySSHManager(object): names.append(ssh['node_name']) return names + def enable_lvm(self, lvmconfig): + """Method for enabling lvm oh hosts in environment + + :param lvmconfig: dict with ids or device' names of lvm storage + :raises: devops.error.DevopsCalledProcessError, + devops.error.TimeoutError, AssertionError, ValueError + """ + def get_actions(lvm_id): + return [ + "systemctl enable lvm2-lvmetad.service", + "systemctl enable lvm2-lvmetad.socket", + "systemctl start lvm2-lvmetad.service", + "systemctl start lvm2-lvmetad.socket", + "pvcreate {} && pvs".format(lvm_id), + "vgcreate default {} && vgs".format(lvm_id), + "lvcreate -L 1G -T default/pool && lvs", + ] + lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"] + for node_name in self.node_names(): + lvm = lvmconfig.get(node_name, None) + if not lvm: + continue + if 'id' in lvm: + lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id']) + elif 'device' in lvm: + lvmdevice = '/dev/{}'.format(lvm['device']) + else: + raise ValueError("Unknown LVM device type") + if lvmdevice: + self.apt_install_package( + packages=lvmpackages, node_name=node_name, verbose=True) + for command in get_actions(lvmdevice): + self.sudo_check_call(command, node_name=node_name, + verbose=True) + self.config_lvm = dict(lvmconfig) + def host_by_node_name(self, node_name, address_pool=None): ssh_data = self.__ssh_data(node_name=node_name, address_pool=address_pool) @@ -189,13 +234,15 @@ class UnderlaySSHManager(object): """Execute command on the node_name/host and check for exit code :type cmd: str + :type node_name: str + :type host: str :type verbose: bool :type timeout: int :type error_info: str :type expected: list :type raise_on_err: bool :rtype: list stdout - :raises: DevopsCalledProcessError + :raises: devops.error.DevopsCalledProcessError """ remote = self.remote(node_name=node_name, host=host, address_pool=address_pool) @@ -204,6 +251,38 @@ class UnderlaySSHManager(object): error_info=error_info, expected=expected, raise_on_err=raise_on_err) + def apt_install_package(self, packages=None, node_name=None, host=None, + **kwargs): + """Method to install packages on ubuntu nodes + + :type packages: list + :type node_name: str + :type host: str + :raises: devops.error.DevopsCalledProcessError, + devops.error.TimeoutError, AssertionError, ValueError + + Other params of check_call and sudo_check_call are allowed + """ + expected = kwargs.pop('expected', None) + if not packages or not isinstance(packages, list): + raise ValueError("packages list should be provided!") + install = "apt-get install -y {}".format(" ".join(packages)) + # Should wait until other 'apt' jobs are finished + pgrep_expected = [0, 1] + pgrep_command = "pgrep -a -f apt" + helpers.wait( + lambda: (self.check_call( + pgrep_command, expected=pgrep_expected, host=host, + node_name=node_name, **kwargs).exit_code == 1 + ), interval=30, timeout=1200, + timeout_msg="Timeout reached while waiting for apt lock" + ) + # Install packages + self.sudo_check_call("apt-get update", node_name=node_name, host=host, + **kwargs) + self.sudo_check_call(install, expected=expected, node_name=node_name, + host=host, **kwargs) + def sudo_check_call( 
             self, cmd, node_name=None, host=None, address_pool=None,
@@ -213,13 +292,15 @@ class UnderlaySSHManager(object):
         """Execute command with sudo on node_name/host and check for exit code

         :type cmd: str
+        :type node_name: str
+        :type host: str
         :type verbose: bool
         :type timeout: int
         :type error_info: str
         :type expected: list
         :type raise_on_err: bool
         :rtype: list stdout
-        :raises: DevopsCalledProcessError
+        :raises: devops.error.DevopsCalledProcessError
         """
         remote = self.remote(node_name=node_name, host=host,
                              address_pool=address_pool)
diff --git a/fuel_ccp_tests/settings.py b/fuel_ccp_tests/settings.py
index e665e62..bc9cd6b 100644
--- a/fuel_ccp_tests/settings.py
+++ b/fuel_ccp_tests/settings.py
@@ -148,8 +148,20 @@ NETCHECKER_SERVER_DIR = os.environ.get(
 NETCHECKER_AGENT_DIR = os.environ.get(
     'NETCHECKER_AGENT_DIR', os.path.join(os.getcwd(), 'mcp-netchecker-agent')
 )
+
+# Settings for AppController testing
+# AC_ZIP_URL is used to get the link to a zip archive with AppController; by
+# default it is built from AC_REPO (a GitHub link to the AppController
+# project) and AC_COMMIT (a specific commit or master). For now, you should
+# provide AC_REPO (with or without AC_COMMIT) to pass the AppController tests.
 AC_COMMIT = os.environ.get("AC_COMMIT", "master")
 AC_REPO = os.environ.get("AC_REPO", "")
 AC_ZIP_URL = os.environ.get(
     "AC_ZIP_URL", "{repo}/archive/{commit}.zip".format(
         repo=AC_REPO, commit=AC_COMMIT))
+
+LVM_PLUGIN_DIRNAME = os.environ.get("LVM_PLUGIN_DIRNAME", 'mirantis.com~lvm')
+LVM_PLUGIN_DIR = os.path.join(
+    '/usr/libexec/kubernetes/kubelet-plugins/volume/exec', LVM_PLUGIN_DIRNAME)
+LVM_PLUGIN_PATH = os.environ.get("LVM_PLUGIN_PATH", "~/lvm")
+LVM_FILENAME = os.path.basename(LVM_PLUGIN_PATH)
diff --git a/fuel_ccp_tests/settings_oslo.py b/fuel_ccp_tests/settings_oslo.py
index 7ced42d..c9cf139 100644
--- a/fuel_ccp_tests/settings_oslo.py
+++ b/fuel_ccp_tests/settings_oslo.py
@@ -55,6 +55,8 @@ underlay_opts = [
     ct.Cfg('nameservers', ct.JSONList(),
            help="IP addresses of DNS servers",
            default=[]),
+    ct.Cfg('lvm', ct.JSONDict(),
+           help="LVM settings for Underlay"),
 ]

 # TODO(ddmitriev): remove these variables from settings.py
diff --git a/fuel_ccp_tests/tests/component/k8s/test_k8s_lvm_plugin_usage.py b/fuel_ccp_tests/tests/component/k8s/test_k8s_lvm_plugin_usage.py
new file mode 100644
index 0000000..07f4342
--- /dev/null
+++ b/fuel_ccp_tests/tests/component/k8s/test_k8s_lvm_plugin_usage.py
@@ -0,0 +1,101 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import pytest
+
+from fuel_ccp_tests import logger
+from fuel_ccp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+@pytest.mark.component
+class TestLVMPluginUsage(object):
+    """Tests using the k8s LVM plugin.
+
+    Required environment variables:
+        CONF_PATH=./fuel-ccp-tests/templates/default-with-storage.yaml
+        LVM_PLUGIN_PATH=/path/to/lvm/plugin
+
+    To create the basic pod, a k8s node labeled 'lvm=on' is required.
+ """ + + def check_lvm_exists(self, remote, lvm_name): + LOG.info("Check if lvm storage exists") + cmd = "lvs | grep -w {}".format(lvm_name) + with remote.get_sudo(remote): + remote.check_call(command=cmd, verbose=True, + timeout=120) + + @pytest.mark.nginx_with_lvm + @pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed) + def test_create_nginx_with_lvm(self, underlay, k8scluster): + """Test creating pod with LVM plugin + + Scenario: + 1. Create nginx pod with LVM plugin usage on top of k8s. + 2. Ensure that volume is created. + 3. Delete pod. + 4. Ensure volume persists. + """ + lvm_volume_group = "default" + lvm_volume_name = "vol1" + nginx = { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "nginx" + }, + "spec": { + "containers": [ + { + "name": "nginx", + "image": "nginx", + "volumeMounts": [ + { + "mountPath": "/data", + "name": "test" + } + ], + "ports": [ + { + "containerPort": 80 + } + ] + } + ], + "volumes": [ + { + "name": "test", + "flexVolume": { + "driver": "mirantis.com/lvm", + "fsType": "ext4", + "options": { + "volumeID": lvm_volume_name, + "size": "100m", + "pool": "pool", + "volumeGroup": lvm_volume_group, + } + } + } + ], + "nodeSelector": { + "lvm": "on" + } + } + } + pod = k8scluster.check_pod_create(body=nginx) + remote = underlay.remote(host=pod.status.host_ip) + self.check_lvm_exists(remote, lvm_volume_name) + k8scluster.check_pod_delete(pod) + self.check_lvm_exists(remote, lvm_volume_name) diff --git a/fuel_ccp_tests/tests/system/base_test.py b/fuel_ccp_tests/tests/system/base_test.py index fdf8677..1e93dee 100644 --- a/fuel_ccp_tests/tests/system/base_test.py +++ b/fuel_ccp_tests/tests/system/base_test.py @@ -12,10 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -import yaml - -from devops.helpers.helpers import wait - from fuel_ccp_tests import logger LOG = logger.logger @@ -57,84 +53,6 @@ class SystemBaseTest(object): for node_name in underlay.node_names(): self.required_images_exists(node_name, underlay, required_images) - def check_pod_create(self, body, k8sclient, timeout=300, interval=5): - """Check creating sample pod - - :param k8s_pod: V1Pod - :param k8sclient: K8sCluster - :rtype: V1Pod - """ - LOG.info("Creating pod in k8s cluster") - LOG.debug( - "POD spec to create:\n{}".format( - yaml.dump(body, default_flow_style=False)) - ) - LOG.debug("Timeout for creation is set to {}".format(timeout)) - LOG.debug("Checking interval is set to {}".format(interval)) - pod = k8sclient.pods.create(body=body) - pod.wait_running(timeout=300, interval=5) - LOG.info("Pod '{}' is created".format(pod.metadata.name)) - return k8sclient.pods.get(name=pod.metadata.name) - - @staticmethod - def wait_pod_deleted(k8sclient, podname, timeout=60, interval=5): - wait( - lambda: podname not in [pod.name for pod in k8sclient.pods.list()], - timeout=timeout, - interval=interval, - timeout_msg="Pod deletion timeout reached!" 
- ) - - @staticmethod - def check_pod_delete(k8s_pod, k8sclient, timeout=300, interval=5): - """Deleting pod from k8s - - :param k8s_pod: fuel_ccp_tests.managers.k8s.nodes.K8sNode - :param k8sclient: fuel_ccp_tests.managers.k8s.cluster.K8sCluster - """ - LOG.info("Deleting pod '{}'".format(k8s_pod.name)) - LOG.debug("Pod status:\n{}".format(k8s_pod.status)) - LOG.debug("Timeout for deletion is set to {}".format(timeout)) - LOG.debug("Checking interval is set to {}".format(interval)) - k8sclient.pods.delete(body=k8s_pod, name=k8s_pod.name) - SystemBaseTest.wait_pod_deleted(k8sclient, k8s_pod.name, timeout, - interval) - LOG.debug("Pod '{}' is deleted".format(k8s_pod.name)) - - @staticmethod - def check_service_create(body, k8sclient): - """Check creating k8s service - - :param body: dict, service spec - :param k8sclient: K8sCluster object - :rtype: K8sService object - """ - LOG.info("Creating service in k8s cluster") - LOG.debug( - "Service spec to create:\n{}".format( - yaml.dump(body, default_flow_style=False)) - ) - service = k8sclient.services.create(body=body) - LOG.info("Service '{}' is created".format(service.metadata.name)) - return k8sclient.services.get(name=service.metadata.name) - - @staticmethod - def check_ds_create(body, k8sclient): - """Check creating k8s DaemonSet - - :param body: dict, DaemonSet spec - :param k8sclient: K8sCluster object - :rtype: K8sDaemonSet object - """ - LOG.info("Creating DaemonSet in k8s cluster") - LOG.debug( - "DaemonSet spec to create:\n{}".format( - yaml.dump(body, default_flow_style=False)) - ) - ds = k8sclient.daemonsets.create(body=body) - LOG.info("DaemonSet '{}' is created".format(ds.metadata.name)) - return k8sclient.daemonsets.get(name=ds.metadata.name) - def check_number_kube_nodes(self, underlay, k8sclient): """Check number of slaves""" LOG.info("Check number of nodes") diff --git a/fuel_ccp_tests/tests/system/test_ccp_install_k8s.py b/fuel_ccp_tests/tests/system/test_ccp_install_k8s.py index 4806be4..e56ad7b 100644 --- a/fuel_ccp_tests/tests/system/test_ccp_install_k8s.py +++ b/fuel_ccp_tests/tests/system/test_ccp_install_k8s.py @@ -101,9 +101,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest, self.calico_ipip_exists(underlay) self.check_etcd_health(underlay) nginx = self.get_nginx_spec() - pod = self.check_pod_create(body=nginx, k8sclient=k8sclient) + pod = k8s_actions.check_pod_create(body=nginx) self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip) - self.check_pod_delete(pod, k8sclient) + k8s_actions.check_pod_delete(pod) @pytest.mark.k8s_installed_with_etcd_on_host @pytest.mark.snapshot_needed @@ -143,9 +143,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest, self.calico_ipip_exists(underlay) self.check_etcd_health(underlay) nginx = self.get_nginx_spec() - pod = self.check_pod_create(body=nginx, k8sclient=k8sclient) + pod = k8s_actions.check_pod_create(body=nginx) self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip) - self.check_pod_delete(pod, k8sclient) + k8s_actions.check_pod_delete(pod) @pytest.mark.k8s_installed_with_etcd_in_container @pytest.mark.snapshot_needed @@ -185,9 +185,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest, self.calico_ipip_exists(underlay) self.check_etcd_health(underlay) nginx = self.get_nginx_spec() - pod = self.check_pod_create(body=nginx, k8sclient=k8sclient) + pod = k8s_actions.check_pod_create(body=nginx) self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip) - self.check_pod_delete(pod, k8sclient) + k8s_actions.check_pod_delete(pod) 
     @pytest.mark.k8s_installed_with_ready_ssh_keys
     @pytest.mark.snapshot_needed
@@ -221,9 +221,9 @@ class TestFuelCCPInstaller(base_test.SystemBaseTest,
         self.calico_ipip_exists(underlay)
         self.check_etcd_health(underlay)
         nginx = self.get_nginx_spec()
-        pod = self.check_pod_create(body=nginx, k8sclient=k8sclient)
+        pod = k8s_actions.check_pod_create(body=nginx)
         self.check_nginx_pod_is_reached(underlay, pod.status.pod_ip)
-        self.check_pod_delete(pod, k8sclient)
+        k8s_actions.check_pod_delete(pod)

     @pytest.mark.fuel_ccp_installer_idempotency
diff --git a/fuel_ccp_tests/tests/system/test_netchecker.py b/fuel_ccp_tests/tests/system/test_netchecker.py
index 9a7a0c7..e09125a 100644
--- a/fuel_ccp_tests/tests/system/test_netchecker.py
+++ b/fuel_ccp_tests/tests/system/test_netchecker.py
@@ -12,20 +12,20 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.

+import os
 import pytest
 import random
 import requests
 import time
-import os
 import yaml

-from devops.helpers.helpers import wait, wait_pass
-from k8sclient.client.rest import ApiException
+from devops.helpers import helpers
+from k8sclient.client import rest

-from base_test import SystemBaseTest
+import base_test
+from fuel_ccp_tests.helpers import ext
 from fuel_ccp_tests import logger
 from fuel_ccp_tests import settings
-from fuel_ccp_tests.helpers import ext

 LOG = logger.logger

@@ -55,7 +55,8 @@ class TestFuelCCPNetCheckerMixin:

 @pytest.mark.usefixtures("check_netchecker_files")
 @pytest.mark.system
-class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
+class TestFuelCCPNetChecker(base_test.SystemBaseTest,
+                            TestFuelCCPNetCheckerMixin):
     """Test class for network connectivity verification in k8s"""

     @staticmethod
@@ -71,7 +72,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):

     @staticmethod
     def wait_ds_running(k8sclient, dsname, timeout=60, interval=5):
-        wait(
+        helpers.wait(
             lambda: TestFuelCCPNetChecker.get_ds_status(k8sclient, dsname),
             timeout=timeout, interval=interval)

@@ -90,40 +91,38 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
             'docker push {0}/netchecker/{1}:latest'.format(registry, stype),
             node_name='master')

-    def start_netchecker_server(self, k8sclient):
+    def start_netchecker_server(self, k8s):
         with open(self.pod_yaml_file) as pod_conf:
             for pod_spec in yaml.load_all(pod_conf):
                 try:
-                    if k8sclient.pods.get(name=pod_spec['metadata']['name']):
+                    if k8s.api.pods.get(name=pod_spec['metadata']['name']):
                         LOG.debug('Network checker server pod {} is '
                                   'already running! Skipping resource creation'
                                   '.'.format(pod_spec['metadata']['name']))
                         continue
-                except ApiException as e:
+                except rest.ApiException as e:
                     if e.status == 404:
-                        self.check_pod_create(body=pod_spec,
-                                              k8sclient=k8sclient)
+                        k8s.check_pod_create(body=pod_spec)
                     else:
                         raise e

         with open(self.svc_yaml_file) as svc_conf:
             for svc_spec in yaml.load_all(svc_conf):
                 try:
-                    if k8sclient.services.get(
+                    if k8s.api.services.get(
                             name=svc_spec['metadata']['name']):
                         LOG.debug('Network checker server pod {} is '
                                   'already running! Skipping resource creation'
                                   '.'.format(svc_spec['metadata']['name']))
                         continue
-                except ApiException as e:
+                except rest.ApiException as e:
                     if e.status == 404:
-                        self.check_service_create(body=svc_spec,
-                                                  k8sclient=k8sclient)
+                        k8s.check_service_create(body=svc_spec)
                     else:
                         raise e

-    def start_netchecker_agent(self, underlay, k8sclient):
+    def start_netchecker_agent(self, underlay, k8s):
         # TODO(apanchenko): use python API client here when it will have
         # TODO(apanchenko): needed functionality (able work with labels)
         underlay.sudo_check_call(
@@ -133,10 +132,9 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):

         with open(self.ds_yaml_file) as ds_conf:
             for daemon_set_spec in yaml.load_all(ds_conf):
-                self.check_ds_create(body=daemon_set_spec,
-                                     k8sclient=k8sclient)
-                self.wait_ds_running(
-                    k8sclient,
+                k8s.check_ds_create(body=daemon_set_spec)
+                TestFuelCCPNetChecker.wait_ds_running(
+                    k8s.api,
                     dsname=daemon_set_spec['metadata']['name'])

     @staticmethod
@@ -147,7 +145,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):

     @staticmethod
     def wait_netchecker_running(kube_host_ip, timeout=60, interval=5):
-        wait_pass(
+        helpers.wait_pass(
             lambda: TestFuelCCPNetChecker.get_netchecker_status(kube_host_ip),
             timeout=timeout, interval=interval)

@@ -216,6 +214,7 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
         # STEP #1
         show_step(1)
         k8sclient = k8scluster.api
+        assert k8sclient.nodes.list() is not None, "Cannot get nodes list"

         # STEP #2
         show_step(2)
@@ -257,12 +256,12 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):

         # STEP #9
         show_step(9)
-        self.start_netchecker_server(k8sclient=k8sclient)
+        self.start_netchecker_server(k8s=k8scluster)
         self.wait_netchecker_running(config.k8s.kube_host, timeout=240)

         # STEP #10
         show_step(10)
-        self.start_netchecker_agent(underlay, k8sclient)
+        self.start_netchecker_agent(underlay, k8scluster)

         # STEP #11
         # currently agents need some time to start reporting to the server
@@ -286,13 +285,13 @@ class TestFuelCCPNetChecker(SystemBaseTest, TestFuelCCPNetCheckerMixin):
         underlay.sudo_check_call(
             'kubectl delete pod/netchecker-server', node_name='master')
-        self.wait_pod_deleted(k8sclient, 'netchecker-server')
+        k8scluster.wait_pod_deleted('netchecker-server')
         self.block_traffic_on_slave(underlay, target_slave)

         # start netchecker-server
-        self.start_netchecker_server(k8sclient=k8sclient)
+        self.start_netchecker_server(k8s=k8scluster)
         self.wait_netchecker_running(config.k8s.kube_host, timeout=240)

         # STEP #13
         show_step(13)
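
Note on the K8sNode label helpers added in managers/k8s/nodes.py: all three
helpers go through a single PATCH request (patch_namespaced_node), in which
a key set to None is removed by the API server. A hedged usage sketch --
'k8s_actions' stands for the K8SManager fixture from this patch, and
nodes.list() is used for lookup because mark_lvm_nodes() already relies
on it:

    node = k8s_actions.api.nodes.list()[0]

    # add_labels() merges: only the listed keys are patched in.
    node.add_labels({'lvm': 'on'})

    # The labels setter replaces the whole set: every current label is
    # first mapped to None ("delete this key"), then the new mapping is
    # applied on top of that in the same request.
    node.labels = {'lvm': 'on'}

    # remove_labels() drops the listed keys by patching them to None.
    node.remove_labels(['lvm'])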
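Note on the plugin binary: the test pod's "driver": "mirantis.com/lvm" is
resolved by kubelet to the LVM_PLUGIN_DIRNAME directory ('mirantis.com~lvm')
under /usr/libexec/kubernetes/kubelet-plugins/volume/exec, which is where
upload_lvm_plugin() installs the file. The plugin itself is not part of this
patch; below is only a rough, hedged sketch of the FlexVolume contract such
a binary has to satisfy (operation names and the JSON reply follow the k8s
FlexVolume convention of this period; a real driver would also perform the
pvcreate/lvcreate/mount work using the "volumeID", "volumeGroup", "pool"
and "size" options that the pod spec passes):

    #!/usr/bin/env python
    # Hedged sketch only, not the actual mirantis.com/lvm plugin source.
    import json
    import sys

    def main():
        op = sys.argv[1] if len(sys.argv) > 1 else ''
        if op == 'init':
            print(json.dumps({'status': 'Success'}))
        elif op in ('attach', 'detach', 'mount', 'unmount'):
            # A real driver would create/attach/mount the LVM volume
            # here, using the JSON options in the remaining arguments.
            print(json.dumps({'status': 'Success'}))
        else:
            print(json.dumps({'status': 'Not supported'}))
            sys.exit(1)

    if __name__ == '__main__':
        main()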
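Note on pre-provisioned environments: EnvironmentManagerEmpty cannot
discover LVM disks by itself, so config.underlay.lvm has to be supplied
through an external config file (the TESTS_CONFIGS variable), matching the
dict shape documented in its lvm_storages(). Assuming the usual oslo.config
INI syntax with a JSON value (an assumption, not something this patch
defines), the section would look roughly like:

    [underlay]
    lvm = {"node1": {"device": "vdb"}, "node2": {"device": "vdb"}}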