Add tests to check rabbit cluster

- Add a client to work with the rabbit cluster: queue creation,
message publishing, and cluster state checks
- Add failover tests for rabbit nodes

Change-Id: Ifde0aac45a43460d9af15442381bf0843bfe072e
asledzinskiy 2016-12-14 13:11:35 +02:00
parent 990ea5e627
commit ca1ad32f20
23 changed files with 610 additions and 59 deletions
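The failover tests added here all follow one pattern: take a rabbit node down (or restart it), wait for the running pod count to settle, then verify cluster membership and queue replication through the new client, and finally boot test VMs. A condensed sketch of that flow, using the pytest fixtures introduced in this change (hardware, underlay, k8s_actions, rabbit_client) and assuming namespace, remote and rabbit_node_ip are already resolved as in the tests below:

    # condensed failover flow (sketch of what test_rabbitmq.py below does)
    pods_before = k8s_actions.get_running_pods('rabbit', namespace)
    hardware.shutdown_node_by_ip(rabbit_node_ip)              # take one rabbit node down
    helpers.wait(lambda: len(k8s_actions.get_running_pods('rabbit', namespace))
                 == len(pods_before) - 1, timeout=600)        # wait for its pod to drop out
    queue = rabbit_client.create_queue()
    message = rabbit_client.publish_message_to_queue(queue)
    rabbit_client.check_queue_message(message)
    for pod in k8s_actions.get_running_pods('rabbit', namespace):
        assert rabbit_client.list_nodes(remote, pod.name, namespace) == len(pods_before) - 1
        rabbit_client.check_queue_replicated(queue, remote, pod.name, namespace)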

View File

@ -12,7 +12,7 @@ fuel_ccp_tests
├── managers
│ └── k8s
├── templates
│ ├── k8s_templates
│ ├── ccp_deploy_topology
│ ├── misc
│ └── registry_templates
└── tests
@ -47,7 +47,7 @@ Contains .yaml templates with environment configuration(virtual machines, networ
Tests Overview
================
The fuel-ccp-tests are run to verify that the completed software (CCP) functions according to the expectations defined by the requirements.
Depending on their purpose, the tests are divided into several categories.
@ -67,7 +67,7 @@ Consists from 2 categories **precommit** and **system**. The purpose of the sys
Test execution
--------------
To execute the tests, you need to set several variables, either via *export* or directly in the test command. Variables:
- ENV_NAME - prefix name for the env and VMs
- IMAGE_PATH - qcow2 image path
@ -78,4 +78,3 @@ To execute tests necessary to add value to several variables via *export* or in
After exporting execute the command:
py.test -vvv -s -k <test_name> or py.test -vvv -s -m <test_mark>

View File

@ -76,7 +76,7 @@ def ccpcluster(revert_snapshot, config, hardware,
path=settings.CCP_FETCH_CONFIG,
config=settings.CCP_FETCH_PARAMS)
with open(settings.TOPOLOGY_PATH, 'r') as f:
with open(config.ccp_deploy.topology_path, 'r') as f:
ccp_actions.put_raw_config(
path=settings.CCP_DEPLOY_TOPOLOGY,
content=f.read())

View File

@ -15,11 +15,13 @@
from copy import deepcopy
import os
import pytest
from devops.helpers import helpers
from fuel_ccp_tests import logger
from fuel_ccp_tests import settings
from fuel_ccp_tests.helpers import ext
from fuel_ccp_tests.helpers import post_os_deploy_checks
from fuel_ccp_tests.helpers import rabbit
from fuel_ccp_tests.managers.osmanager import OSManager
LOG = logger.logger
@ -37,12 +39,7 @@ def os_deployed(ccpcluster,
"""
osmanager = OSManager(config, underlay, k8s_actions, ccpcluster)
if not config.os.running:
LOG.info("Preparing openstack log collector fixture...")
topology = None
if config.os_deploy.stacklight_enable:
topology = ('/fuel_ccp_tests/templates/k8s_templates/'
'stacklight_topology.yaml')
osmanager.install_os(topology=topology)
osmanager.install_os()
hardware.create_snapshot(ext.SNAPSHOT.os_deployed)
else:
LOG.info("Openstack allready installed and running...")
@ -68,7 +65,7 @@ def galera_deployed(ccpcluster,
k8s_actions.create_registry()
ccpcluster.build()
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'3galera_1comp.yaml'
remote = underlay.remote(host=config.k8s.kube_host)
remote.upload(topology_path, '/tmp')
@ -93,3 +90,17 @@ def galera_deployed(ccpcluster,
config.os.running = True
hardware.create_snapshot(ext.SNAPSHOT.os_galera_deployed)
@pytest.fixture(scope='function')
def rabbit_client(underlay, config, os_deployed):
"""Deploy openstack
"""
host = config.k8s.kube_host
remote = underlay.remote(host=host)
rabbit_port = ''.join(remote.execute(
"kubectl get service --namespace ccp rabbitmq -o yaml |"
" awk '/nodePort: / {print $NF}'")['stdout'])
client = helpers.wait_pass(lambda: rabbit.RabbitClient(host, rabbit_port),
interval=60, timeout=360)
return client
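The fixture above scrapes the rabbitmq service nodePort with an awk pipeline and retries the connection until the broker answers. A purely illustrative equivalent of that lookup using kubectl's jsonpath output (same fixture variables; not part of this change):

    # hypothetical alternative to the awk pipeline above
    rabbit_port = ''.join(remote.execute(
        "kubectl get service --namespace ccp rabbitmq"
        " -o jsonpath='{.spec.ports[0].nodePort}'")['stdout']).strip()
    client = helpers.wait_pass(lambda: rabbit.RabbitClient(host, rabbit_port),
                               interval=60, timeout=360)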

View File

@ -0,0 +1,56 @@
import kombu
from fuel_ccp_tests import logger
from fuel_ccp_tests.helpers import utils
LOG = logger.logger
class RabbitClient(object):
def __init__(self, ip, port, user='rabbitmq', password='password'):
c = kombu.Connection("amqp://{0}:{1}@{2}:{3}//".format(user, password,
ip, port))
c.connect()
self.ch = c.channel()
def list_nodes(self, remote, pod, namespace):
output = ''.join(
remote.execute("kubectl exec -i {} --namespace={}"
" -- rabbitmqctl"
" cluster_status".format(pod,
namespace))['stdout'])
substring_ind = output.find('{running_nodes')
sub_end_ind = output.find('cluster_name')
result_str = output[substring_ind: sub_end_ind]
num_node = result_str.count("rabbit@")
return num_node
def check_queue_replicated(self, queue, remote, pod, namespace):
remote.check_call("kubectl exec -i {} --namespace={}"
" -- rabbitmqctl list_queues |"
" grep {}".format(pod, namespace,
queue))
def create_queue(self):
test_queue = 'test-rabbit-{}'.format(utils.rand_name())
q = kombu.Queue(test_queue, channel=self.ch, durable=False,
queue_arguments={"x-expires": 15 * 60 * 1000})
q.declare()
return test_queue
def publish_message_to_queue(self, queue):
uid = utils.generate_uuid()
producer = kombu.Producer(channel=self.ch, routing_key=queue)
producer.publish(uid)
return {'queue': queue, 'id': uid}
def check_queue_message(self, message):
q = kombu.Queue(message['queue'], channel=self.ch)
msg = q.get(True)
assert msg.body in message['id'],\
"Message body is {}, expected {}".format(msg.body, message['id'])
def delete_queue(self, queue):
q = kombu.Queue(queue, channel=self.ch)
q.delete()
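A minimal usage sketch for the client above, assuming a broker reachable at the given host/port with the default rabbitmq/password credentials (host and nodePort here are placeholders; the tests obtain them from the kubernetes service):

    from fuel_ccp_tests.helpers import rabbit

    client = rabbit.RabbitClient('10.0.0.10', 32123)     # placeholder host/nodePort
    queue = client.create_queue()                         # declares test-rabbit-<rand>
    message = client.publish_message_to_queue(queue)      # {'queue': ..., 'id': <uuid hex>}
    client.check_queue_message(message)                   # consumes it back and compares the body
    client.delete_queue(queue)

list_nodes() and check_queue_replicated() additionally need an SSH remote plus a rabbitmq pod name, since they shell out to rabbitmqctl via kubectl exec.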

View File

@ -14,10 +14,12 @@
import copy
import os
import random
import shutil
import tempfile
import time
import traceback
import uuid
import paramiko
import yaml
@ -483,3 +485,11 @@ def get_top_fixtures_marks(request, mark_name):
.format(top_fixtures_marks))
return top_fixtures_marks
def rand_name():
return str(random.randint(1, 0x7fffffff))
def generate_uuid():
return uuid.uuid4().hex

View File

@ -344,7 +344,11 @@ class K8SManager(object):
LOG.info('Added custom upstream DNS servers (dnsmasq) to the '
'settings: {0}'.format(k8s_settings['nameservers']))
def get_pods_number(self, pod_name, namespace=None):
def get_running_pods(self, pod_name, namespace=None):
pods = [pod for pod in self.api.pods.list(namespace=namespace)
if pod_name in pod.name]
if pod_name in pod.name and pod.status.phase == 'Running']
return pods
def get_pods_number(self, pod_name, namespace=None):
pods = self.get_running_pods(pod_name, namespace)
return len(pods)
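get_running_pods() filters by pod-name substring and the Running phase, and get_pods_number() now just counts that list. The rabbit tests below rely on both, roughly:

    namespace = settings.CCP_CONF['kubernetes']['namespace']
    rabbit_pods = k8s_actions.get_running_pods('rabbit', namespace)   # list of pod objects
    assert k8s_actions.get_pods_number('rabbit', namespace) == len(rabbit_pods)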

View File

@ -11,7 +11,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from fuel_ccp_tests import logger
from fuel_ccp_tests import settings
@ -32,8 +31,7 @@ class OSManager(object):
self.__k8s_actions = k8s_actions
self.__ccpcluster = ccpcluster
def install_os(self, topology=None,
check_os_ready=True):
def install_os(self, check_os_ready=True):
"""Action to deploy openstack by ccp tool
Additional steps:
@ -47,7 +45,7 @@ class OSManager(object):
LOG.info("Trying to install k8s")
"""
Deploy openstack with stacklight topology
Deploy openstack with provided topology
"""
LOG.info("Preparing openstack log collector fixture...")
if settings.REGISTRY == "127.0.0.1:31500":
@ -55,16 +53,6 @@ class OSManager(object):
self.__k8s_actions.create_registry()
LOG.info("Building images...")
self.__ccpcluster.build()
if topology:
LOG.info("Pushing topology yaml...")
LOG.warn(
"Patched topology used, workaround until kube 1.4 released")
topology_path = \
os.getcwd() + topology
self.__underlay.remote(
host=self.__config.k8s.kube_host).upload(
topology_path,
settings.DEPLOY_CONFIG)
LOG.info("Deploy openstack")
self.__ccpcluster.deploy()
if check_os_ready:
@ -77,8 +65,8 @@ class OSManager(object):
if check_jobs_ready:
LOG.info("Checking openstack jobs statuses...")
post_os_deploy_checks.check_jobs_status(self.__k8s_actions.api,
timeout=3600)
timeout=4500)
if check_pods_ready:
LOG.info("Checking openstack pods statuses...")
post_os_deploy_checks.check_pods_status(self.__k8s_actions.api,
timeout=3600)
timeout=4500)

View File

@ -11,3 +11,4 @@ psycopg2
python-k8sclient==0.3.0
junit-xml
elasticsearch>=2.0.0,<=3.0.0 # Apache-2.0
kombu>=3.0.25 # BSD

View File

@ -123,8 +123,8 @@ CCP_DEPLOY_CONFIG = '~/.ccp.deploy-config.yaml'
CCP_DEPLOY_TOPOLOGY = '~/.ccp.deploy-topology.yaml'
TOPOLOGY_PATH = os.environ.get('TOPOLOGY_PATH',
os.getcwd() + '/fuel_ccp_tests/templates/'
'k8s_templates/k8s_topology.yaml')
'ccp_deploy_topology/'
'default_deploy_topology.yaml')
FUEL_CCP_KEYSTONE_LOCAL_REPO = os.environ.get('FUEL_CCP_KEYSTONE_LOCAL_REPO',
None)

View File

@ -26,6 +26,9 @@ from fuel_ccp_tests import settings
_default_conf = pkg_resources.resource_filename(
__name__, 'templates/default.yaml')
_default_topology = pkg_resources.resource_filename(
__name__, 'templates/ccp_deploy_topology/default_deploy_topology.yaml')
hardware_opts = [
ct.Cfg('manager', ct.String(),
@ -103,7 +106,9 @@ ccp_deploy_opts = [
ct.Cfg('ccp_globals', ct.JSONDict(),
help="", default=None),
ct.Cfg('ccp_params', ct.JSONDict(),
help="", default=None)
help="", default=None),
ct.Cfg('topology_path', ct.String(),
help="", default=_default_topology),
]
# Access credentials to a ready CCP

View File

@ -7,6 +7,9 @@ nodes:
roles:
- compute
- openvswitch
node[1-3]:
roles:
- rabbitmq
roles:
controller:
- etcd
@ -27,7 +30,6 @@ roles:
- nova-consoleauth
- nova-novncproxy
- nova-scheduler
- rabbitmq
compute:
- nova-compute
- nova-libvirt
@ -35,3 +37,5 @@ roles:
- neutron-openvswitch-agent
- openvswitch-db
- openvswitch-vswitchd
rabbitmq:
- rabbitmq
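The scale and node-replacement tests below rewrite this node-to-role mapping on the deployed topology via underlay.yaml_editor and then redeploy; for example, scaling rabbitmq from node[1-3] to node[1-5] (taken from test_rabbitmq_scale):

    with underlay.yaml_editor(settings.CCP_DEPLOY_TOPOLOGY,
                              host=config.k8s.kube_host) as editor:
        del editor.content['nodes']['node[1-3]']
        editor.content['nodes']['node[1-5]'] = {'roles': ['rabbitmq']}
    ccpcluster.deploy()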

View File

@ -1,17 +0,0 @@
ccp-microservices-options:
- images-base-distro: debian
- images-maintainer: mos-microservices@mirantis.com
- repositories-protocol: https
- repositories-port: 443
- builder-push
- registry-address: {registry_address}
- logfile: /var/log/microservices.log
- verbose
- debug
- builder-workers: 1
- registry-insecure
- images-tag: {images_tag}
- deploy-config: {deploy_config}
- images-namespace: {images_namespace}
dry_run_options:
export_dir: {export_dir}

View File

@ -51,7 +51,8 @@ class TestServiceGlance(object):
ccpcluster.build()
topology_path = os.getcwd() + '/fuel_ccp_tests/templates/' \
'k8s_templates/k8s_topology.yaml'
'ccp_deploy_topology/' \
'default_deploy_topology.yaml'
remote.upload(topology_path, './')
with remote.get_sudo(remote):
ccpcluster.deploy()

View File

@ -63,7 +63,8 @@ class TestServiceHorizon(object):
ccpcluster.build()
topology_path = os.getcwd() + '/fuel_ccp_tests/templates/' \
'k8s_templates/k8s_topology.yaml'
'ccp_deploy_topology/' \
'default_deploy_topology.yaml'
remote.upload(topology_path, settings.CCP_CLI_PARAMS['deploy-config'])
ccpcluster.deploy()
post_os_deploy_checks.check_jobs_status(k8sclient, timeout=1500,

View File

@ -64,7 +64,8 @@ class TestPreStackLight(object):
ccpcluster.build(suppress_output=False)
topology_path = os.getcwd() + '/fuel_ccp_tests/templates/' \
'k8s_templates/stacklight_topology.yaml'
'ccp_deploy_topology/' \
'stacklight_topology.yaml'
remote.upload(topology_path, settings.CCP_CLI_PARAMS['deploy-config'])
ccpcluster.deploy()

View File

@ -49,7 +49,7 @@ class TestDeployTwoOS(base_test.SystemBaseTest):
k8s_actions.create_registry()
ccpcluster.build()
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'1ctrl_1comp.yaml'
remote = underlay.remote(host=config.k8s.kube_host)
remote.upload(topology_path, '/tmp')
@ -71,7 +71,7 @@ class TestDeployTwoOS(base_test.SystemBaseTest):
timeout=600)
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'1ctrl_1comp_diff.yaml'
remote.upload(topology_path, '/tmp')
conf = copy.deepcopy(settings.CCP_CONF)
@ -117,7 +117,7 @@ class TestDeployTwoOS(base_test.SystemBaseTest):
k8s_actions.create_registry()
ccpcluster.build()
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'1ctrl_1comp.yaml'
remote = underlay.remote(host=config.k8s.kube_host)
remote.upload(topology_path, '/tmp')
@ -139,7 +139,7 @@ class TestDeployTwoOS(base_test.SystemBaseTest):
timeout=600)
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'1ctrl_1comp_same.yaml'
remote.upload(topology_path, '/tmp')
conf = copy.deepcopy(settings.CCP_CONF)
@ -186,7 +186,7 @@ class TestDeployTwoOS(base_test.SystemBaseTest):
k8s_actions.create_registry()
ccpcluster.build()
topology_path = \
os.getcwd() + '/fuel_ccp_tests/templates/k8s_templates/' \
os.getcwd() + '/fuel_ccp_tests/templates/ccp_deploy_topology/' \
'1ctrl.yaml'
remote = underlay.remote(host=config.k8s.kube_host)
remote.upload(topology_path, '/tmp')

View File

@ -0,0 +1,487 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
import time
from devops.helpers import helpers
import base_test
from fuel_ccp_tests import logger
from fuel_ccp_tests import settings
from fuel_ccp_tests.helpers import post_os_deploy_checks
from fuel_ccp_tests.helpers import rabbit
LOG = logger.logger
class TestRabbitmq(base_test.SystemBaseTest):
""" Galera scale and destructive scenarios
"""
def get_pods(self, k8s):
return \
k8s.get_running_pods('rabbit',
settings.CCP_CONF['kubernetes']['namespace'])
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_deploy
@pytest.mark.rabbitmq
def test_rabbitmq(self, underlay, config,
k8s_actions, show_step,
os_deployed, rabbit_client):
"""Deploy rabbit cluster
Scenario:
1. Revert snapshot with deployed rabbit
2. Check rabbit cluster state
3. Check queue, messages replication
Duration 30 min
"""
show_step(2)
show_step(3)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
remote = underlay.remote(host=config.k8s.kube_host)
rabbit_pods = self.get_pods(k8s_actions)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in rabbit_pods:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(rabbit_pods),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(rabbit_pods), rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_shutdown
@pytest.mark.rabbitmq
def test_rabbitmq_shutdown_node(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Shutdown rabbitmq node
Scenario:
1. Revert snapshot with deployed rabbit
2. Shutdown one rabbit node
3. Check rabbit cluster state
4. Check queue, messages replication
5. Create 2 vms
Duration 30 min
"""
rabbit_node = underlay.node_names()[1]
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
show_step(2)
hardware.shutdown_node_by_ip(rabbit_node_ip)
show_step(3)
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods) - 1),
timeout=600,
timeout_msg='Timeout waiting for rabbit pod'
' to be terminated')
pods_after_shutdown = self.get_pods(k8s_actions)
remote = underlay.remote(host=config.k8s.kube_host)
show_step(4)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in pods_after_shutdown:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_shutdown),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_shutdown),
rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(5)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_cold_restart
@pytest.mark.rabbitmq
def test_rabbitmq_cold_restart_node(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Cold restart rabbitmq node
Scenario:
1. Revert snapshot with deployed rabbit
2. Cold restart one rabbit node
3. Check rabbit cluster state
4. Check queue, messages replication
5. Create 2 vms
Duration 30 min
"""
rabbit_node = underlay.node_names()[1]
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
show_step(2)
hardware.shutdown_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_offline(rabbit_node_ip, 90)
time.sleep(15)
hardware.start_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_online(rabbit_node_ip, 180)
show_step(3)
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods) - 1),
timeout=1200,
timeout_msg='Expected to have one pod destroyed'
' after reboot')
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods)),
timeout=1200,
timeout_msg='Expected pod to come back after reboot')
pods_after_reboot = self.get_pods(k8s_actions)
remote = underlay.remote(host=config.k8s.kube_host)
show_step(4)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in pods_after_reboot:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_reboot),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_reboot), rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(5)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_poweroff
@pytest.mark.rabbitmq
def test_rabbitmq_poweroff_node(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Poweroff rabbit node
Scenario:
1. Revert snapshot with deployed rabbit
2. Poweroff one rabbit node
3. Check rabbit cluster state
4. Check queue, messages replication
5. Create 2 vms
Duration 30 min
"""
rabbit_node = underlay.node_names()[1]
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
show_step(2)
underlay.sudo_check_call('shutdown +1', node_name=rabbit_node)
hardware.shutdown_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_offline(rabbit_node_ip, 90)
show_step(3)
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods) - 1),
timeout=600,
timeout_msg='Timeout waiting for rabbit pod'
' to be terminated')
pods_after_reboot = self.get_pods(k8s_actions)
remote = underlay.remote(host=config.k8s.kube_host)
show_step(4)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in pods_after_reboot:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_reboot),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_reboot), rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(5)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_soft_reboot
@pytest.mark.rabbitmq
def test_rabbitmq_soft_reboot_node(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Soft reboot rabbitmq node
Scenario:
1. Revert snapshot with deployed rabbit
2. Reboot one rabbit node
3. Check rabbit cluster state
4. Check queue, messages replication
5. Create 2 vms
Duration 30 min
"""
rabbit_node = underlay.node_names()[1]
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
show_step(2)
underlay.sudo_check_call('shutdown +1', node_name=rabbit_node)
hardware.shutdown_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_offline(rabbit_node_ip, 90)
time.sleep(15)
hardware.start_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_online(rabbit_node_ip, 180)
show_step(3)
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods)),
timeout=600,
timeout_msg='Timeout waiting for rabbit pod'
' to come back after reboot')
pods_after_reboot = self.get_pods(k8s_actions)
remote = underlay.remote(host=config.k8s.kube_host)
show_step(4)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in pods_after_reboot:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_reboot),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_reboot), rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(5)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_cluster_shutdown
@pytest.mark.rabbitmq
def test_rabbitmq_cluster_shutdown(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Rabbitmq cluster shutdown
Scenario:
1. Revert snapshot with deployed rabbit
2. Shutdown all rabbit nodes and start them one by one
3. Check rabbit cluster state
4. Check queue, messages replication
5. Create 2 vms
Duration 30 min
"""
rabbit_nodes = underlay.node_names()[:3]
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
rabbit_node_ips = []
show_step(2)
for rabbit_node in rabbit_nodes:
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
rabbit_node_ips.append(rabbit_node_ip)
hardware.shutdown_node_by_ip(rabbit_node_ip)
hardware.wait_node_is_offline(rabbit_node_ip, 90)
for rabbit_ip in rabbit_node_ips:
hardware.start_node_by_ip(rabbit_ip)
hardware.wait_node_is_online(rabbit_ip, 180)
show_step(3)
post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=2000)
post_os_deploy_checks.check_pods_status(k8s_actions.api)
pods_after_shutdown = self.get_pods(k8s_actions)
assert len(rabbit_pods) == len(pods_after_shutdown),\
"Different number of pods after shutdown, was {}," \
" now {}".format(len(rabbit_pods), len(pods_after_shutdown))
remote = underlay.remote(host=config.k8s.kube_host)
show_step(4)
host = config.k8s.kube_host
rabbit_port = ''.join(remote.execute(
"kubectl get service --namespace ccp rabbitmq -o yaml |"
" awk '/nodePort: / {print $NF}'")['stdout'])
rabbit_client = rabbit.RabbitClient(host, rabbit_port)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in pods_after_shutdown:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_shutdown),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_shutdown),
rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_scale_up_down
@pytest.mark.rabbitmq
def test_rabbitmq_scale(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Rabbit cluster scale
Scenario:
1. Revert snapshot with deployed rabbit
2. Scale up rabbit to 5 replicas
3. Check rabbit state
4. Check number of rabbit pods
5. Create 2 vms
6. Scale down rabbit to 3 replicas
7. Check rabbit state
8. Check number of rabbit pods
9. Create 2 vms
Duration 30 min
"""
show_step(2)
with underlay.yaml_editor(settings.CCP_DEPLOY_TOPOLOGY,
host=config.k8s.kube_host) as editor:
del editor.content['nodes']['node[1-3]']
editor.content['nodes']['node[1-5]'] = {'roles': ['rabbitmq']}
ccpcluster.deploy()
post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=2000)
post_os_deploy_checks.check_pods_status(k8s_actions.api)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
remote = underlay.remote(host=config.k8s.kube_host)
rabbit_pods = self.get_pods(k8s_actions)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
for pod in rabbit_pods:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(rabbit_pods),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(rabbit_pods), rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(4)
rabbit_pods = \
k8s_actions.get_pods_number('rabbit', namespace)
assert rabbit_pods == 5,\
"Expcted to have 5 rabbit pods, got {}".format(rabbit_pods)
show_step(5)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
show_step(6)
with underlay.yaml_editor(settings.CCP_DEPLOY_TOPOLOGY,
host=config.k8s.kube_host) as editor:
del editor.content['nodes']['node[1-5]']
editor.content['nodes']['node[1-3]'] = {'roles': ['rabbitmq']}
ccpcluster.deploy()
post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=2000)
post_os_deploy_checks.check_pods_status(k8s_actions.api)
show_step(7)
show_step(8)
rabbit_pods = \
k8s_actions.get_pods_number('rabbit', namespace)
assert rabbit_pods == 3,\
"Expcted to have 3 rabbit pods, got {}".format(rabbit_pods)
show_step(9)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)
@pytest.mark.fail_snapshot
@pytest.mark.rabbitmq_node_replacement
@pytest.mark.rabbitmq
def test_rabbitmq_node_replacement(self, hardware, underlay, config,
ccpcluster, k8s_actions, show_step,
os_deployed, rabbit_client):
"""Rabbitmq node replacement
Scenario:
1. Revert snapshot with deployed rabbit
2. Shutdown one rabbit node
3. Add new rabbit node to config
4. Re-deploy cluster
5. Check rabbit cluster state
6. Check queue, messages replication
7. Create 2 vms
Duration 30 min
"""
rabbit_node = underlay.node_names()[1]
rabbit_node_ip = underlay.host_by_node_name(rabbit_node)
namespace = settings.CCP_CONF["kubernetes"]["namespace"]
rabbit_pods = self.get_pods(k8s_actions)
show_step(2)
hardware.shutdown_node_by_ip(rabbit_node_ip)
helpers.wait(lambda: (len(self.get_pods(k8s_actions)) ==
len(rabbit_pods) - 1),
timeout=600,
timeout_msg='Timeout waiting for rabbit pod'
' to be terminated')
show_step(3)
with underlay.yaml_editor(settings.CCP_DEPLOY_TOPOLOGY,
host=config.k8s.kube_host) as editor:
del editor.content['nodes']['node[1-3]']
editor.content['nodes']['node[1-2]'] = {'roles': ['rabbitmq']}
editor.content['nodes']['node4'] = {'roles': ['rabbitmq', 'etcd']}
show_step(4)
ccpcluster.deploy()
post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=2000)
post_os_deploy_checks.check_pods_status(k8s_actions.api)
pods_after_replacement = self.get_pods(k8s_actions)
remote = underlay.remote(host=config.k8s.kube_host)
show_step(6)
queue = rabbit_client.create_queue()
message = rabbit_client.publish_message_to_queue(queue)
rabbit_client.check_queue_message(message)
show_step(5)
for pod in pods_after_replacement:
rabbit_cluster_nodes = rabbit_client.list_nodes(
remote, pod.name, namespace)
assert rabbit_cluster_nodes == len(pods_after_replacement),\
"Expected to have {} nodes in cluster," \
" got {}".format(len(pods_after_replacement),
rabbit_cluster_nodes)
rabbit_client.check_queue_replicated(queue, remote,
pod.name, namespace)
rabbit_client.delete_queue(queue)
show_step(7)
remote.check_call(
"source openrc-{}; bash fuel-ccp/tools/deploy-test-vms.sh -a"
" create".format(namespace), timeout=600)