Add failover test suite

* 3 tests from failover suite

Change-Id: I4b9833e22d9df831f356804b2623af8dfd109c81
Vasily Gorin 2016-10-03 17:20:17 +03:00 committed by ibumarskov
parent 042d03e583
commit 9757d7a0b0
4 changed files with 251 additions and 23 deletions


@@ -65,14 +65,14 @@ core
Steps
#####
-1. Log in to the Fuel with preinstalled plugin and deployed enviroment with 3 controllers and 1 compute.
+1. Log in to the Fuel with preinstalled plugin and deployed HA environment with 3 controllers, 1 compute and 1 compute-vmware nodes.
2. Log in to Horizon.
-3. Create vcenter VM and check connectivity to outside world from VM.
+3. Launch two instances in different AZs (nova and vcenter) and check connectivity to the outside world from the VMs.
4. Shut down the primary controller.
5. Ensure that VIPs are moved to another controller (see the sketch below).
-6. Ensure taht there is a connectivity to outside world from created VM.
+6. Ensure that there is connectivity to the outside world from the created VMs.
7. Create a new network and attach it to the default router.
-8. Create a vcenter VM with new network and check network connectivity via ICMP.
+8. Launch two instances in different AZs (nova and vcenter) on the new network and check network connectivity via ICMP.
Expected result
@@ -81,20 +81,20 @@ Expected result
Networking works correctly after failure of the primary controller.
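
For reference, steps 4-5 correspond to the pacemaker-based helper this
commit adds to the test code (see ``_get_controller_with_vip`` below); a
minimal sketch, reusing names from that code::

    # Record the VIP holder, shut down the primary controller, then
    # check that the management VIP moved to another online controller.
    vip_contr = self._get_controller_with_vip()
    self.fuel_web.warm_shutdown_nodes([primary_ctrl_devops])
    assert_true(self._get_controller_with_vip() != vip_contr,
                'VIPs have not been moved to another controller')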
-Check cluster functionality after reboot vcenter.
--------------------------------------------------
+Check cluster functionality after interrupting the connection with NSX Manager.
+-------------------------------------------------------------------------------
ID
##
-nsxt_reboot_vcenter
+nsxt_interrupt_connection
Description
###########
-Test verifies that system functionality is ok when vcenter has been rebooted.
+Test verifies that the cluster remains functional after the connection with NSX Manager is interrupted and then restored.
Complexity
@@ -107,23 +107,17 @@ Steps
#####
1. Log in to the Fuel with preinstalled plugin and deployed environment.
-2. Log in to Horizon.
-3. Launch vcenter instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
-4. Launch vcenter instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
-5. Check connection between VMs, send ping from VM_1 to VM_2 and vice verse.
-6. Reboot vcenter::
-
-       vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p pass
-       reset "[standard] vcenter/vcenter.vmx"
-
-7. Check that controller lost connection with vCenter.
-8. Wait for vCenter is online.
-9. Ensure that all instances from vCenter are displayed in dashboard.
-10. Ensure there is connectivity between vcenter1's and vcenter2's VMs.
-11. Run OSTF.
+2. Launch instances in each AZ with the default network.
+3. Disrupt the connection with NSX Manager and check that the controller has lost connection with NSX (see the sketch below).
+4. Try to create a new network.
+5. Restore the connection with NSX Manager.
+6. Try to create a new network again.
+7. Launch an instance in the created network.
+8. Ensure that all instances have connectivity to the external network.
+9. Run OSTF.
Expected result
###############
-Cluster should be deployed and all OSTF test cases should be passed. Ping should get response.
+After the connection with NSX Manager is restored, the cluster should be fully functional: all created VMs should be operable and all OSTF test cases should pass.
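
This scenario is not implemented in the code shown below; a hypothetical
sketch of how steps 3 and 5 could be automated (the iptables rule and the
``nsx_manager_ip``/controller-name parameters are assumptions, not part of
this commit)::

    def toggle_nsx_manager_connection(self, controller_names,
                                      nsx_manager_ip, block=True):
        """Block (or restore) controller traffic to the NSX Manager."""
        action = '-I' if block else '-D'
        cmd = 'iptables {0} OUTPUT -d {1} -j DROP'.format(action,
                                                          nsx_manager_ip)
        for name in controller_names:
            with self.fuel_web.get_ssh_for_node(name) as remote:
                remote.execute(cmd)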


@@ -45,6 +45,7 @@ def import_tests():
    from tests import test_plugin_nsxt  # noqa
    from tests import test_plugin_integration  # noqa
    from tests import test_plugin_scale  # noqa
    from tests import test_plugin_failover  # noqa
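    # importing a module is enough: proboscis registers its @test cases
    # at import time
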
def run_tests():

tests/base_plugin_test.py

@@ -15,14 +15,17 @@ under the License.
import os

from devops.helpers.ssh_client import SSHAuth
from proboscis.asserts import assert_true

from fuelweb_test import logger
from fuelweb_test.helpers import utils
from fuelweb_test.helpers.utils import pretty_log
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.settings import SSH_IMAGE_CREDENTIALS

from helpers import settings
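
# Guest SSH credentials for the CirrOS-based test images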
cirros_auth = SSHAuth(**SSH_IMAGE_CREDENTIALS)


class TestNSXtBase(TestBasic):
    """Base class for NSX-T plugin tests."""
@@ -30,6 +33,8 @@ class TestNSXtBase(TestBasic):
    def __init__(self):
        super(TestNSXtBase, self).__init__()
        self.default = settings
        self.vcenter_az = 'vcenter'
        self.vmware_image = 'TestVM-VMDK'

    def install_nsxt_plugin(self):
        """Download and install NSX-T plugin on master node.
@@ -90,3 +95,38 @@ class TestNSXtBase(TestBasic):
            expected=[1 if failover else 0],
            raise_on_err=not failover
        )
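        # failover=True: the CLI deletion is expected to fail (exit code 1)
        # because the plugin is still enabled for a deployed environment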

    def _get_controller_with_vip(self):
        """Return the name of the controller that holds the management VIP."""
        for node in self.env.d_env.nodes().slaves:
            ng_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
            if ng_node['online'] and 'controller' in ng_node['roles']:
                hosts_vip = self.fuel_web.get_pacemaker_resource_location(
                    ng_node['devops_name'], 'vip__management')
                logger.info('Now primary controller is '
                            '{}'.format(hosts_vip[0].name))
                return hosts_vip[0].name
        return None  # no online controller holds the VIP

    def ping_from_instance(self, src_floating_ip, dst_ip, primary,
                           size=56, count=1):
        """Ping the given address from an instance via its floating IP.

        :param src_floating_ip: floating ip address of instance
        :param dst_ip: destination ip address
        :param primary: name of the primary controller
        :param size: number of data bytes to be sent
        :param count: number of packets to be sent
        """
        with self.fuel_web.get_ssh_for_node(primary) as ssh:
            command = "ping -s {0} -c {1} {2}".format(size, count, dst_ip)
            # SSH to the controller, then hop to the instance's floating IP
            # with the guest credentials and run ping there
            ping = ssh.execute_through_host(
                hostname=src_floating_ip,
                cmd=command,
                auth=cirros_auth)
            logger.info("Ping result is {}".format(ping['stdout_str']))
            return 0 == ping['exit_code']
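
A quick usage sketch of the two helpers above (the floating IP is
illustrative)::

    primary = self._get_controller_with_vip()
    assert_true(self.ping_from_instance('172.16.0.130', '8.8.8.8', primary),
                'Ping failed')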

tests/test_plugin_failover.py

@@ -0,0 +1,193 @@
"""Copyright 2016 Mirantis, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from proboscis import test
from proboscis.asserts import assert_true

from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from fuelweb_test.helpers.os_actions import OpenStackActions
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import SERVTEST_PASSWORD
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test.settings import SERVTEST_USERNAME
from fuelweb_test.tests.base_test_case import SetupEnvironment
from system_test import logger

from tests.base_plugin_test import TestNSXtBase
from tests.test_plugin_nsxt import TestNSXtBVT


@test(groups=['nsxt_plugin', 'nsxt_failover'])
class TestNSXtFailover(TestNSXtBase):
    """NSX-T failover automated tests."""

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_uninstall_negative'])
    @log_snapshot_after_test
    def nsxt_uninstall_negative(self):
        """Check that the plugin cannot be removed while it is enabled.

        Scenario:
            1. Install NSX-T plugin on Fuel Master node with 5 slaves.
            2. Create new environment with enabled NSX-T plugin.
            3. Try to delete plugin via cli from master node.

        Duration: 10 min
        """
        # Install NSX-T plugin on Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        # Create new environment with enabled NSX-T plugin
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)
        self.enable_plugin(cluster_id)

        # Try to delete plugin via cli from master node
        self.show_step(3)
        self.delete_nsxt_plugin(failover=True)

    @test(depends_on=[TestNSXtBVT.nsxt_bvt],
          groups=['nsxt_shutdown_controller'])
    @log_snapshot_after_test
    def nsxt_shutdown_controller(self):
        """Check plugin functionality after shutdown of the primary controller.

        Scenario:
            1. Get access to OpenStack.
            2. Create VMs and check connectivity to the outside world
               from the VMs.
            3. Shut down the primary controller.
            4. Ensure that VIPs are moved to another controller.
            5. Ensure that there is connectivity to the outside world
               from the created VMs.
            6. Create a new network and attach it to the default router.
            7. Create VMs with the new network and check network
               connectivity via ICMP.

        Duration: 180 min
        """
        # Get access to OpenStack
        self.show_step(1)
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_conn = OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        # Launch one instance in each availability zone (nova and vcenter)
        # and check connectivity to the outside world from the VMs
        self.show_step(2)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.default.PRIVATE_NET)
        sec_group = os_conn.create_sec_group_for_ssh()

        vms = []
        vms.append(os_conn.create_server(
            net_id=net['id'],
            security_groups=[sec_group]))
        vms.append(os_conn.create_server(
            availability_zone=self.vcenter_az,
            image=image,
            net_id=net['id'],
            security_groups=[sec_group]))

        ips = []
        for vm in vms:
            floating = os_conn.assign_floating_ip(vm)
            wait(lambda: tcp_ping(floating.ip, 22),
                 timeout=180,
                 timeout_msg="Node {ip} is not accessible by SSH.".format(
                     ip=floating.ip))
            ips.append(floating.ip)

        # Remember which controller holds the VIPs before the shutdown
        vip_contr = self._get_controller_with_vip()

        for ip in ips:
            logger.info('Check connectivity from {0}'.format(ip))
            assert_true(self.ping_from_instance(ip, '8.8.8.8', vip_contr),
                        'Ping failed')

        # Shut down the primary controller
        self.show_step(3)
        primary_ctrl_devops = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        self.fuel_web.warm_shutdown_nodes([primary_ctrl_devops])

        # Ensure that VIPs are moved to another controller
        self.show_step(4)
        vip_contr_new = self._get_controller_with_vip()
        assert_true(vip_contr_new and vip_contr_new != vip_contr,
                    'VIPs have not been moved to another controller')
        logger.info('VIPs have been moved to another controller')

        # Ensure that there is still connectivity to the outside world
        # from the created VMs
        self.show_step(5)
        for ip in ips:
            logger.info('Check connectivity from {0}'.format(ip))
            assert_true(self.ping_from_instance(ip, '8.8.8.8', vip_contr_new),
                        'Ping failed')

        # Create a new network and attach it to the default router so that
        # it gets external connectivity
        self.show_step(6)
        net_1 = os_conn.create_network(network_name='net_1')['network']
        subnet_1 = os_conn.create_subnet(
            subnet_name='subnet_1',
            network_id=net_1['id'],
            cidr='192.168.77.0/24')
        default_router = os_conn.get_router(os_conn.get_network(
            self.default.ADMIN_NET))
        os_conn.add_router_interface(router_id=default_router['id'],
                                     subnet_id=subnet_1['id'])

        # Launch instances in both availability zones on the new network
        # and check ICMP connectivity
        self.show_step(7)
        vms = []
        vms.append(os_conn.create_server(
            net_id=net_1['id'],
            security_groups=[sec_group]))
        vms.append(os_conn.create_server(
            availability_zone=self.vcenter_az,
            image=image,
            net_id=net_1['id'],
            security_groups=[sec_group]))

        ips = []
        for vm in vms:
            floating = os_conn.assign_floating_ip(vm)
            wait(lambda: tcp_ping(floating.ip, 22),
                 timeout=180,
                 timeout_msg="Node {ip} is not accessible by SSH.".format(
                     ip=floating.ip))
            ips.append(floating.ip)

        for ip in ips:
            logger.info('Check connectivity from {0}'.format(ip))
            assert_true(self.ping_from_instance(ip, '8.8.8.8', vip_contr_new),
                        'Ping failed')