Tests for NSXv plugin.

Plugin tests have been moved to the plugin repositories. We need to
adapt the tests accordingly.

Partial-Bug: #1508139
Change-Id: Ib36836ca75da7f7ba47dada6f5cbc619350957b5
Andrey Setyaev 2015-11-18 08:22:59 +00:00
parent f1b2355fc4
commit 0b419ef938
7 changed files with 1541 additions and 0 deletions

3
.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "plugin_test/fuel-qa"]
path = plugin_test/fuel-qa
url = https://github.com/openstack/fuel-qa
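Because fuel-qa is consumed as a git submodule, it has to be fetched after cloning this repository before any test can run. A minimal checkout sequence might look like this (the clone URL and directory are placeholders for wherever this plugin repository lives):

git clone <plugin-repo-url>    # placeholder for this plugin repository
cd <plugin-repo-dir>           # placeholder
git submodule update --init plugin_test/fuel-qa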

13
plugin_test/__init__.py Normal file
View File

@@ -0,0 +1,13 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

1
plugin_test/fuel-qa Submodule

@@ -0,0 +1 @@
Subproject commit 77d0b5d8e1c69ec244bcbba9578c73b9834e8523

66
plugin_test/run_tests.py Normal file
View File

@@ -0,0 +1,66 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import os
import re
from nose.plugins import Plugin
from paramiko.transport import _join_lingering_threads
class CloseSSHConnectionsPlugin(Plugin):
"""Closes all paramiko's ssh connections after each test case
Plugin fixes proboscis disability to run cleanup of any kind.
'afterTest' calls _join_lingering_threads function from paramiko,
which stops all threads (set the state to inactive and joins for 10s)
"""
name = 'closesshconnections'
def options(self, parser, env=os.environ):
super(CloseSSHConnectionsPlugin, self).options(parser, env=env)
def configure(self, options, conf):
super(CloseSSHConnectionsPlugin, self).configure(options, conf)
self.enabled = True
def afterTest(self, *args, **kwargs):
_join_lingering_threads()
def import_tests():
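# Importing the test module registers its proboscis test groups as a side effect.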
from tests import test_plugin_nsxv
def run_tests():
from proboscis import TestProgram # noqa
import_tests()
# Run Proboscis and exit.
TestProgram(
addplugins=[CloseSSHConnectionsPlugin()]
).run_and_exit()
if __name__ == '__main__':
sys.path.append(sys.path[0] + "/fuel-qa")
import_tests()
from fuelweb_test.helpers.patching import map_test
if any(re.search(r'--group=patching_master_tests', arg)
for arg in sys.argv):
map_test('master')
elif any(re.search(r'--group=patching.*', arg) for arg in sys.argv):
map_test('environment')
run_tests()
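run_tests.py drives proboscis directly, so a single test group can be selected from the command line. A hypothetical local run of the smoke group could look like the sketch below; the NSXV_* variables mirror the ones read by TestNSXvPlugin further down, all values shown are placeholders, and the nose flags match the ones used by the CI script at the end of this change:

# all values below are placeholders, not working credentials or paths
export NSXV_PLUGIN_PATH=/tmp/nsxv-plugin.rpm
export NSXV_MANAGER_IP=172.16.0.10
export NSXV_USER=admin
export NSXV_PASSWORD=secret
python plugin_test/run_tests.py -q --nologcapture --with-xunit --group=nsxv_smoke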

0
plugin_test/tests/__init__.py Normal file
View File

971
plugin_test/tests/test_plugin_nsxv.py Normal file
View File

@@ -0,0 +1,971 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import os.path
from proboscis import test
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.common import Common
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.settings import SERVTEST_USERNAME
from fuelweb_test.settings import SERVTEST_PASSWORD
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.helpers import os_actions
@test(groups=["plugins", "nsxv_plugin"])
class TestNSXvPlugin(TestBasic):
"""NSXvPlugin""" # TODO documentation
_common = None
plugin_name = 'nsxv'
NSXV_PLUGIN_PATH = os.environ.get('NSXV_PLUGIN_PATH')
nsxv_manager_ip = os.environ.get('NSXV_MANAGER_IP')
nsxv_insecure = os.environ.get('NSXV_INSECURE') == 'true'
nsxv_user = os.environ.get('NSXV_USER')
nsxv_password = os.environ.get('NSXV_PASSWORD')
nsxv_datacenter_moid = os.environ.get('NSXV_DATACENTER_MOID')
nsxv_cluster_moid = os.environ.get('NSXV_CLUSTER_MOID')
nsxv_resource_pool_id = os.environ.get('NSXV_RESOURCE_POOL_ID')
nsxv_datastore_id = os.environ.get('NSXV_DATASTORE_ID')
nsxv_external_network = os.environ.get('NSXV_EXTERNAL_NETWORK')
nsxv_vdn_scope_id = os.environ.get('NSXV_VDN_SCOPE_ID')
nsxv_dvs_id = os.environ.get('NSXV_DVS_ID')
nsxv_backup_edge_pool = os.environ.get('NSXV_BACKUP_EDGE_POOL')
nsxv_mgt_net_moid = os.environ.get('NSXV_MGT_NET_MOID')
nsxv_mgt_net_proxy_ips = os.environ.get('NSXV_MGT_NET_PROXY_IPS')
nsxv_mgt_net_proxy_netmask = os.environ.get('NSXV_MGT_NET_PROXY_NETMASK')
nsxv_mgt_net_default_gw = os.environ.get('NSXV_MGT_NET_DEFAULT_GW')
nsxv_edge_ha = os.environ.get('NSXV_EDGE_HA') == 'true'
def node_name(self, name_node):
return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']
def get_settings(self):
cluster_settings = {'net_provider': 'neutron',
'assign_to_all_nodes': False,
'net_segment_type': NEUTRON_SEGMENT_TYPE}
return cluster_settings
def install_nsxv_plugin(self):
admin_remote = self.env.d_env.get_admin_remote()
checkers.upload_tarball(admin_remote, self.NSXV_PLUGIN_PATH, "/var")
checkers.install_plugin_check_code(admin_remote,
plugin=os.path.
basename(self.NSXV_PLUGIN_PATH))
def enable_plugin(self, cluster_id=None):
assert_true(
self.fuel_web.check_plugin_exists(cluster_id, self.plugin_name),
"Test aborted")
plugin_settings = {'metadata/enabled': True,
'nsxv_manager_host/value': self.nsxv_manager_ip,
'nsxv_insecure/value': self.nsxv_insecure,
'nsxv_user/value': self.nsxv_user,
'nsxv_password/value': self.nsxv_password,
'nsxv_datacenter_moid/value':
self.nsxv_datacenter_moid,
'nsxv_cluster_moid/value': self.nsxv_cluster_moid,
'nsxv_resource_pool_id/value':
self.nsxv_resource_pool_id,
'nsxv_datastore_id/value': self.nsxv_datastore_id,
'nsxv_external_network/value':
self.nsxv_external_network,
'nsxv_vdn_scope_id/value': self.nsxv_vdn_scope_id,
'nsxv_dvs_id/value': self.nsxv_dvs_id,
'nsxv_backup_edge_pool/value':
self.nsxv_backup_edge_pool,
'nsxv_mgt_net_moid/value': self.nsxv_mgt_net_moid,
'nsxv_mgt_net_proxy_ips/value':
self.nsxv_mgt_net_proxy_ips,
'nsxv_mgt_net_proxy_netmask/value':
self.nsxv_mgt_net_proxy_netmask,
'nsxv_mgt_net_default_gateway/value':
self.nsxv_mgt_net_default_gw,
'nsxv_edge_ha/value': self.nsxv_edge_ha}
self.fuel_web.update_plugin_data(cluster_id, self.plugin_name,
plugin_settings)
def create_instances(self, os_conn=None, vm_count=None, nics=None,
security_group=None):
"""Create Vms on available hypervisors
:param os_conn: type object, openstack
:param vm_count: type interger, count of VMs to create
:param nics: type dictionary, neutron networks
to assign to instance
:param security_group: type dictionary, security group to assign to
instances
"""
# Get lists of available images and flavors
images_list = os_conn.nova.images.list()
flavors_list = os_conn.nova.flavors.list()
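# Boot test instances only from the vCenter image (TestVM-VMDK)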
for image in images_list:
if image.name == 'TestVM-VMDK':
os_conn.nova.servers.create(
flavor=flavors_list[0],
name='test_{0}'.format(image.name),
image=image, min_count=vm_count,
availability_zone='vcenter',
nics=nics
)
# Verify that the current state of each VM is Active
srv_list = os_conn.get_servers()
for srv in srv_list:
assert_true(os_conn.get_instance_detail(srv).status != 'ERROR',
"Current state of Vm {0} is {1}".format(
srv.name, os_conn.get_instance_detail(srv).status))
try:
wait(
lambda:
os_conn.get_instance_detail(srv).status == "ACTIVE",
timeout=500)
except TimeoutError:
logger.error(
"Timeout is reached. Current state of VM {0} is {1}".format(
srv.name, os_conn.get_instance_detail(srv).status))
# assign security group
if security_group:
srv.add_security_group(security_group)
def check_connection_vms(self, os_conn=None, srv_list=None,
packets=3, remote=None, ip=None):
"""Check network connectivity between VMs with ping
:param os_conn: type object, openstack
:param srv_list: type list, instances
:param packets: type int, packets count of icmp reply
:param remote: SSHClient
:param ip: type list, remote ip to check by ping
"""
for srv in srv_list:
# VMs on different hypervisors should communicate between
# each other
if not remote:
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0]
)
remote = self.fuel_web.get_ssh_for_node(
primary_controller.name)
addresses = srv.addresses[srv.addresses.keys()[0]]
fip = [add['addr'] for add in addresses
if add['OS-EXT-IPS:type'] == 'floating'][0]
logger.info("Connect to VM {0}".format(fip))
res = -1
if not ip:
for s in srv_list:
if s != srv:
ip_2 = s.networks[s.networks.keys()[0]][0]
res = os_conn.execute_through_host(
remote, fip,
"ping -q -c3 {}"
"| grep -o '[0-9] packets received' | cut"
" -f1 -d ' '".format(ip_2))
else:
for ip_2 in ip:
if ip_2 != srv.networks[srv.networks.keys()[0]][0]:
res = os_conn.execute_through_host(
remote, fip,
"ping -q -c3 {}"
"| grep -o '[0-9] packets received' | cut"
" -f1 -d ' '".format(ip_2))
assert_true(
int(res) == packets,
"Ping VM {0} from VM {1},"
" received {2} icmp packets".format(ip_2, fip, res)
)
def check_service(self, ssh=None, commands=None):
"""Check that required nova services are running on controller
:param ssh: SSHClient
:param commands: type list, nova commands to execute on controller,
example of commands:
['nova-manage service list | grep vcenter-vmcluster1']
"""
ssh.execute('source openrc')
for cmd in commands:
wait(
lambda:
':-)' in list(ssh.execute(cmd)['stdout'])[-1].split(' '),
timeout=200)
def create_and_assign_floating_ip(self, os_conn=None, srv_list=None,
ext_net=None, tenant_id=None):
if not ext_net:
ext_net = [net for net
in os_conn.neutron.list_networks()["networks"]
if net['name'] == "net04_ext"][0]
if not tenant_id:
tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id
if not srv_list:
srv_list = os_conn.get_servers()
for srv in srv_list:
fip = os_conn.neutron.create_floatingip(
{'floatingip': {
'floating_network_id': ext_net['id'],
'tenant_id': tenant_id}})
os_conn.nova.servers.add_floating_ip(
srv, fip['floatingip']['floating_ip_address']
)
def get_common(self, cluster_id):
nsxv_ip = self.fuel_web.get_public_vip(cluster_id)
#if not self._common:
self._common = Common(
controller_ip=nsxv_ip, user=SERVTEST_USERNAME,
password=SERVTEST_PASSWORD, tenant=SERVTEST_TENANT
)
return self._common
def create_network(self, cluster_id, name):
common = self.get_common(cluster_id)
net_body = {"network": {"name": name,
}
}
network = common.neutron.create_network(net_body)['network']
return network
def create_net_public(self, cluster_id):
"""Create custom exteral net and subnet"""
common = self.get_common(cluster_id)
network = common.neutron.create_network(body={
'network': {
'name': 'net04_ext',
'admin_state_up': True,
'router:external': True,
'shared': True,
}
})
network_id = network['network']['id']
logger.debug("id {0} to master node".format(network_id))
common.neutron.create_subnet(body={
'subnet': {
'network_id': network_id,
'ip_version': 4,
'cidr': '172.16.0.0/24',
'name': 'subnet04_ext',
'allocation_pools': [{"start": "172.16.0.30",
"end": "172.16.0.40"}],
'gateway_ip': '172.16.0.1',
'enable_dhcp': False,
}
})
return network['network']
def add_router(self, cluster_id, router_name, ext_net, distributed=False,
router_type='shared'):
common = self.get_common(cluster_id)
gateway = {"network_id": ext_net["id"],
"enable_snat": True}
router_param = {'router': {'name': router_name,
'admin_state_up': True,
'router_type': router_type,
'distributed': distributed,
'external_gateway_info': gateway}}
router = common.neutron.create_router(body=router_param)['router']
return router
def add_subnet_to_router(self, cluster_id, router_id, sub_id):
common = self.get_common(cluster_id)
common.neutron.add_interface_router(
router_id,
{'subnet_id': sub_id}
)
def create_subnet(self, cluster_id, network, cidr):
common = self.get_common(cluster_id)
subnet_body = {"subnet": {"network_id": network['id'],
"ip_version": 4,
"cidr": cidr,
"name": 'subnet_{}'.format(
network['name']),
}
}
subnet = common.neutron.create_subnet(subnet_body)['subnet']
return subnet
def create_all_necessary_staff(self, cluster_id):
private_net = self.create_network(cluster_id, 'net04')
subnet_private = self.create_subnet(cluster_id, private_net, '10.100.0.0/24')
public_net = self.create_net_public(cluster_id)
router = self.add_router(cluster_id, 'connecting_router', public_net)
self.add_subnet_to_router(cluster_id, router['id'], subnet_private['id'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_smoke"])
#@log_snapshot_after_test
def nsxv_smoke(self):
"""Deploy a cluster with NSXv Plugin
Scenario:
1. Upload the plugin to master node
2. Create cluster and configure NSXv for that cluster
3. Provision one controller node
4. Deploy cluster with plugin
Duration 90 min
"""
self.env.revert_snapshot('ready_with_5_slaves', skip_timesync=True)
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings,
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign roles to nodes
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'], })
self.fuel_web.deploy_cluster_wait(cluster_id)
self.create_all_necessary_staff(cluster_id)
self.env.make_snapshot("deploy_nsxv", is_make=True)
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["nsxv_add_delete_nodes", "nsxv_plugin"])
@log_snapshot_after_test
def nsxv_add_delete_nodes(self):
"""Deploy cluster with plugin and vmware datastore backend
Scenario:
1. Upload plugins to the master node.
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 nodes with controller role, one node with compute-vmware
and one with cinder-vmware.
5. Remove the cinder-vmware node.
6. Add a node with cinder role.
7. Redeploy cluster.
8. Run OSTF.
Duration 3 hours
"""
self.env.revert_snapshot("ready_with_9_slaves", skip_timesync=True)
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings,
)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute-vmware'],
'slave-05': ['cinder-vmware'], })
target_node_1 = self.node_name('slave-04')
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True,
multiclusters=True,
target_node_1=target_node_1)
self.enable_plugin(cluster_id=cluster_id)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke'])
# Remove node with cinder-vmware role
self.fuel_web.update_nodes(
cluster_id,
{'slave-05': ['cinder-vmware'], }, False, True)
# Add 1 node with cinder role and redeploy cluster
self.fuel_web.update_nodes(
cluster_id,
{
'slave-06': ['cinder'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke'])
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=["nsxv_add_delete_controller", "nsxv_plugin"])
@log_snapshot_after_test
def nsxv_add_delete_controller(self):
"""Deploy cluster with plugin, adding and deletion controler node.
Scenario:
1. Upload plugins to the master node.
2. Install plugin.
3. Create cluster with vcenter.
4. Add 4 nodes with controller role.
5. Add 1 node with cinder-vmware role.
6. Add 1 node with compute-vmware role.
7. Deploy cluster.
8. Run OSTF.
9. Remove node with controller role.
10. Redeploy cluster.
11. Run OSTF.
12. Add node with controller role.
13. Redeploy cluster.
14. Run OSTF.
Duration 3.5 hours
"""
self.env.revert_snapshot("ready_with_9_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings,
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['controller'],
'slave-05': ['cinder-vmware'],
'slave-06': ['compute-vmware'], })
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'])
# Remove node with controller role
self.fuel_web.update_nodes(
cluster_id,
{'slave-04': ['controller'], }, False, True)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
# Fixme #1457515 in 8.0
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity', 'ha'],
should_fail=1,
failed_test_name=['Check that required services are running'])
# Add node with controller role
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['controller'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
# Fixme #1457515 in 8.0
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke', 'sanity', 'ha'],
should_fail=1,
failed_test_name=['Check that required services are running'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_reset_controller", 'nsxv_plugin'])
# @log_snapshot_after_test
def nsxv_reset_controller(self):
"""Verify that vmclusters should migrate after reset controller.
Scenario:
1. Upload plugins to the master node
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 node with controller role.
5. Add 2 node with compute role.
6. Deploy the cluster.
7. Launch instances.
8. Verify connection between VMs. Send ping
Check that ping get reply
9. Reset controller.
10. Check that vmclusters should be migrate to another controller.
11. Verify connection between VMs.
Send ping, check that ping get reply
Duration 1.8 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
# Configure cluster with 2 vCenter clusters
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
# Configure 2 vCenter clusters and vCenter glance
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'], })
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
# create security group with rules for ssh and ping
security_group = {}
security_group[os_conn.get_tenant(SERVTEST_TENANT).id] =\
os_conn.create_sec_group_for_ssh()
sec_group = security_group[
os_conn.get_tenant(SERVTEST_TENANT).id].id
network = os_conn.nova.networks.find(label='net04')
self.create_instances(
os_conn=os_conn, vm_count=1,
nics=[{'net-id': network.id}], security_group=sec_group)
# Verify connection between VMs: send ping and check that it gets a reply
self.create_and_assign_floating_ip(os_conn=os_conn)
srv_list = os_conn.get_servers()
self.check_connection_vms(os_conn=os_conn, srv_list=srv_list)
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0]
)
ssh = self.fuel_web.get_ssh_for_node(primary_controller.name)
cmds = ['nova-manage service list | grep vcenter-vmcluster1',
'nova-manage service list | grep vcenter-vmcluster2']
self.check_service(ssh=ssh, commands=cmds)
self.fuel_web.warm_restart_nodes(
[self.fuel_web.environment.d_env.get_node(
name=primary_controller.name)])
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[1]
)
ssh = self.fuel_web.get_ssh_for_node(primary_controller.name)
self.check_service(ssh=ssh, commands=cmds)
# Verify connection between VMs: send ping and check that it gets a reply
srv_list = os_conn.get_servers()
self.check_connection_vms(os_conn=os_conn, srv_list=srv_list)
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_shutdown_controller", 'nsxv_plugin'])
# @log_snapshot_after_test
def nsxv_shutdown_controller(self):
"""Verify that vmclusters should be migrate after shutdown controller.
Scenario:
1. Upload plugins to the master node
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 node with controller role.
5. Add 2 node with compute role.
6. Deploy the cluster.
7. Launch instances.
8. Verify connection between VMs. Send ping
Check that ping get reply
9. Shutdown controller.
10. Check that vmclusters should be migrate to another controller.
11. Verify connection between VMs.
Send ping, check that ping get reply
Duration 1.8 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings,
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True,
multiclusters=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute-vmware'],
'slave-05': ['compute-vmware']}
)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
self.create_all_necessary_staff(cluster_id)
# create security group with rules for ssh and ping
security_group = {}
security_group[os_conn.get_tenant(SERVTEST_TENANT).id] =\
os_conn.create_sec_group_for_ssh()
security_group = security_group[
os_conn.get_tenant(SERVTEST_TENANT).id].id
network = os_conn.nova.networks.find(label='net04')
self.create_instances(
os_conn=os_conn, vm_count=1,
nics=[{'net-id': network.id}], security_group=security_group)
# Verify connection between VMs: send ping and check that it gets a reply
self.create_and_assign_floating_ip(os_conn=os_conn)
srv_list = os_conn.get_servers()
self.check_connection_vms(os_conn=os_conn, srv_list=srv_list)
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0]
)
ssh = self.fuel_web.get_ssh_for_node(primary_controller.name)
cmds = ['nova-manage service list | grep vcenter-vmcluster1',
'nova-manage service list | grep vcenter-vmcluster2']
self.check_service(ssh=ssh, commands=cmds)
self.fuel_web.warm_shutdown_nodes(
[self.fuel_web.environment.d_env.get_node(
name=primary_controller.name)])
primary_controller = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[1]
)
ssh = self.fuel_web.get_ssh_for_node(primary_controller.name)
self.check_service(ssh=ssh, commands=cmds)
# Verify connection between VMs: send ping and check that it gets a reply
srv_list = os_conn.get_servers()
self.check_connection_vms(
os_conn=os_conn, srv_list=srv_list,
remote=ssh
)
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_ceilometer", "nsxv_plugin"])
@log_snapshot_after_test
def nsxv_ceilometer(self):
"""Deploy cluster with plugin and ceilometer
Scenario:
1. Upload plugins to the master node.
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 nodes with controller + mongo roles.
5. Add 2 nodes with compute role.
6. Deploy the cluster.
7. Run OSTF.
Duration 3 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
settings["ceilometer"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True,
multiclusters=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller', 'mongo'],
'slave-02': ['controller', 'mongo'],
'slave-03': ['controller', 'mongo'],
'slave-04': ['compute-vmware']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['smoke', 'tests_platform'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_ha_mode", "nsxv_plugin"])
@log_snapshot_after_test
def nsxv_ha_mode(self):
"""Deploy cluster with plugin in HA mode
Scenario:
1. Upload plugins to the master node
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 nodes with controller role.
5. Add 2 nodes with compute role.
6. Deploy the cluster.
7. Run OSTF.
Duration 2.5 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings,
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
multiclusters=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'], }
)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=80 * 60)
os_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(
os_ip, SERVTEST_USERNAME,
SERVTEST_PASSWORD,
SERVTEST_TENANT)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_ceph", "nsxv_plugin"])
# @log_snapshot_after_test
def nsxv_ceph(self):
"""Deploy cluster with plugin and ceph backend
Scenario:
1. Upload plugins to the master node.
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 nodes with controller role.
5. Add 1 node with compute + ceph-osd roles.
6. Add 1 node with cinder-vmware + ceph-osd roles.
7. Deploy the cluster
8. Run OSTF
Duration 2.5 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["images_vcenter"] = True
settings["volumes_lvm"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
vc_glance=True,
multiclusters=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['cinder'],
'slave-03': ['cinder'],
'slave-04': ['cinder-vmware'],
'slave-05': ['cinder-vmware']}
)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke'])
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["nsxv_ceph_no_vcenter", "nsxv_plugin"])
# @log_snapshot_after_test
def nsxv_ceph_no_vcenter(self):
"""Deploy cluster with plugin and ceph backend
Scenario:
1. Upload plugins to the master node.
2. Install plugin.
3. Create cluster with vcenter.
4. Add 3 nodes with controller role.
5. Add 1 node with compute + ceph-osd roles.
6. Add 1 node with cinder-vmware + ceph-osd roles.
7. Deploy the cluster
8. Run OSTF
Duration 2.5 hours
"""
self.env.revert_snapshot("ready_with_5_slaves")
self.install_nsxv_plugin()
settings = self.get_settings()
settings["volumes_ceph"] = True
# Configure cluster
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings=settings
)
# Configure VMware vCenter settings
self.fuel_web.vcenter_configure(cluster_id,
multiclusters=True)
self.enable_plugin(cluster_id=cluster_id)
# Assign role to node
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller'],
'slave-02': ['controller', 'ceph-osd'],
'slave-03': ['controller', 'ceph-osd'], }
)
self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
self.create_all_necessary_staff(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['smoke'])

View File

@@ -0,0 +1,487 @@
#!/bin/sh
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# error codes
INVALIDOPTS_ERR=100
NOJOBNAME_ERR=101
NOISOPATH_ERR=102
NOTASKNAME_ERR=103
NOWORKSPACE_ERR=104
DEEPCLEAN_ERR=105
MAKEISO_ERR=106
NOISOFOUND_ERR=107
COPYISO_ERR=108
SYMLINKISO_ERR=109
CDWORKSPACE_ERR=110
ISODOWNLOAD_ERR=111
INVALIDTASK_ERR=112
# Defaults
export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000}
export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true}
ShowHelp() {
cat << EOF
System Tests Script
It can perform several actions depending on the Jenkins JOB_NAME it is run from,
or it can take names from exported environment variables or command line options
if you need to override them.
-w (dir) - Path to workspace where fuelweb git repository was checked out.
Uses Jenkins' WORKSPACE if not set
-e (name) - Directly specify environment name used in tests
Uses the ENV_NAME variable if set.
-j (name) - Name of this job. Determines the ISO name and task name, and is used by tests.
Uses Jenkins' JOB_NAME if not set
-v - Do not use virtual environment
-V (dir) - Path to python virtual environment
-i (file) - Full path to ISO file to build or use for tests.
Made from iso dir and name if not set.
-t (name) - Name of the task this script should perform. Should be one of the defined ones.
Taken from the Jenkins job name suffix if not set.
-o (str) - Allows you to pass any extra command line options to the test job if you
want to use some parameters.
-a (str) - Allows you to pass NOSE_ATTR to the test job if you want
to use some parameters.
-A (str) - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes
as python expressions.
-m (name) - Use this mirror to build ISO from.
Uses 'srt' if not set.
-U - ISO URL for tests.
Null by default.
-r (yes/no) - Should the built ISO file be placed with a build number tag and
symlinked to the last build, or just copied over the last file.
-b (num) - Allows you to override Jenkins' build number if you need to.
-l (dir) - Path to logs directory. Can be set by the LOGS_DIR environment variable.
Uses WORKSPACE/logs if not set.
-d - Dry run mode. Only show what would be done and do nothing.
Useful for debugging.
-k - Keep previously created test environment before tests run
-K - Keep test environment after tests are finished
-h - Show this help page
Most variables use guesses from the Jenkins job name but can be overridden
by an exported variable before the script is run or by one of the command line options.
You can override the following variables using export VARNAME="value" before running this script
WORKSPACE - path to directory where Fuelweb repository was checked out by Jenkins or manually
JOB_NAME - name of Jenkins job that determines which task should be done and ISO file name.
If task name is "iso" it will make iso file
Other defined names will run Nose tests using previously built ISO file.
ISO file name is taken from job name prefix
Task name is taken from job name suffix
Separator is one dot '.'
For example if JOB_NAME is:
mytest.somestring.iso
ISO name: mytest.iso
Task name: iso
If run with such a JOB_NAME, an ISO file named mytest.iso will be created
If JOB_NAME is:
mytest.somestring.node
ISO name: mytest.iso
Task name: node
If the script is run with this JOB_NAME, node tests will use the ISO file mytest.iso.
First you should run the mytest.somestring.iso job to create mytest.iso.
Then you can run the mytest.somestring.node job to start tests using mytest.iso, and other tests too.
EOF
}
GlobalVariables() {
# where built iso's should be placed
# use hardcoded default if not set before by export
ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}"
# name of iso file
# taken from jenkins job prefix
# if not set before by variable export
if [ -z "${ISO_NAME}" ]; then
ISO_NAME="${JOB_NAME%.*}.iso"
fi
# full path where iso file should be placed
# make from iso name and path to iso shared directory
# if it was not overridden by options or export
if [ -z "${ISO_PATH}" ]; then
ISO_PATH="${ISO_DIR}/${ISO_NAME}"
fi
# what task should be run
# it's taken from jenkins job name suffix if not set by options
if [ -z "${TASK_NAME}" ]; then
TASK_NAME="${JOB_NAME##*.}"
fi
# do we want to keep iso's for each build or just copy over single file
ROTATE_ISO="${ROTATE_ISO:=yes}"
# choose mirror to build iso from. Default is 'srt' for Saratov's mirror
# you can change mirror by exporting USE_MIRROR variable before running this script
USE_MIRROR="${USE_MIRROR:=srt}"
# only show what commands would be executed but do nothing
# this feature is useful if you want to debug this script's behaviour
DRY_RUN="${DRY_RUN:=no}"
VENV="${VENV:=yes}"
}
GetoptsVariables() {
while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:dkKe:v:h" opt; do
case $opt in
w)
WORKSPACE="${OPTARG}"
;;
j)
JOB_NAME="${OPTARG}"
;;
i)
ISO_PATH="${OPTARG}"
;;
t)
TASK_NAME="${OPTARG}"
;;
o)
TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}"
;;
a)
NOSE_ATTR="${OPTARG}"
;;
A)
NOSE_EVAL_ATTR="${OPTARG}"
;;
m)
USE_MIRROR="${OPTARG}"
;;
U)
ISO_URL="${OPTARG}"
;;
r)
ROTATE_ISO="${OPTARG}"
;;
b)
BUILD_NUMBER="${OPTARG}"
;;
V)
VENV_PATH="${OPTARG}"
;;
l)
LOGS_DIR="${OPTARG}"
;;
k)
KEEP_BEFORE="yes"
;;
K)
KEEP_AFTER="yes"
;;
e)
ENV_NAME="${OPTARG}"
;;
d)
DRY_RUN="yes"
;;
v)
VENV="no"
;;
h)
ShowHelp
exit 0
;;
\?)
echo "Invalid option: -$OPTARG"
ShowHelp
exit $INVALIDOPTS_ERR
;;
:)
echo "Option -$OPTARG requires an argument."
ShowHelp
exit $INVALIDOPTS_ERR
;;
esac
done
}
CheckVariables() {
if [ -z "${JOB_NAME}" ]; then
echo "Error! JOB_NAME is not set!"
exit $NOJOBNAME_ERR
fi
if [ -z "${ISO_PATH}" ]; then
echo "Error! ISO_PATH is not set!"
exit $NOISOPATH_ERR
fi
if [ -z "${TASK_NAME}" ]; then
echo "Error! TASK_NAME is not set!"
exit $NOTASKNAME_ERR
fi
if [ -z "${WORKSPACE}" ]; then
echo "Error! WORKSPACE is not set!"
exit $NOWORKSPACE_ERR
fi
}
MakeISO() {
# Create iso file to be used in tests
# clean previous garbage
if [ "${DRY_RUN}" = "yes" ]; then
echo make deep_clean
else
make deep_clean
fi
ec="${?}"
if [ "${ec}" -gt "0" ]; then
echo "Error! Deep clean failed!"
exit $DEEPCLEAN_ERR
fi
# create ISO file
export USE_MIRROR
if [ "${DRY_RUN}" = "yes" ]; then
echo make iso
else
make iso
fi
ec=$?
if [ "${ec}" -gt "0" ]; then
echo "Error making ISO!"
exit $MAKEISO_ERR
fi
if [ "${DRY_RUN}" = "yes" ]; then
ISO="${WORKSPACE}/build/iso/fuel.iso"
else
ISO="`ls ${WORKSPACE}/build/iso/*.iso | head -n 1`"
# check that ISO file exists
if [ ! -f "${ISO}" ]; then
echo "Error! ISO file not found!"
exit $NOISOFOUND_ERR
fi
fi
# copy ISO file to storage dir
# if rotation is enabled and build number is available
# save iso to tagged file and symlink to the last build
# if rotation is not enabled just copy iso to iso_dir
if [ "${ROTATE_ISO}" = "yes" -a "${BUILD_NUMBER}" != "" ]; then
# copy iso file to shared dir with revision tagged name
NEW_BUILD_ISO_PATH="${ISO_PATH%.iso}_${BUILD_NUMBER}.iso"
if [ "${DRY_RUN}" = "yes" ]; then
echo cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
else
cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
fi
ec=$?
if [ "${ec}" -gt "0" ]; then
echo "Error! Copy ${ISO} to ${NEW_BUILD_ISO_PATH} failed!"
exit $COPYISO_ERR
fi
# create symlink to the last built ISO file
if [ "${DRY_RUN}" = "yes" ]; then
echo ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
else
ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
fi
ec=$?
if [ "${ec}" -gt "0" ]; then
echo "Error! Create symlink from ${NEW_BUILD_ISO_PATH} to ${ISO_PATH} failed!"
exit $SYMLINKISO_ERR
fi
else
# just copy file to shared dir
if [ "${DRY_RUN}" = "yes" ]; then
echo cp "${ISO}" "${ISO_PATH}"
else
cp "${ISO}" "${ISO_PATH}"
fi
ec=$?
if [ "${ec}" -gt "0" ]; then
echo "Error! Copy ${ISO} to ${ISO_PATH} failed!"
exit $COPYISO_ERR
fi
fi
if [ "${ec}" -gt "0" ]; then
echo "Error! Copy ISO from ${ISO} to ${ISO_PATH} failed!"
exit $COPYISO_ERR
fi
echo "Finished building ISO: ${ISO_PATH}"
exit 0
}
CdWorkSpace() {
# chdir into workspace or fail if could not
if [ "${DRY_RUN}" != "yes" ]; then
cd "${WORKSPACE}"
ec=$?
if [ "${ec}" -gt "0" ]; then
echo "Error! Cannot cd to WORKSPACE!"
exit $CDWORKSPACE_ERR
fi
else
echo cd "${WORKSPACE}"
fi
}
RunTest() {
# Run test selected by task name
# check if iso file exists
if [ ! -f "${ISO_PATH}" ]; then
if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then
echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!"
exit $NOISOFOUND_ERR
else
if [ "${DRY_RUN}" = "yes" ]; then
echo wget -c ${ISO_URL} -O ${ISO_PATH}
else
echo "No ${ISO_PATH} found. Trying to download file."
wget -c ${ISO_URL} -O ${ISO_PATH}
rc=$?
if [ $rc -ne 0 ]; then
echo "Failed to fetch ISO from ${ISO_URL}"
exit $ISODOWNLOAD_ERR
fi
fi
fi
fi
if [ -z "${VENV_PATH}" ]; then
VENV_PATH="/home/jenkins/venv-nailgun-tests"
fi
# run python virtualenv
if [ "${VENV}" = "yes" ]; then
if [ "${DRY_RUN}" = "yes" ]; then
echo . $VENV_PATH/bin/activate
else
. $VENV_PATH/bin/activate
fi
fi
if [ "${ENV_NAME}" = "" ]; then
ENV_NAME="${JOB_NAME}_system_test"
fi
if [ "${LOGS_DIR}" = "" ]; then
LOGS_DIR="${WORKSPACE}/logs"
fi
if [ ! -d "${LOGS_DIR}" ]; then
mkdir -p "${LOGS_DIR}"
fi
export ENV_NAME
export LOGS_DIR
export ISO_PATH
if [ "${KEEP_BEFORE}" != "yes" ]; then
# remove previous environment
if [ "${DRY_RUN}" = "yes" ]; then
echo dos.py erase "${ENV_NAME}"
else
if dos.py list | grep -q "^${ENV_NAME}\$"; then
dos.py erase "${ENV_NAME}"
fi
fi
fi
# gather additional option for this nose test run
OPTS=""
if [ -n "${NOSE_ATTR}" ]; then
OPTS="${OPTS} -a ${NOSE_ATTR}"
fi
if [ -n "${NOSE_EVAL_ATTR}" ]; then
OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}"
fi
if [ -n "${TEST_OPTIONS}" ]; then
OPTS="${OPTS} ${TEST_OPTIONS}"
fi
# run python test set to create environments, deploy and test product
if [ "${DRY_RUN}" = "yes" ]; then
echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
echo python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS}
else
export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
echo ${PYTHONPATH}
python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS}
fi
ec=$?
# Extract logs using fuel_logs utility
if [ "${FUELLOGS_TOOL}" != "no" ]; then
for logfile in $(find "${LOGS_DIR}" -name "fail*.tar.xz" -type f);
do
./fuel-qa/utils/jenkins/fuel_logs.py "${logfile}" > "${logfile}.filtered.log"
done
fi
if [ "${KEEP_AFTER}" != "yes" ]; then
# remove environment after tests
if [ "${DRY_RUN}" = "yes" ]; then
echo dos.py destroy "${ENV_NAME}"
else
dos.py destroy "${ENV_NAME}"
fi
fi
exit "${ec}"
}
RouteTasks() {
# this selector defines task names that are recognised by this script
# and runs corresponding jobs for them
# running any jobs should exit this script
case "${TASK_NAME}" in
test)
RunTest
;;
iso)
MakeISO
;;
*)
echo "Unknown task: ${TASK_NAME}!"
exit $INVALIDTASK_ERR
;;
esac
exit 0
}
# MAIN
# first we want to get variables from command line options
GetoptsVariables "${@}"
# then we define global variables and their defaults when needed
GlobalVariables
# check that we have all critical variables set
CheckVariables
# next we chdir into our working directory unless we dry run
CdWorkSpace
# finally we can choose what to do according to TASK_NAME
RouteTasks
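For reference, a hypothetical manual invocation of this script that runs the nsxv_smoke group against an already-built ISO might look as follows (option letters as defined in GetoptsVariables; the script name, paths and job name are placeholders):

sh system_tests.sh -t test \
-w /home/jenkins/workspace/nsxv \
-j nsxv.nightly.test \
-i /var/www/fuelweb-iso/fuel.iso \
-o --group=nsxv_smoke \
-K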