Migrate group thread_1 to pytest scope

Implements blueprint migrate-fuel-qa-from-proboscis-nose-frameworks

Change-Id: I23231967b722dc8c1fe2cf2faa303f8c4a9df871
This commit is contained in:
Dmitry Tyzhnenko 2016-05-17 10:43:44 +03:00
parent 628136142e
commit 4e11179097
18 changed files with 1075 additions and 42 deletions

View File

@ -234,7 +234,14 @@ class Manager(Basic):
"""Bootstrap slave nodes."""
logger.info("Getting ready slaves")
slaves = slaves or int(self.full_config['template']['slaves'])
if not slaves:
if hasattr(self._context, 'cluster_config'):
slaves = len(self._context.cluster_config.get('nodes'))
elif self.full_config:
slaves = int(self.full_config['template']['slaves'])
else:
logger.error("Unable to count slaves")
raise RuntimeError("Unable to count slaves")
snapshot_name = "ready_with_{}_slaves".format(slaves)
if self.check_run(snapshot_name):
self.env.revert_snapshot(snapshot_name)

View File

@ -54,7 +54,19 @@ def manager(request, config_file):
@pytest.fixture(scope='function', autouse=True)
def snapshot(request):
"""Fixture which provide getting of artifacs after test."""
"""Fixture which provide getting of artifacs after test.
Markers:
get_logs - create snapshot with logs
fail_snapshot - create environment snapshot
Example:
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
def test_ha_deploy():
pass
"""
get_logs = request.keywords.get('get_logs', None)
fail_snapshot = request.keywords.get('fail_snapshot', None)
@ -91,14 +103,32 @@ def prepare(request):
Provided two marker behaviour:
need_ready_cluster marker if test need already deployed cluster
need_ready_slaves marker if test need already provisioned slaves
need_ready_release marker if test need already provisioned slaves
need_ready_master marker if test need already provisioned slaves
Example:
@pytest.mark.need_ready_cluster
def test_ha_deploy():
pass
@pytest.mark.need_ready_slaves
def test_ha_deploy():
pass
"""
need_ready_cluster = request.keywords.get('need_ready_cluster', None)
need_ready_slaves = request.keywords.get('need_ready_slaves', None)
need_ready_release = request.keywords.get('need_ready_release', None)
need_ready_master = request.keywords.get('need_ready_master', None)
if need_ready_cluster:
request.instance.manager.get_ready_cluster()
if need_ready_slaves:
elif need_ready_slaves:
request.instance.manager.get_ready_slaves()
elif need_ready_release:
request.instance.manager.get_ready_release()
elif need_ready_master:
request.instance.manager.get_ready_setup()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
@ -128,12 +158,15 @@ def pytest_runtest_setup(item):
def pytest_runtest_teardown(item):
"""Hook which run after test."""
step_name = item.function.__name__
spent_time = time.time() - item._start_time
if hasattr(item, '_start_time'):
spent_time = time.time() - item._start_time
else:
spent_time = 0
minutes = spent_time // 60
# pylint: disable=round-builtin
seconds = int(round(spent_time)) % 60
# pylint: enable=round-builtin
finish_step = "FINISH {} STEP TOOK {} min {} sec".format(
finish_step = "FINISH {} TEST. TOOK {} min {} sec".format(
step_name, minutes, seconds)
foot = "\n" + "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5
foot = foot.format(finish_step)

View File

@ -0,0 +1,91 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import pytest
from devops.helpers.helpers import http
from devops.helpers.helpers import wait
from fuelweb_test import logger
from fuelweb_test.helpers.ssh_manager import SSHManager
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves.xmlrpc_client import ServerProxy
# pylint: enable=import-error
# pylint: disable=no-member
# pylint: disable=no-self-use
ssh_manager = SSHManager()
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_master
@pytest.mark.thread_1
class TestAdminNode(object):
"""TestAdminNode.""" # TODO documentation
@pytest.mark.test_cobbler_alive
def test_cobbler_alive(self):
"""Test current installation has correctly setup cobbler
API and cobbler HTTP server are alive
Scenario:
1. Revert snapshot "empty"
2. test cobbler API and HTTP server through send http request
Duration 1m
"""
wait(
lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api',
waited_code=501),
timeout=60
)
server = ServerProxy(
'http://%s/cobbler_api' % self.env.get_admin_node_ip())
config = self.env.admin_actions.get_fuel_settings()
username = config['cobbler']['user']
password = config['cobbler']['password']
# raises an error if something isn't right
server.login(username, password)
@pytest.mark.test_astuted_alive
def test_astuted_alive(self):
"""Test astute master and worker processes are alive on master node
Scenario:
1. Revert snapshot "empty"
2. Search for master and child processes
Duration 1m
"""
ps_output = ssh_manager.execute(
ssh_manager.admin_ip, 'ps ax')['stdout']
astute_master = [
master for master in ps_output if 'astute master' in master]
logger.info("Found astute processes: {:s}".format(astute_master))
assert len(astute_master) == 1
astute_workers = [
worker for worker in ps_output if 'astute worker' in worker]
logger.info(
"Found {length:d} astute worker processes: {workers!s}"
"".format(length=len(astute_workers), workers=astute_workers))
assert len(astute_workers) > 1

View File

@ -0,0 +1,193 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import re
import pytest
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.helpers.eb_tables import Ebtables
# pylint: disable=no-member
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestNodeDiskSizes(object):
"""TestNodeDiskSizes.""" # TODO documentation
cluster_config = {
'name': "TestNodeDiskSizes",
'mode': settings.DEPLOYMENT_MODE,
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
}
@pytest.mark.need_ready_slaves
@pytest.mark.check_nodes_notifications
def test_check_nodes_notifications(self):
"""Verify nailgun notifications for discovered nodes
Scenario:
1. Setup master and bootstrap 3 slaves
2. Verify hard drive sizes for discovered nodes in /api/nodes
3. Verify hard drive sizes for discovered nodes in notifications
Duration 5m
"""
# self.env.revert_snapshot("ready_with_3_slaves")
fuel_web = self.manager.fuel_web
# assert /api/nodes
disk_size = settings.NODE_VOLUME_SIZE * 1024 ** 3
nailgun_nodes = fuel_web.client.list_nodes()
for node in nailgun_nodes:
for disk in node['meta']['disks']:
assert disk['size'] == disk_size, 'Disk size'
hdd_size = "{0:.3} TB HDD".format((disk_size * 3 / (10 ** 9)) / 1000)
notifications = fuel_web.client.get_notifications()
for node in nailgun_nodes:
# assert /api/notifications
for notification in notifications:
discover = notification['topic'] == 'discover'
current_node = notification['node_id'] == node['id']
if current_node and discover and \
"discovered" in notification['message']:
assert hdd_size in notification['message'], (
'"{size} not found in notification message '
'"{note}" for node {node} '
'(hostname {host})!'.format(
size=hdd_size,
note=notification['message'],
node=node['name'],
host=node['hostname']))
# assert disks
disks = fuel_web.client.get_node_disks(node['id'])
for disk in disks:
expected_size = settings.NODE_VOLUME_SIZE * 1024 - 500
assert disk['size'] == expected_size, (
'Disk size {0} is not equals expected {1}'.format(
disk['size'], expected_size))
@pytest.mark.check_nodes_disks
@pytest.mark.need_ready_cluster
def test_check_nodes_disks(self):
"""Verify hard drive sizes for deployed nodes
Scenario:
1. Create cluster
2. Add 1 controller
3. Add 1 compute
4. Add 1 cinder
5. Deploy cluster
6. Verify hard drive sizes for deployed nodes
7. Run network verify
8. Run OSTF
Duration 15m
"""
cluster_id = self._storage['cluster_id']
fuel_web = self.manager.fuel_web
self.manager.show_step(1)
self.manager.show_step(2)
self.manager.show_step(3)
self.manager.show_step(4)
self.manager.show_step(5)
self.manager.show_step(6)
# assert node disks after deployment
for node_name in self.cluster_config['nodes']:
str_block_devices = fuel_web.get_cluster_block_devices(
node_name)
logger.debug("Block device:\n{}".format(str_block_devices))
expected_regexp = re.compile(
"vda\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
settings.NODE_VOLUME_SIZE))
assert expected_regexp.search(str_block_devices), (
"Unable to find vda block device for {}G in: {}".format(
settings.NODE_VOLUME_SIZE, str_block_devices))
expected_regexp = re.compile(
"vdb\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
settings.NODE_VOLUME_SIZE))
assert expected_regexp.search(str_block_devices), (
"Unable to find vdb block device for {}G in: {}".format(
settings.NODE_VOLUME_SIZE, str_block_devices))
expected_regexp = re.compile(
"vdc\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
settings.NODE_VOLUME_SIZE))
assert expected_regexp.search(str_block_devices), (
"Unable to find vdc block device for {}G in: {}".format(
settings.NODE_VOLUME_SIZE, str_block_devices))
self.manager.show_step(7)
fuel_web.verify_network(cluster_id)
self.manager.show_step(6)
fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestMultinicBootstrap(object):
"""MultinicBootstrap.""" # TODO documentation
@pytest.mark.multinic_bootstrap_booting
@pytest.mark.need_ready_release
@pytest.mark.check_nodes_disks
def test_multinic_bootstrap_booting(self):
"""Verify slaves booting with blocked mac address
Scenario:
1. Revert snapshot "ready"
2. Block traffic for first slave node (by mac)
3. Restore mac addresses and boot first slave
4. Verify slave mac addresses is equal to unblocked
Duration 2m
"""
slave = self.env.d_env.get_node(name='slave-01')
mac_addresses = [interface.mac_address for interface in
slave.interfaces.filter(network__name='internal')]
try:
for mac in mac_addresses:
Ebtables.block_mac(mac)
for mac in mac_addresses:
Ebtables.restore_mac(mac)
slave.destroy()
self.env.d_env.get_node(name='admin').revert("ready")
nailgun_slave = self.env.bootstrap_nodes([slave])[0]
assert mac.upper() == nailgun_slave['mac'].upper()
Ebtables.block_mac(mac)
finally:
for mac in mac_addresses:
Ebtables.restore_mac(mac)

View File

@ -0,0 +1,152 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from fuelweb_test import settings
from fuelweb_test.settings import iface_alias
# pylint: disable=no-member
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestL2NetworkConfig(object):
"""TestL2NetworkConfig.""" # TODO documentation
cluster_config = {
'name': "TestL2NetworkConfig",
'mode': settings.DEPLOYMENT_MODE,
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
}
@pytest.mark.need_ready_slaves
@pytest.mark.deploy_node_multiple_interfaces
def test_deploy_node_multiple_interfaces(self):
"""Deploy cluster with networks allocated on different interfaces
Scenario:
1. Create cluster in Ha mode
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with cinder role
5. Split networks on existing physical interfaces
6. Deploy the cluster
7. Verify network configuration on each deployed node
8. Run network verification
Duration 25m
Snapshot: deploy_node_multiple_interfaces
"""
# self.env.revert_snapshot("ready_with_3_slaves")
fuel_web = self.manager.fuel_web
interfaces_dict = {
iface_alias('eth1'): ['public'],
iface_alias('eth2'): ['storage'],
iface_alias('eth3'): ['private'],
iface_alias('eth4'): ['management'],
}
self.manager.show_step(1)
cluster_id = fuel_web.create_cluster(
name=self.cluster_config['name'],
mode=self.cluster_config['mode'],
)
self.manager.show_step(2)
self.manager.show_step(3)
self.manager.show_step(4)
fuel_web.update_nodes(
cluster_id,
self.cluster_config['nodes']
)
self.manager.show_step(5)
nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
fuel_web.update_node_networks(node['id'], interfaces_dict)
self.manager.show_step(6)
fuel_web.deploy_cluster_wait(cluster_id)
self.manager.show_step(7)
fuel_web.verify_network(cluster_id)
@pytest.mark.skip(reason="Disabled in fuelweb_test")
@pytest.mark.untagged_networks_negative
@pytest.mark.need_ready_slaves
def test_untagged_networks_negative(self):
"""Verify network verification fails with untagged network on eth0
Scenario:
1. Create cluster in ha mode
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Add 1 node with compute cinder
5. Split networks on existing physical interfaces
6. Remove VLAN tagging from networks which are on eth0
7. Run network verification (assert it fails)
8. Start cluster deployment (assert it fails)
Duration 30m
"""
fuel_web = self.manager.fuel_web
vlan_turn_off = {'vlan_start': None}
interfaces = {
iface_alias('eth0'): ["fixed"],
iface_alias('eth1'): ["public"],
iface_alias('eth2'): ["management", "storage"],
iface_alias('eth3'): []
}
self.manager.show_step(1)
cluster_id = fuel_web.create_cluster(
name=self.cluster_config['name'],
mode=self.cluster_config['mode'],
)
self.manager.show_step(2)
self.manager.show_step(3)
self.manager.show_step(4)
fuel_web.update_nodes(
cluster_id,
self.cluster_config['nodes']
)
self.manager.show_step(5)
nets = fuel_web.client.get_networks(cluster_id)['networks']
nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
fuel_web.update_node_networks(node['id'], interfaces)
self.manager.show_step(6)
# select networks that will be untagged:
for net in nets:
net.update(vlan_turn_off)
# stop using VLANs:
fuel_web.client.update_network(cluster_id, networks=nets)
self.manager.show_step(7)
# run network check:
fuel_web.verify_network(cluster_id, success=False)
self.manager.show_step(8)
# deploy cluster:
task = fuel_web.deploy_cluster(cluster_id)
fuel_web.assert_task_failed(task)

View File

@ -20,10 +20,7 @@ from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.ssh_manager import SSHManager
# pylint: disable=no-member
ssh_manager = SSHManager()
@ -197,3 +194,68 @@ class TestNeutronVlanHa(object):
self.manager.show_step(6)
fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.thread_1
@pytest.mark.neutron
class TestNeutronVlan(object):
"""NeutronVlan.""" # TODO documentation
cluster_config = {
"name": "NeutronVlan",
"mode": settings.DEPLOYMENT_MODE,
"settings": {
"net_provider": settings.NEUTRON,
"net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
'tenant': 'simpleVlan',
'user': 'simpleVlan',
'password': 'simpleVlan'
},
"nodes": {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
}
@pytest.mark.deploy_neutron_vlan
@pytest.mark.ha_one_controller_neutron_vlan
@pytest.mark.deployment
@pytest.mark.nova
@pytest.mark.nova_compute
def test_deploy_neutron_vlan(self):
"""Deploy cluster in ha mode with 1 controller and Neutron VLAN
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 2 nodes with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
Duration 35m
Snapshot deploy_neutron_vlan
"""
self.manager.show_step(1)
self.manager.show_step(2)
self.manager.show_step(3)
cluster_id = self._storage['cluster_id']
fuel_web = self.manager.fuel_web
cluster = fuel_web.client.get_cluster(cluster_id)
assert str(cluster['net_provider']) == settings.NEUTRON
self.manager.show_step(4)
fuel_web.verify_network(cluster_id)
self.manager.show_step(5)
fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan", is_make=True)

View File

@ -0,0 +1,247 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from paramiko import ChannelException
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers import os_actions
# pylint: disable=no-member
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.neutron
@pytest.mark.thread_1
class TestNeutronIPv6(object):
"""NeutronIPv6."""
cluster_config = {
"name": "NeutronVlan",
"mode": settings.DEPLOYMENT_MODE,
"settings": {
"net_provider": settings.NEUTRON,
"net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
'tenant': 'simpleVlan',
'user': 'simpleVlan',
'password': 'simpleVlan'
},
"nodes": {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
}
@pytest.mark.deploy_neutron_ip_v6
@pytest.mark.nova
@pytest.mark.nova_compute
@pytest.mark.neutron_ipv6
@pytest.mark.deploy_neutron_ip_v6
def test_deploy_neutron_ip_v6(self):
"""Check IPv6 only functionality for Neutron VLAN
Scenario:
1. Revert deploy_neutron_vlan snapshot
2. Create two dualstack network IPv6 subnets
(should be in SLAAC mode,
address space should not intersect).
3. Create virtual router and set gateway.
4. Attach this subnets to the router.
5. Create a Security Group,
that allows SSH and ICMP for both IPv4 and IPv6.
6. Launch two instances, one for each network.
7. Lease a floating IP.
8. Attach Floating IP for main instance.
9. SSH to the main instance and ping6 another instance.
Duration 10m
Snapshot deploy_neutron_ip_v6
"""
self.manager.show_step(1)
cluster_id = self._storage['cluster_id']
fuel_web = self.manager.fuel_web
public_vip = fuel_web.get_public_vip(cluster_id)
logger.info('Public vip is %s', public_vip)
os_conn = os_actions.OpenStackActions(
controller_ip=public_vip,
user='simpleVlan',
passwd='simpleVlan',
tenant='simpleVlan'
)
tenant = os_conn.get_tenant('simpleVlan')
self.manager.show_step(2)
net1 = os_conn.create_network(
network_name='net1',
tenant_id=tenant.id)['network']
net2 = os_conn.create_network(
network_name='net2',
tenant_id=tenant.id)['network']
subnet_1_v4 = os_conn.create_subnet(
subnet_name='subnet_1_v4',
network_id=net1['id'],
cidr='192.168.100.0/24',
ip_version=4)
subnet_1_v6 = os_conn.create_subnet(
subnet_name='subnet_1_v6',
network_id=net1['id'],
ip_version=6,
cidr="2001:db8:100::/64",
gateway_ip="2001:db8:100::1",
ipv6_ra_mode="slaac",
ipv6_address_mode="slaac")
subnet_2_v4 = os_conn.create_subnet(
subnet_name='subnet_2_v4',
network_id=net2['id'],
cidr='192.168.200.0/24',
ip_version=4)
subnet_2_v6 = os_conn.create_subnet(
subnet_name='subnet_2_v6',
network_id=net2['id'],
ip_version=6,
cidr="2001:db8:200::/64",
gateway_ip="2001:db8:200::1",
ipv6_ra_mode="slaac",
ipv6_address_mode="slaac")
self.manager.show_step(3)
router = os_conn.create_router('test_router', tenant=tenant)
self.manager.show_step(4)
os_conn.add_router_interface(
router_id=router["id"],
subnet_id=subnet_1_v4["id"])
os_conn.add_router_interface(
router_id=router["id"],
subnet_id=subnet_1_v6["id"])
os_conn.add_router_interface(
router_id=router["id"],
subnet_id=subnet_2_v4["id"])
os_conn.add_router_interface(
router_id=router["id"],
subnet_id=subnet_2_v6["id"])
self.manager.show_step(5)
security_group = os_conn.create_sec_group_for_ssh()
self.manager.show_step(6)
instance1 = os_conn.create_server(
name='instance1',
security_groups=[security_group],
net_id=net1['id'],
)
instance2 = os_conn.create_server(
name='instance2',
security_groups=[security_group],
net_id=net2['id'],
)
self.manager.show_step(7)
self.manager.show_step(8)
floating_ip = os_conn.assign_floating_ip(instance1)
floating_ip2 = os_conn.assign_floating_ip(instance2)
self.manager.show_step(9)
instance1_ipv6 = [
addr['addr'] for addr in instance1.addresses[net1['name']]
if addr['version'] == 6].pop()
instance2_ipv6 = [
addr['addr'] for addr in instance2.addresses[net2['name']]
if addr['version'] == 6].pop()
logger.info(
'\ninstance1:\n'
'\tFloatingIP: {ip!s}\n'
'\tIPv6 address: {ipv6!s}'.format(
ip=floating_ip.ip,
ipv6=instance1_ipv6))
logger.info(
'\ninstance2:\n'
'\tFloatingIP: {ip!s}\n'
'\tIPv6 address: {ipv6!s}'.format(
ip=floating_ip2.ip,
ipv6=instance2_ipv6))
with fuel_web.get_ssh_for_node("slave-01") as remote:
def ssh_ready(vm_host):
try:
os_conn.execute_through_host(
ssh=remote,
vm_host=vm_host,
cmd="ls -la",
creds=("cirros", "cubswin:)")
)
return True
except ChannelException:
return False
for vm_host, hostname in (
(floating_ip.ip, instance1),
(floating_ip2.ip, instance2)
):
try:
wait(lambda: ssh_ready(vm_host), timeout=120)
except TimeoutError:
raise TimeoutError(
'ssh is not ready on host '
'{hostname:s} ({ip:s}) '
'at timeout 120s'.format(
hostname=hostname, ip=vm_host))
res = os_conn.execute_through_host(
ssh=remote,
vm_host=floating_ip.ip,
cmd="{ping:s} -q "
"-c{count:d} "
"-w{deadline:d} "
"-s{packetsize:d} "
"{dst_address:s}".format(
ping='ping6',
count=10,
deadline=20,
packetsize=1452,
dst_address=instance2_ipv6),
creds=("cirros", "cubswin:)")
)
logger.info('Ping results: \n\t{res:s}'.format(res=res['stdout']))
assert res['exit_code'] == 0, (
'Ping failed with error code: {code:d}\n'
'\tSTDOUT: {stdout:s}\n'
'\tSTDERR: {stderr:s}'.format(
code=res['exit_code'],
stdout=res['stdout'],
stderr=res['stderr']))
self.env.make_snapshot('deploy_neutron_ip_v6')

View File

@ -0,0 +1,92 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from fuelweb_test import settings
# pylint: disable=no-member
@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestHAOneControllerNeutronRestart(object):
cluster_config = {
'name': "TestHAOneControllerNeutronRestart",
'mode': settings.DEPLOYMENT_MODE,
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute']
}
}
@pytest.mark.need_ready_cluster
@pytest.mark.ha_one_controller_neutron_warm_restart
def test_ha_one_controller_neutron_warm_restart(self):
"""Warm restart for ha one controller environment
Scenario:
1. Create cluster
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Run network verification
6. Run OSTF
7. Warm restart
8. Wait for HA services to be ready
9. Wait for OS services to be ready
10. Wait for Galera is up
11. Verify firewall rules
12. Run network verification
13. Run OSTF
Duration 30m
"""
cluster_id = self._storage['cluster_id']
fuel_web = self.manager.fuel_web
self.manager.show_step(1)
self.manager.show_step(2)
self.manager.show_step(3)
self.manager.show_step(4)
self.manager.show_step(5)
fuel_web.verify_network(cluster_id)
self.manager.show_step(6)
fuel_web.run_ostf(cluster_id=cluster_id)
self.manager.show_step(7)
fuel_web.warm_restart_nodes(
self.env.d_env.get_nodes(name__in=['slave-01', 'slave-02']))
self.manager.show_step(8)
fuel_web.assert_ha_services_ready(cluster_id)
self.manager.show_step(9)
fuel_web.assert_os_services_ready(cluster_id)
self.manager.show_step(10)
fuel_web.wait_mysql_galera_is_up(['slave-01'])
self.manager.show_step(11)
fuel_web.security.verify_firewall(cluster_id)
self.manager.show_step(12)
fuel_web.verify_network(cluster_id)
self.manager.show_step(13)
fuel_web.run_ostf(cluster_id=cluster_id)

View File

@ -13,6 +13,7 @@
# under the License.
from __future__ import division
from warnings import warn
import datetime
import random
@ -23,6 +24,7 @@ from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
from proboscis import SkipTest
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves.urllib.request import urlopen
@ -37,16 +39,23 @@ from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class TestAdminNode(TestBasic):
"""TestAdminNode.""" # TODO documentation
"""TestAdminNode.
@test(depends_on=[SetupEnvironment.setup_master],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_admin_node
""" # TODO documentation
@test(enabled=False, depends_on=[SetupEnvironment.setup_master],
groups=["test_cobbler_alive"])
@log_snapshot_after_test
def test_cobbler_alive(self):
"""Test current installation has correctly setup cobbler
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_admin_node.TestAdminNode
API and cobbler HTTP server are alive
Scenario:
@ -56,6 +65,10 @@ class TestAdminNode(TestBasic):
Duration 1m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("empty")
wait(
lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api',
@ -72,12 +85,15 @@ class TestAdminNode(TestBasic):
# raises an error if something isn't right
server.login(username, password)
@test(depends_on=[SetupEnvironment.setup_master],
@test(enabled=False, depends_on=[SetupEnvironment.setup_master],
groups=["test_astuted_alive"])
@log_snapshot_after_test
def test_astuted_alive(self):
"""Test astute master and worker processes are alive on master node
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_admin_node.TestAdminNode
Scenario:
1. Revert snapshot "empty"
2. Search for master and child processes
@ -85,6 +101,10 @@ class TestAdminNode(TestBasic):
Duration 1m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("empty")
ps_output = self.ssh_manager.execute(
self.ssh_manager.admin_ip, 'ps ax')['stdout']

View File

@ -13,13 +13,14 @@
# under the License.
from __future__ import division
from warnings import warn
import re
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
from proboscis import SkipTest
from fuelweb_test.helpers.decorators import check_fuel_snapshot
from fuelweb_test.helpers.decorators import log_snapshot_after_test
@ -594,9 +595,13 @@ class FloatingIPs(TestBasic):
self.env.make_snapshot("deploy_floating_ips")
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class NodeMultipleInterfaces(TestBasic):
"""NodeMultipleInterfaces.""" # TODO documentation
"""NodeMultipleInterfaces.
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_l2_network_config
""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_node_multiple_interfaces"])
@ -604,6 +609,9 @@ class NodeMultipleInterfaces(TestBasic):
def deploy_node_multiple_interfaces(self):
"""Deploy cluster with networks allocated on different interfaces
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_l2_network_config.TestL2NetworkConfig
Scenario:
1. Create cluster in Ha mode
2. Add 1 node with controller role
@ -618,6 +626,10 @@ class NodeMultipleInterfaces(TestBasic):
Snapshot: deploy_node_multiple_interfaces
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_3_slaves")
interfaces_dict = {
@ -650,16 +662,24 @@ class NodeMultipleInterfaces(TestBasic):
self.env.make_snapshot("deploy_node_multiple_interfaces", is_make=True)
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class NodeDiskSizes(TestBasic):
"""NodeDiskSizes.""" # TODO documentation
"""NodeDiskSizes.
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_discovery_slave
""" # TODO documentation
@test(enabled=False, depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["check_nodes_notifications"])
@log_snapshot_after_test
def check_nodes_notifications(self):
"""Verify nailgun notifications for discovered nodes
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_discovery_slave.TestNodeDiskSizes
Scenario:
1. Revert snapshot "ready_with_3_slaves"
2. Verify hard drive sizes for discovered nodes in /api/nodes
@ -668,6 +688,10 @@ class NodeDiskSizes(TestBasic):
Duration 5m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_3_slaves")
# assert /api/nodes
@ -706,18 +730,25 @@ class NodeDiskSizes(TestBasic):
disk['size'], NODE_VOLUME_SIZE * 1024 - 500
))
@test(depends_on=[NodeMultipleInterfaces.deploy_node_multiple_interfaces],
@test(enabled=False,
depends_on=[NodeMultipleInterfaces.deploy_node_multiple_interfaces],
groups=["check_nodes_disks"])
@log_snapshot_after_test
def check_nodes_disks(self):
"""Verify hard drive sizes for deployed nodes
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_discovery_slave.TestNodeDiskSizes
Scenario:
1. Revert snapshot "deploy_node_multiple_interfaces"
2. Verify hard drive sizes for deployed nodes
Duration 15m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("deploy_node_multiple_interfaces")
@ -759,16 +790,25 @@ class NodeDiskSizes(TestBasic):
))
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class MultinicBootstrap(TestBasic):
"""MultinicBootstrap.""" # TODO documentation
"""MultinicBootstrap.
@test(depends_on=[SetupEnvironment.prepare_release],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_discovery_slave
""" # TODO documentation
@test(enabled=False,
depends_on=[SetupEnvironment.prepare_release],
groups=["multinic_bootstrap_booting"])
@log_snapshot_after_test
def multinic_bootstrap_booting(self):
"""Verify slaves booting with blocked mac address
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_discovery_slave.TestMultinicBootstrap
Scenario:
1. Revert snapshot "ready"
2. Block traffic for first slave node (by mac)
@ -778,6 +818,10 @@ class MultinicBootstrap(TestBasic):
Duration 2m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready")
slave = self.env.d_env.nodes().slaves[0]
@ -798,9 +842,14 @@ class MultinicBootstrap(TestBasic):
Ebtables.restore_mac(mac)
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class UntaggedNetworksNegative(TestBasic):
"""UntaggedNetworksNegative.""" # TODO documentation
"""UntaggedNetworksNegative.
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_l2_network_config.TestL2NetworkConfig
""" # TODO documentation
@test(
depends_on=[SetupEnvironment.prepare_slaves_3],
@ -810,6 +859,9 @@ class UntaggedNetworksNegative(TestBasic):
def untagged_networks_negative(self):
"""Verify network verification fails with untagged network on eth0
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_l2_network_config.TestL2NetworkConfig
Scenario:
1. Create cluster in ha mode
2. Add 1 node with controller role
@ -822,6 +874,10 @@ class UntaggedNetworksNegative(TestBasic):
Duration 30m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_3_slaves")
vlan_turn_off = {'vlan_start': None}

View File

@ -11,9 +11,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from warnings import warn
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import SkipTest
from fuelweb_test.helpers.common import Common
from fuelweb_test.helpers.decorators import log_snapshot_after_test
@ -25,17 +27,26 @@ from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["thread_1", "neutron"])
@test(enabled=False, groups=["thread_1", "neutron"])
class NeutronVlan(TestBasic):
"""NeutronVlan.""" # TODO documentation
"""NeutronVlan.
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron
""" # TODO documentation
@test(enabled=False,
depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_vlan", "ha_one_controller_neutron_vlan",
"deployment", "nova", "nova-compute"])
@log_snapshot_after_test
def deploy_neutron_vlan(self):
"""Deploy cluster in ha mode with 1 controller and Neutron VLAN
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron.TestNeutronVlan
Scenario:
1. Create cluster
2. Add 1 node with controller role
@ -48,6 +59,10 @@ class NeutronVlan(TestBasic):
Snapshot deploy_neutron_vlan
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
@ -82,16 +97,26 @@ class NeutronVlan(TestBasic):
self.env.make_snapshot("deploy_neutron_vlan", is_make=True)
@test(groups=["neutron", "ha", "ha_neutron", "classic_provisioning"])
@test(enabled=False,
groups=["neutron", "ha", "ha_neutron", "classic_provisioning"])
class NeutronGreHa(TestBasic):
"""NeutronGreHa.""" # TODO documentation
"""NeutronGreHa.
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron
""" # TODO documentation
@test(enabled=False,
depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_gre_ha", "ha_neutron_gre"])
@log_snapshot_after_test
def deploy_neutron_gre_ha(self):
"""Deploy cluster in HA mode with Neutron GRE (DEPRECATED)
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron.TestNeutronTunHa
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
@ -105,6 +130,10 @@ class NeutronGreHa(TestBasic):
Snapshot deploy_neutron_gre_ha
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(
@ -147,16 +176,25 @@ class NeutronGreHa(TestBasic):
self.env.make_snapshot("deploy_neutron_gre_ha")
@test(groups=["neutron", "ha", "ha_neutron"])
@test(enabled=False, groups=["neutron", "ha", "ha_neutron"])
class NeutronVlanHa(TestBasic):
"""NeutronVlanHa.""" # TODO documentation
"""NeutronVlanHa.
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron
""" # TODO documentation
@test(enabled=False,
depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_neutron_vlan_ha", "neutron_vlan_ha"])
@log_snapshot_after_test
def deploy_neutron_vlan_ha(self):
"""Deploy cluster in HA mode with Neutron VLAN
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron.TestNeutronVlanHa
Scenario:
1. Create cluster
2. Add 3 nodes with controller role
@ -170,6 +208,10 @@ class NeutronVlanHa(TestBasic):
Snapshot deploy_neutron_vlan_ha
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_5_slaves")
cluster_id = self.fuel_web.create_cluster(

View File

@ -11,9 +11,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from warnings import warn
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import SkipTest
from paramiko import ChannelException
from devops.helpers.helpers import wait
@ -25,17 +27,26 @@ from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test import logger
@test(groups=["thread_1", "neutron"])
@test(enabled=False, groups=["thread_1", "neutron"])
class TestNeutronIPv6(TestBasic):
"""NeutronIPv6."""
"""NeutronIPv6.
@test(depends_on_groups=['deploy_neutron_vlan'],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron_ipv6
"""
@test(enabled=False,
depends_on_groups=['deploy_neutron_vlan'],
groups=['deploy_neutron_ip_v6',
"nova", "nova-compute", "neutron_ipv6"])
@log_snapshot_after_test
def deploy_neutron_ip_v6(self):
"""Check IPv6 only functionality for Neutron VLAN
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_neutron_ipv6.TestNeutronIPv6
Scenario:
1. Revert deploy_neutron_vlan snapshot
2. Create two dualstack network IPv6 subnets
@ -54,6 +65,10 @@ class TestNeutronIPv6(TestBasic):
Snapshot deploy_neutron_ip_v6
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.show_step(1, initialize=True)
self.env.revert_snapshot("deploy_neutron_vlan")

View File

@ -12,9 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
from warnings import warn
from devops.helpers.helpers import wait
from proboscis import test
from proboscis import SkipTest
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import logger
@ -197,15 +199,25 @@ class CephRestart(TestBasic):
self.env.make_snapshot("ceph_ha_restart")
@test(groups=["thread_1"])
@test(enabled=False, groups=["thread_1"])
class HAOneControllerNeutronRestart(TestBasic):
"""HAOneControllerNeutronRestart
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_restart
"""
@test(enabled=False,
depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ha_one_controller_neutron_warm_restart"])
@log_snapshot_after_test
def ha_one_controller_neutron_warm_restart(self):
"""Warm restart for ha one controller environment
Test disabled and move to fuel_tests suite:
fuel_tests.test.test_restart.TestHAOneControllerNeutronRestart
Scenario:
1. Create cluster
2. Add 1 node with controller role
@ -224,6 +236,10 @@ class HAOneControllerNeutronRestart(TestBasic):
Duration 30m
"""
# pylint: disable=W0101
warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
raise SkipTest("Test disabled and move to fuel_tests suite")
self.env.revert_snapshot("ready_with_3_slaves")
self.show_step(1, initialize=True)

View File

@ -1,3 +1,3 @@
system_tests:
tags:
- nova-compute
- nova_compute

View File

@ -1,3 +1,3 @@
system_tests:
tags:
- nova-compute
- nova_compute

View File

@ -1,3 +1,3 @@
system_tests:
tags:
- nova-compute
- nova_compute

View File

@ -1,3 +1,3 @@
system_tests:
tags:
- nova-compute
- nova_compute

View File

@ -1,3 +1,10 @@
[pytest]
markers =
need_ready_cluster: Create and deploy cluster for test
need_ready_slaves: Create environment with bootstraped slaves
need_ready_release: Setup master and prepare releses
need_ready_master: Setup master only
get_logs: Collect logs after test finish
fail_snapshot: Make environment snapshot if test failed
addopts = -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml
testpaths = fuel_tests