Add timeout_msg values in fuelweb_test/tests

- removed redundant try/except/assert

Change-Id: Ic83e5e3333f7f8ad14f2e4ba7779b174b2585802
Anton Studenov 2016-07-19 16:06:59 +03:00 committed by Alexey Stepanov
parent 8067d572fd
commit ed18a79a06
26 changed files with 220 additions and 378 deletions
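
The recurring refactoring in this change is sketched below: a wait() wrapped in try/except TimeoutError plus a follow-up assert is collapsed into a single wait() call that carries its own timeout_msg, since wait() already raises TimeoutError by itself. The snippet is a simplified illustration, not code lifted from the commit; wait_for_discovery, client and expected are placeholder names standing in for the real test fixtures.

# Minimal sketch of the pattern; names below are placeholders, not commit code.
from devops.helpers.helpers import wait


def wait_for_discovery(client, expected):
    # Before: wait() wrapped in try/except, followed by a redundant assert:
    #     try:
    #         wait(lambda: len(client.list_nodes()) == expected, timeout=5 * 60)
    #     except TimeoutError:
    #         assert_true(len(client.list_nodes()) == expected,
    #                     'Nodes are not discovered in timeout 5 *60')
    # After: wait() raises TimeoutError on its own, so the assert adds nothing;
    # timeout_msg attaches the diagnostic text to that exception instead.
    wait(lambda: len(client.list_nodes()) == expected,
         timeout=5 * 60,
         timeout_msg='Nodes are not discovered in timeout')

A second recurring simplification in the diffs below replaces open-coded polling of a node's 'online' flag with the existing fuel_web helpers wait_node_is_offline(), wait_node_is_online() and wait_node_is_discovered(), which carry equivalent timeout messages.
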

View File

@ -336,7 +336,9 @@ class ZabbixPlugin(TestBasic):
'/var/log/zabbix/zabbix_server.log | '
'grep "Status Events"')
wait(lambda: remote.execute(cmd)['exit_code'] == 0)
wait(lambda: remote.execute(cmd)['exit_code'] == 0,
timeout_msg='SNMP heartbeat status not found '
'in /var/log/zabbix/zabbix_server.log')
self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
@ -428,7 +430,8 @@ class ZabbixPlugin(TestBasic):
zabbix_web.login()
wait(lambda: self.check_event_message(
zabbix_web, 'emc', 'SNMPtrigger Critical'))
zabbix_web, 'emc', 'SNMPtrigger Critical'),
timeout_msg='SNMPtrigger Critical event not found in Zabbix')
self.env.make_snapshot("deploy_zabbix_snmp_emc_ha")
@ -519,7 +522,8 @@ class ZabbixPlugin(TestBasic):
zabbix_web.login()
wait(lambda: self.check_event_message(
zabbix_web, 'extreme', 'Power Supply Failed'))
zabbix_web, 'extreme', 'Power Supply Failed'),
timeout_msg='Power Supply Failed event not found in Zabbix')
self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")

View File

@ -73,7 +73,8 @@ class TestAdminNode(TestBasic):
wait(
lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api',
waited_code=501),
timeout=60
timeout=60,
timeout_msg='Cobbler WEB API is not alive'
)
server = ServerProxy(
'http://%s/cobbler_api' % self.env.get_admin_node_ip())

View File

@ -14,7 +14,6 @@
import os
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
@ -313,14 +312,10 @@ class BackupRestoreHA(NeutronTunHaBase):
self.fuel_web.client.delete_cluster(cluster_id)
try:
wait((lambda: len(
self.fuel_web.client.list_nodes()) == number_of_nodes),
timeout=5 * 60)
except TimeoutError:
assert_true(len(
self.fuel_web.client.list_nodes()) == number_of_nodes,
'Nodes are not discovered in timeout 5 *60')
wait((lambda: len(
self.fuel_web.client.list_nodes()) == number_of_nodes),
timeout=5 * 60,
timeout_msg='Nodes are not discovered in timeout')
cl = CommandLine()
release_id = self.fuel_web.get_releases_list_for_os(

View File

@ -656,7 +656,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
srv_host = os.get_srv_host_name(srv)
logger.info("Server is on host {:s}".format(srv_host))
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='new VM ssh port ping timeout')
def ssh_ready(remote, ip, creds):
try:
@ -681,7 +682,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
logger.info("Check cluster and server state after migration")
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='VM ssh port ping timeout after migration')
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
md5after = os.get_md5sum(
@ -752,7 +754,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
os.attach_volume(vol, srv)
self.show_step(14)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='new VM ssh port ping timeout')
logger.info("Create filesystem and mount volume")
with self.fuel_web.get_ssh_for_node("slave-01") as remote:
@ -774,7 +777,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)
logger.info("Check cluster and server state after migration")
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='VM ssh port ping timeout after migration')
self.show_step(16)
logger.info("Mount volume after migration")

View File

@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from proboscis import test
from proboscis.asserts import assert_equal
@ -78,12 +77,10 @@ class CommandLineMinimal(TestBasic):
cluster_id)
self.ssh_manager.execute_on_remote(admin_ip, cmd)
cmd = 'fuel task | grep deployment | awk \'{print $9}\''
try:
wait(lambda: int(
self.ssh_manager.execute_on_remote(
admin_ip, cmd)['stdout'][0].rstrip()) == 100, timeout=120)
except TimeoutError:
raise TimeoutError("hiera manifest was not applied")
wait(lambda: int(
self.ssh_manager.execute_on_remote(
admin_ip, cmd)['stdout'][0].rstrip()) == 100, timeout=120,
timeout_msg='hiera manifest was not applied')
cmd = 'ssh -q node-{0} "hiera role"'.format(node_id)
role = self.ssh_manager.execute_on_remote(
admin_ip, cmd)['stdout'][0].rstrip()
@ -321,20 +318,15 @@ class CommandLineTest(test_cli_base.CommandLine):
"""
self.env.revert_snapshot("cli_selected_nodes_deploy")
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[2])['id']
node = self.env.d_env.nodes().slaves[2]
node_id = self.fuel_web.get_nailgun_node_by_devops_node(node)['id']
assert_true(check_cobbler_node_exists(self.ssh_manager.admin_ip,
node_id),
"node-{0} is not found".format(node_id))
self.env.d_env.nodes().slaves[2].destroy()
try:
wait(
lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().
slaves[2])['online'], timeout=60 * 6)
except TimeoutError:
raise
node.destroy()
self.fuel_web.wait_node_is_offline(node, timeout=60 * 6)
admin_ip = self.ssh_manager.admin_ip
cmd = 'fuel node --node-id {0} --delete-from-db'.format(node_id)
res = self.ssh_manager.execute_on_remote(admin_ip, cmd)
@ -344,16 +336,15 @@ class CommandLineTest(test_cli_base.CommandLine):
"deleted from database".format(node_id))
cmd = "fuel node | awk '{{print $1}}' | grep -w '{0}'".format(node_id)
try:
wait(
lambda: not self.ssh_manager.execute_on_remote(
admin_ip,
cmd,
raise_on_assert=False)['exit_code'] == 0, timeout=60 * 4)
except TimeoutError:
raise TimeoutError(
"After deletion node-{0} is found in fuel list".format(
node_id))
wait(
lambda: not self.ssh_manager.execute_on_remote(
admin_ip,
cmd,
raise_on_assert=False)['exit_code'] == 0, timeout=60 * 4,
timeout_msg='After deletion node-{0} is found in fuel list'
''.format(node_id))
is_cobbler_node_exists = check_cobbler_node_exists(
self.ssh_manager.admin_ip, node_id)
@ -399,16 +390,13 @@ class CommandLineTest(test_cli_base.CommandLine):
cmd='fuel --env {0} env delete --force'.format(cluster_id)
)
try:
wait(lambda:
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd="fuel env | awk '{print $1}' | tail -n 1 | "
"grep '^.$'",
raise_on_assert=False)['exit_code'] == 1, timeout=60 * 10)
except TimeoutError:
raise TimeoutError(
"cluster {0} was not deleted".format(cluster_id))
wait(lambda:
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd="fuel env | awk '{print $1}' | tail -n 1 | "
"grep '^.$'",
raise_on_assert=False)['exit_code'] == 1, timeout=60 * 10,
timeout_msg='cluster {0} was not deleted'.format(cluster_id))
assert_false(
check_cluster_presence(cluster_id, self.env.postgres_actions),

View File

@ -18,7 +18,6 @@ import time
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from devops.error import TimeoutError
from devops.helpers.helpers import wait
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
@ -109,17 +108,15 @@ class CommandLine(TestBasic):
logger.info('Wait {timeout} seconds for task: {task}'
.format(timeout=timeout, task=task))
start = time.time()
try:
wait(
lambda: (self.get_task(task['id'])['status'] not in
('pending', 'running')),
interval=interval,
timeout=timeout
)
except TimeoutError:
raise TimeoutError(
"Waiting timeout {timeout} sec was reached for task: {task}"
.format(task=task["name"], timeout=timeout))
wait(
lambda: (self.get_task(task['id'])['status'] not in
('pending', 'running')),
interval=interval,
timeout=timeout,
timeout_msg='Waiting timeout {timeout} sec was reached '
'for task: {task}'.format(task=task["name"],
timeout=timeout)
)
took = time.time() - start
task = self.get_task(task['id'])
logger.info('Task finished in {took} seconds with the result: {task}'

View File

@ -16,7 +16,6 @@ from __future__ import division
from warnings import warn
import re
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis import test
@ -137,10 +136,7 @@ class HAOneControllerNeutron(HAOneControllerNeutronBase):
assert_true(
len(nodes) == 1, "Verify 1 node has pending deletion status"
)
wait(
lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=10 * 60
)
self.fuel_web.wait_node_is_discovered(nodes[0])
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["ha_one_controller_neutron_blocked_vlan"])
@ -322,13 +318,8 @@ class HAOneControllerNeutron(HAOneControllerNeutronBase):
assert_true(
len(nodes) == 2, "Verify 2 node has pending deletion status"
)
wait(
lambda:
self.fuel_web.is_node_discovered(nodes[0]) and
self.fuel_web.is_node_discovered(nodes[1]),
timeout=10 * 60,
interval=15
)
self.fuel_web.wait_node_is_discovered(nodes[0])
self.fuel_web.wait_node_is_discovered(nodes[1])
@test(groups=["multirole"])

View File

@ -645,20 +645,20 @@ class TestMultipleClusterNets(TestNetworkTemplatesBase):
self.show_step(3)
logger.info('Wait five nodes online for 900 seconds..')
wait(lambda: len(self.fuel_web.client.list_nodes()) == 5,
timeout=15 * 60)
timeout=15 * 60,
timeout_msg='Timeout while waiting five nodes '
'to become online')
logger.info('Wait all nodes from custom nodegroup become '
'in error state..')
# check all custom in error state
for slave in custom_nodes:
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'error', timeout=15 * 60)
logger.info(
'Node {} changed state to error'.format(slave.name))
except TimeoutError:
raise TimeoutError('Node {} not changed state to '
'error'.format(slave.name))
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'error', timeout=15 * 60,
timeout_msg='Node {} not changed state to '
'error'.format(slave.name))
logger.info(
'Node {} changed state to error'.format(slave.name))
self.show_step(4)
logger.info('Rebooting nodes from custom nodegroup..')
@ -672,7 +672,7 @@ class TestMultipleClusterNets(TestNetworkTemplatesBase):
get_nailgun_node_by_devops_node(slave)['online']
for slave in custom_nodes),
timeout=10 * 60)
assert 'Some nodes online'
raise AssertionError('Some nodes online')
except TimeoutError:
logger.info('Nodes are offline')
@ -715,7 +715,9 @@ class TestMultipleClusterNets(TestNetworkTemplatesBase):
logger.info('Waiting for all nodes online for 900 seconds...')
wait(lambda: all(n['online'] for n in
self.fuel_web.client.list_cluster_nodes(cluster_id)),
timeout=15 * 60)
timeout=15 * 60,
timeout_msg='Timeout while waiting nodes to become online '
'after reset')
self.show_step(4)
custom_nodegroup = [ng for ng in self.fuel_web.client.get_nodegroups()
@ -726,15 +728,13 @@ class TestMultipleClusterNets(TestNetworkTemplatesBase):
logger.info('Wait all nodes from custom nodegroup become '
'in error state..')
for slave in custom_nodes:
try:
# pylint: disable=undefined-loop-variable
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'error', timeout=60 * 5)
# pylint: enable=undefined-loop-variable
logger.info('Node {} is in "error" state'.format(slave.name))
except TimeoutError:
raise TimeoutError('Node {} status wasn\'t changed '
'to "error"!'.format(slave.name))
# pylint: disable=undefined-loop-variable
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'error', timeout=60 * 5,
timeout_msg='Node {} status wasn\'t changed '
'to "error"!'.format(slave.name))
# pylint: enable=undefined-loop-variable
logger.info('Node {} is in "error" state'.format(slave.name))
self.show_step(6)
new_nodegroup = self.fuel_web.client.create_nodegroup(
@ -764,13 +764,10 @@ class TestMultipleClusterNets(TestNetworkTemplatesBase):
logger.info('Wait all nodes from custom nodegroup become '
'in discover state..')
for slave in custom_nodes:
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'discover', timeout=60 * 5)
logger.info('Node {} is in "discover" state'.format(
slave.name))
except TimeoutError:
raise TimeoutError('Node {} status wasn\'t changed '
'to "discover"!'.format(slave.name))
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
slave)['status'] == 'discover', timeout=60 * 5,
timeout_msg='Node {} status wasn\'t changed '
'to "discover"!'.format(slave.name))
logger.info('Node {} is in "discover" state'.format(slave.name))
self.env.make_snapshot("delete_custom_nodegroup")

View File

@ -19,7 +19,6 @@ from proboscis import SkipTest
from paramiko import ChannelException
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.decorators import log_snapshot_after_test
@ -204,14 +203,11 @@ class TestNeutronIPv6(TestBasic):
(floating_ip.ip, instance1),
(floating_ip2.ip, instance2)
):
try:
wait(lambda: ssh_ready(vm_host), timeout=120)
except TimeoutError:
raise TimeoutError(
'ssh is not ready on host '
'{hostname:s} ({ip:s}) '
'at timeout 120s'.format(
hostname=hostname, ip=vm_host))
wait(lambda: ssh_ready(vm_host), timeout=120,
timeout_msg='ssh is not ready on host '
'{hostname:s} ({ip:s}) '
'at timeout 120s'.format(hostname=hostname,
ip=vm_host))
res = os_conn.execute_through_host(
ssh=remote,

View File

@ -569,11 +569,13 @@ class UbuntuBootstrap(base_test_case.TestBasic):
self.fuel_web.client.delete_cluster(cluster_id)
# wait nodes go to reboot
wait(lambda: not self.fuel_web.client.list_nodes(), timeout=10 * 60)
wait(lambda: not self.fuel_web.client.list_nodes(), timeout=10 * 60,
timeout_msg='Timeout while waiting nodes to become offline')
# wait for nodes to appear after bootstrap
wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
timeout=10 * 60)
timeout=10 * 60,
timeout_msg='Timeout while waiting nodes to become online')
nodes = self.env.d_env.get_nodes(
name__in=["slave-01", "slave-02", "slave-03"])
@ -619,7 +621,8 @@ class UbuntuBootstrap(base_test_case.TestBasic):
# wait for nodes to appear after bootstrap
wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
timeout=10 * 60)
timeout=10 * 60,
timeout_msg='Timeout while waiting nodes to become online')
self.fuel_web.verify_network(cluster_id)
node = self.fuel_web.get_nailgun_node_by_name("slave-03")

View File

@ -58,7 +58,9 @@ class VcenterDeploy(TestBasic):
# Wait for launch VMs
for hypervisor in hypervisors_list:
wait(lambda: os_conn.get_hypervisor_vms_count(hypervisor) != 0,
timeout=300)
timeout=300,
timeout_msg='Timeout while waiting VM to be listed '
'in hypervisor')
def configure_nova_vlan(self, cluster_id):
# Configure network interfaces.

View File

@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from devops.error import TimeoutError
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from proboscis import asserts
@ -223,11 +222,9 @@ class RhFailoverGroup(ExtraComputesBase):
neutron=True, label=net_label)
vm_floating_ip = os_conn.assign_floating_ip(vm)
logger.info('Trying to get vm via tcp.')
try:
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))
self.show_step(4)
self.warm_restart_nodes([target_node])
@ -243,11 +240,9 @@ class RhFailoverGroup(ExtraComputesBase):
os_conn.get_instance_detail(vm).status))
logger.info('Spawned VM is ACTIVE. Trying to '
'access it via ip: {0}'.format(vm_floating_ip.ip))
try:
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
logger.info('VM is accessible. Deleting it.')
os_conn.delete_instance(vm)
os_conn.verify_srv_deleted(vm)
@ -305,11 +300,9 @@ class RhFailoverGroup(ExtraComputesBase):
neutron=True, label=net_label)
vm_floating_ip = os_conn.assign_floating_ip(vm)
logger.info('Trying to get vm via tcp.')
try:
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))
self.show_step(4)
target_node.destroy()
@ -331,11 +324,9 @@ class RhFailoverGroup(ExtraComputesBase):
os_conn.get_instance_detail(vm).status))
logger.info('Spawned VM is ACTIVE. Trying to '
'access it via ip: {0}'.format(vm_floating_ip.ip))
try:
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(vm_floating_ip.ip))
logger.info('VM is accessible. Deleting it.')
os_conn.delete_instance(vm)
os_conn.verify_srv_deleted(vm)

View File

@ -23,7 +23,6 @@ from proboscis.asserts import assert_true
from six.moves.urllib.request import urlopen
# pylint: enable=import-error
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from fuelweb_test import logger
from fuelweb_test import settings
@ -231,9 +230,8 @@ class PatchingTests(TestBasic):
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster(cluster_id)
wait(
lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.wait_node_is_discovered(nodes[0])
# sanity set isn't running due to LP1457515
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['smoke', 'ha'])
@ -390,9 +388,8 @@ class PatchingMasterTests(TestBasic):
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster(cluster_id)
wait(
lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.wait_node_is_discovered(nodes[0])
# sanity set isn't running due to LP1457515
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['smoke', 'ha'])
@ -407,14 +404,11 @@ class PatchingMasterTests(TestBasic):
self.fuel_web.wait_nodes_get_online_state(
active_nodes, timeout=10 * 60)
self.fuel_web.client.delete_cluster(cluster_id)
try:
wait((lambda: len(
self.fuel_web.client.list_nodes()) == number_of_nodes),
timeout=5 * 60)
except TimeoutError:
assert_true(len(
self.fuel_web.client.list_nodes()) == number_of_nodes,
'Nodes are not discovered in timeout 5 *60')
wait((lambda: len(
self.fuel_web.client.list_nodes()) == number_of_nodes),
timeout=5 * 60,
timeout_msg='Timeout: Nodes are not discovered')
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3])
@test(groups=["patching_master"],

View File

@ -13,9 +13,7 @@
# under the License.
from proboscis import test
from proboscis.asserts import assert_false, assert_equal
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_after_test
@ -75,15 +73,7 @@ class HaScaleGroup2(TestBasic):
primary_controller)['id']
primary_controller.destroy()
self.show_step(5)
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
primary_controller)['online'], timeout=30 * 8)
except TimeoutError:
assert_false(
self.fuel_web.get_nailgun_node_by_devops_node(
primary_controller)['online'],
'Node {0} has not become '
'offline after warm shutdown'.format(primary_controller.name))
self.fuel_web.wait_node_is_offline(primary_controller)
self.show_step(6)
self.fuel_web.delete_node(primary_controller_id)
self.fuel_web.wait_task_success('deployment')

View File

@ -142,7 +142,8 @@ class TestNessus(NeutronTunHaBase):
check_scan_complete = self.get_check_scan_complete(
nessus_client, scan_id, history_id)
wait(check_scan_complete, interval=10, timeout=60 * 30)
wait(check_scan_complete, interval=10, timeout=60 * 30,
timeout_msg='Timeout: nessus scan status != completed')
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(
@ -199,7 +200,8 @@ class TestNessus(NeutronTunHaBase):
check_scan_complete = self.get_check_scan_complete(
nessus_client, scan_id, history_id)
wait(check_scan_complete, interval=10, timeout=60 * 50)
wait(check_scan_complete, interval=10, timeout=60 * 50,
timeout_msg='Timeout: nessus scan status != completed')
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(
@ -260,7 +262,8 @@ class TestNessus(NeutronTunHaBase):
check_scan_complete = self.get_check_scan_complete(
nessus_client, scan_id, history_id)
wait(check_scan_complete, interval=10, timeout=60 * 30)
wait(check_scan_complete, interval=10, timeout=60 * 30,
timeout_msg='Timeout: nessus scan status != completed')
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(

View File

@ -16,7 +16,6 @@ import os
from proboscis import test
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers.checkers import check_plugin_path_env
from fuelweb_test.helpers import utils
@ -149,8 +148,8 @@ class SeparateHorizonFailover(TestBasic):
# destroy one horizon node
horizon_node = self.env.d_env.nodes().slaves[3]
horizon_node.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
horizon_node)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(horizon_node)
self.fuel_web.assert_os_services_ready(cluster_id)
self.fuel_web.run_ostf(
@ -175,8 +174,7 @@ class SeparateHorizonFailover(TestBasic):
# restart one horizon node
horizon_node = self.env.d_env.nodes().slaves[3]
self.fuel_web.warm_restart_nodes([horizon_node])
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
horizon_node)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_online(horizon_node)
self.fuel_web.assert_os_services_ready(cluster_id)
self.fuel_web.run_ostf(
@ -204,8 +202,7 @@ class SeparateHorizonFailover(TestBasic):
logger.debug(
"controller with primary role is {}".format(controller.name))
controller.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
controller)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(controller)
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)
self.fuel_web.assert_os_services_ready(cluster_id, should_fail=1)
@ -247,8 +244,7 @@ class SeparateHorizonFailover(TestBasic):
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.wait_node_is_discovered(nodes[0])
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])

View File

@ -16,7 +16,6 @@ import os
from proboscis import test
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers.checkers import check_plugin_path_env
from fuelweb_test.helpers import utils
@ -179,8 +178,8 @@ class SeparateAllFailover(TestBasic):
all_node = self.fuel_web.get_rabbit_master_node(
self.env.d_env.nodes().slaves[3].name)
all_node.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
all_node)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(all_node)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
@ -210,8 +209,7 @@ class SeparateAllFailover(TestBasic):
logger.debug(
"controller with primary role is {}".format(controller.name))
controller.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
controller)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(controller)
# One test should fail: Check state of haproxy backends on controllers
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)

View File

@ -16,7 +16,6 @@ import os
from proboscis.asserts import assert_true
from proboscis import test
from devops.helpers.helpers import wait
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import utils
@ -146,8 +145,8 @@ class SeparateRabbitFailover(TestBasic):
rabbit_node = self.fuel_web.get_rabbit_master_node(
self.env.d_env.nodes().slaves[3].name)
rabbit_node.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
rabbit_node)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(rabbit_node)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
@ -174,8 +173,7 @@ class SeparateRabbitFailover(TestBasic):
rabbit_node = self.fuel_web.get_rabbit_master_node(
self.env.d_env.nodes().slaves[3].name)
self.fuel_web.warm_restart_nodes([rabbit_node])
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
rabbit_node)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_online(rabbit_node)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.assert_os_services_ready(cluster_id, timeout=15 * 60)
@ -205,8 +203,7 @@ class SeparateRabbitFailover(TestBasic):
logger.debug(
"controller with primary role is {}".format(controller.name))
controller.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
controller)['online'], timeout=60 * 5)
self.fuel_web.wait_node_is_offline(controller)
# One test should fail: Check state of haproxy backends on controllers
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)
@ -266,8 +263,7 @@ class SeparateRabbitFailover(TestBasic):
nodes = [_node for _node in nailgun_node
if _node["pending_deletion"] is True]
self.fuel_web.deploy_cluster_wait(cluster_id)
wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
timeout=6 * 60)
self.fuel_web.wait_node_is_discovered(nodes[0])
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])

View File

@ -114,17 +114,7 @@ class CICMaintenanceMode(TestBasic):
ip=_ip,
cmd="umm on")
logger.info('Wait a node-{0} offline status after turning on of'
' maintenance mode'.format(_id))
err_msg = ('Node-{0} has not become offline after'
'turning on of maintenance mode'.format(_id))
wait(
lambda: not
self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl)
['online'], timeout=70 * 10, timeout_msg=err_msg)
logger.info('Check that node-{0} in maintenance mode after '
'switching'.format(_id))
self.fuel_web.wait_node_is_offline(dregular_ctrl)
asserts.assert_true(
checkers.check_ping(self.env.get_admin_node_ip(),
@ -140,17 +130,7 @@ class CICMaintenanceMode(TestBasic):
ip=_ip,
cmd="umm off")
logger.info('Wait a node-{0} online status'.format(_id))
err_msg = ('Node-{0} has not become online after'
'turning off maintenance mode'.format(_id))
wait(
lambda:
self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl)
['online'], timeout=70 * 10, timeout_msg=err_msg)
# Wait until MySQL Galera is UP on some controller
self.fuel_web.wait_mysql_galera_is_up(
[dregular_ctrl.name])
self.fuel_web.wait_node_is_online(dregular_ctrl)
# Wait until Cinder services UP on a controller
self.fuel_web.wait_cinder_is_up(
@ -165,6 +145,7 @@ class CICMaintenanceMode(TestBasic):
timeout=1500)
logger.info('RabbitMQ cluster is available')
# TODO(astudenov): add timeout_msg
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
@ -173,6 +154,7 @@ class CICMaintenanceMode(TestBasic):
timeout=1500)
logger.info("Required services are running")
# TODO(astudenov): add timeout_msg
try:
self.fuel_web.run_ostf(cluster_id,
test_sets=['smoke', 'sanity', 'ha'])
@ -231,16 +213,11 @@ class CICMaintenanceMode(TestBasic):
wait(lambda:
not checkers.check_ping(self.env.get_admin_node_ip(),
_ip),
timeout=60 * 10)
timeout=60 * 10,
timeout_msg='Node {} still responds to ping'.format(
dregular_ctrl.name))
logger.info('Wait a node-{0} offline status after unexpected '
'reboot'.format(_id))
err_msg = ('Node-{0} has not become offline'
' after unexpected'.format(_id))
wait(
lambda: not
self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl)
['online'], timeout=70 * 10, timeout_msg=err_msg)
self.fuel_web.wait_node_is_offline(dregular_ctrl)
logger.info('Check that node-{0} in maintenance mode after'
' unexpected reboot'.format(_id))
@ -262,14 +239,7 @@ class CICMaintenanceMode(TestBasic):
change_config(_ip)
logger.info('Wait a node-{0} online status'
.format(_id))
err_msg = ('Node-{0} has not become online after'
'turning off maintenance mode'.format(_id))
wait(
lambda:
self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl)
['online'], timeout=70 * 10, timeout_msg=err_msg)
self.fuel_web.wait_node_is_online(dregular_ctrl)
# Wait until MySQL Galera is UP on some controller
self.fuel_web.wait_mysql_galera_is_up(
@ -421,7 +391,9 @@ class CICMaintenanceMode(TestBasic):
wait(lambda:
not checkers.check_ping(self.env.get_admin_node_ip(),
_ip),
timeout=60 * 10)
timeout=60 * 10,
timeout_msg='Node {} still responds to ping'.format(
dregular_ctrl.name))
# Node don't have enough time for set offline status
# after reboot --force
@ -433,10 +405,8 @@ class CICMaintenanceMode(TestBasic):
deadline=600),
"Host {0} is not reachable by ping during 600 sec"
.format(_ip))
logger.info('Wait a node-{0} online status after unexpected '
'reboot'.format(_id))
self.fuel_web.wait_nodes_get_online_state([dregular_ctrl])
self.fuel_web.wait_node_is_online(dregular_ctrl)
logger.info('Check that node-{0} not in maintenance mode after'
' unexpected reboot'.format(_id))
@ -461,6 +431,7 @@ class CICMaintenanceMode(TestBasic):
timeout=1500)
logger.info('RabbitMQ cluster is available')
# TODO(astudenov): add timeout_msg
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],

View File

@ -172,9 +172,7 @@ class TestHaFailoverBase(TestBasic):
[devops_node])
# Wait until Nailgun marked suspended controller as offline
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
devops_node)['online'],
timeout=60 * 5)
self.fuel_web.wait_node_is_offline(devops_node)
# Wait the pacemaker react to changes in online nodes
time.sleep(60)
@ -305,11 +303,9 @@ class TestHaFailoverBase(TestBasic):
# 3. Waiting for restore the IP
logger.debug("Waiting while deleted ip restores ...")
try:
wait(check_restore, timeout=60)
except TimeoutError:
logger.error("Resource has not been restored for a 60 sec")
raise
wait(check_restore, timeout=60,
timeout_msg='Resource has not been restored for a 60 sec')
new_nodes = self.fuel_web.get_pacemaker_resource_location(
devops_controllers[0].name,
@ -506,7 +502,9 @@ class TestHaFailoverBase(TestBasic):
remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED -j DROP")
cmd = "netstat -nap | grep {0} | grep :5673".format(pid)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300,
timeout_msg='Failed to drop AMQP connections on node {}'
''.format(p_d_ctrl.name))
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
@ -519,6 +517,7 @@ class TestHaFailoverBase(TestBasic):
with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote:
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
# TODO(astudenov): add timeout_msg
_wait(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep {0}'
@ -556,10 +555,8 @@ class TestHaFailoverBase(TestBasic):
wait(
lambda:
len(remote.execute('pgrep nova-compute')['stdout']) == 1,
timeout=120)
assert_true(
len(remote.execute('pgrep nova-compute')['stdout']) == 1,
'Nova service was not restarted')
timeout=120,
timeout_msg='Nova service was not restarted')
assert_true(len(remote.execute(
"grep \"nova-compute.*trying to restart\" "
"/var/log/monit.log")['stdout']) > 0,
@ -596,13 +593,9 @@ class TestHaFailoverBase(TestBasic):
DOWNLOAD_LINK))
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
try:
wait(
lambda: remote.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60)
except TimeoutError:
raise TimeoutError(
"File download was not started")
wait(lambda: remote.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60,
timeout_msg='File download was not started')
ip_slave_5 = self.fuel_web.get_nailgun_node_by_name('slave-05')['ip']
file_size1 = get_file_size(ip_slave_5, file_name, file_path)
@ -615,13 +608,8 @@ class TestHaFailoverBase(TestBasic):
file_size2,
file_size1))
devops_node.destroy()
try:
wait(
lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
devops_node)['online'], timeout=60 * 6)
except TimeoutError:
raise TimeoutError(
"Primary controller was not destroyed")
self.fuel_web.wait_node_is_offline(devops_node)
slave05 = self.fuel_web.get_nailgun_node_by_name('slave-05')
assert_true(
check_ping(slave05['ip'], DNS, deadline=120, interval=10),
@ -723,11 +711,9 @@ class TestHaFailoverBase(TestBasic):
floating_ip = os_conn.assign_floating_ip(instance)
# check instance
try:
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
p_d_ctrl = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
@ -738,13 +724,7 @@ class TestHaFailoverBase(TestBasic):
master_rabbit.destroy(False)
# Wait until Nailgun marked destroyed controller as offline
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become offline '
'in nailgun'.format(master_rabbit.name))
self.fuel_web.wait_node_is_offline(master_rabbit)
# check ha
try:
@ -758,11 +738,9 @@ class TestHaFailoverBase(TestBasic):
test_sets=['ha'], should_fail=3)
# check instance
try:
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['controller'])
@ -778,26 +756,15 @@ class TestHaFailoverBase(TestBasic):
second_master_rabbit.destroy(False)
# Wait until Nailgun marked destroyed controller as offline
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
second_master_rabbit)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become offline '
'in nailgun'.format(second_master_rabbit.name))
self.fuel_web.wait_node_is_offline(second_master_rabbit)
# turn on 1-st master
master_rabbit.start()
# Wait until Nailgun marked destroyed controller as online
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit)['online'], timeout=60 * 10)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become online '
'in nailgun'.format(master_rabbit.name))
self.fuel_web.wait_node_is_online(master_rabbit)
self.fuel_web.check_ceph_status(
cluster_id,
offline_nodes=[self.fuel_web.get_nailgun_node_by_devops_node(
@ -819,13 +786,7 @@ class TestHaFailoverBase(TestBasic):
second_master_rabbit.start()
# Wait until Nailgun marked destroyed controller as online
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
second_master_rabbit)['online'], timeout=60 * 10)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become online'
'in nailgun'.format(second_master_rabbit.name))
self.fuel_web.wait_node_is_online(second_master_rabbit)
self.fuel_web.check_ceph_status(cluster_id)
# check ha
@ -840,7 +801,9 @@ class TestHaFailoverBase(TestBasic):
test_sets=['ha'])
# ping instance
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
# delete instance
os_conn = os_actions.OpenStackActions(public_vip)
@ -1054,13 +1017,7 @@ class TestHaFailoverBase(TestBasic):
rabbit_slaves[0].destroy()
# Wait until Nailgun marked destroyed controller as offline
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
rabbit_slaves[0])['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become offline '
'in nailgun'.format(rabbit_slaves[0].name))
self.fuel_web.wait_node_is_offline(rabbit_slaves[0])
# check ha
logger.info('Node was destroyed {0}'.format(rabbit_slaves[0].name))
@ -1092,13 +1049,7 @@ class TestHaFailoverBase(TestBasic):
rabbit_slaves[0].start()
# Wait until Nailgun marked suspended controller as online
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
rabbit_slaves[0])['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become online '
'in nailgun'.format(rabbit_slaves[0].name))
self.fuel_web.wait_node_is_online(rabbit_slaves[0])
# check ha
with TimeStat("ha_ostf_after_rabbit_slave_power_on", is_uniq=True):
@ -1121,13 +1072,7 @@ class TestHaFailoverBase(TestBasic):
master_rabbit.destroy()
# Wait until Nailgun marked destroyed controller as offline
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become offline'
'in nailgun'.format(master_rabbit.name))
self.fuel_web.wait_node_is_offline(master_rabbit)
# check ha and note that backend for destroyed node will be down
with TimeStat("ha_ostf_master_rabbit_destroy", is_uniq=True):
@ -1150,13 +1095,7 @@ class TestHaFailoverBase(TestBasic):
master_rabbit.start()
# Wait until Nailgun marked controller as online
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become online '
'in nailgun'.format(master_rabbit.name))
self.fuel_web.wait_node_is_online(master_rabbit)
# check ha
with TimeStat("ha_ostf_master_rabbit_power_on", is_uniq=True):
@ -1228,12 +1167,14 @@ class TestHaFailoverBase(TestBasic):
for count in xrange(500):
logger.debug('Checking splitbrain in the loop, '
'count number: {0}'.format(count))
# TODO(astudenov): add timeout_msg
_wait(
lambda: assert_equal(
remote_controller.execute(
'killall -TERM corosync')['exit_code'], 0,
'Corosync was not killed on controller, '
'see debug log, count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
lambda: assert_true(
_check_all_pcs_nodes_status(
@ -1241,6 +1182,7 @@ class TestHaFailoverBase(TestBasic):
'Offline'),
'Caught splitbrain, see debug log, '
'count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
lambda: assert_equal(
remote_controller.execute(
@ -1248,6 +1190,7 @@ class TestHaFailoverBase(TestBasic):
'restart')['exit_code'], 0,
'Corosync was not started, see debug log,'
' count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
lambda: assert_true(
_check_all_pcs_nodes_status(
@ -1446,13 +1389,7 @@ class TestHaFailoverBase(TestBasic):
master_rabbit_2.destroy()
# Wait until Nailgun marked suspended controller as offline
try:
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit_2)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become offline '
'in nailgun'.format(master_rabbit_2.name))
self.fuel_web.wait_node_is_offline(master_rabbit_2)
# check ha, should fail 1 test according
# to haproxy backend from destroyed will be down
@ -1470,13 +1407,7 @@ class TestHaFailoverBase(TestBasic):
master_rabbit_2.start()
# Wait until Nailgun marked suspended controller as online
try:
wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
master_rabbit_2)['online'], timeout=60 * 5)
except TimeoutError:
raise TimeoutError('Node {0} does'
' not become online '
'in nailgun'.format(master_rabbit_2.name))
self.fuel_web.wait_node_is_online(master_rabbit_2)
# check ha
self.show_step(13)

View File

@ -16,7 +16,6 @@ from proboscis import SkipTest
from proboscis import test
from proboscis.asserts import assert_equal
from devops.error import TimeoutError
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from fuelweb_test import logger
@ -246,11 +245,9 @@ class FailoverGroup2(TestBasic):
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
try:
wait(lambda: tcp_ping(floating_ip_1.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(floating_ip_1.ip))
wait(lambda: tcp_ping(floating_ip_1.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip_1.ip))
self.show_step(8)
self.fuel_web.run_ostf(cluster_id)

View File

@ -65,10 +65,13 @@ class RepeatableImageBased(TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.client.delete_cluster(cluster_id)
# wait nodes go to reboot
wait(lambda: not self.fuel_web.client.list_nodes(), timeout=10 * 60)
wait(lambda: not self.fuel_web.client.list_nodes(),
timeout=10 * 60,
timeout_msg='Nodes failed to become offline')
# wait for nodes to appear after bootstrap
wait(lambda: len(self.fuel_web.client.list_nodes()) == 5,
timeout=10 * 60)
timeout=10 * 60,
timeout_msg='Nodes failed to become online')
for slave in self.env.d_env.nodes().slaves[:5]:
slave.destroy()

View File

@ -209,7 +209,9 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
# Wait 60 second until ssh is available on instance
wait(
lambda: remote.execute(ssh_awail_cmd)['exit_code'] == 0,
timeout=60)
timeout=60,
timeout_msg='SSH port is not available in dhcp_namespace={}'
''.format(dhcp_namespace))
logger.debug('instance internal ip is {0}'.format(instance_ip))
@ -386,8 +388,7 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
# Destroy controller with l3 agent for start migration process
devops_node_with_l3.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
devops_node_with_l3)['online'], timeout=60 * 10)
self.fuel_web.wait_node_is_offline(devops_node_with_l3)
# Wait for HA services get ready
self.fuel_web.assert_ha_services_ready(cluster_id, should_fail=1)

View File

@ -14,7 +14,6 @@
import time
from warnings import warn
from devops.helpers.helpers import wait
from proboscis import test
from proboscis import SkipTest
@ -133,9 +132,8 @@ class CephRestart(TestBasic):
nailgun_node_id = self.fuel_web.get_nailgun_node_by_devops_node(
slave_06)['id']
slave_06.destroy()
self.fuel_web.wait_node_is_offline(slave_06)
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
slave_06)['online'], timeout=30 * 8)
self.fuel_web.delete_node(nailgun_node_id)
self.fuel_web.check_ceph_status(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
@ -149,8 +147,7 @@ class CephRestart(TestBasic):
slave_05)['id']
slave_05.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
slave_05)['online'], timeout=30 * 8)
self.fuel_web.wait_node_is_offline(slave_05)
self.fuel_web.delete_node(nailgun_node_id)
self.fuel_web.check_ceph_status(cluster_id)

View File

@ -267,7 +267,8 @@ class UpgradeRollback(DataDrivenUpgradeBase):
self.fuel_web.client.delete_cluster(cluster_id)
wait(lambda: not any([cluster['id'] == cluster_id for cluster in
self.fuel_web.client.list_clusters()]),
timeout=60 * 10)
timeout=60 * 10,
timeout_msg='Failed to delete cluster id={}'.format(cluster_id))
self.env.bootstrap_nodes(devops_nodes)
self.show_step(3)
@ -438,8 +439,7 @@ class UpgradeSmoke(DataDrivenUpgradeBase):
self.show_step(10)
self.show_step(11)
for node in pending_nodes:
wait(lambda: self.fuel_web.is_node_discovered(node),
timeout=6 * 60)
self.fuel_web.wait_node_is_discovered(node)
with self.fuel_web.get_ssh_for_node(
self.fuel_web.get_devops_node_by_nailgun_node(
node).name) as slave_remote:
@ -485,11 +485,10 @@ class UpgradeSmoke(DataDrivenUpgradeBase):
self.fuel_web.delete_node(node['id'])
self.show_step(4)
slaves = self.env.d_env.nodes().slaves[:2]
wait(lambda: all(self.env.nailgun_nodes(slaves)), timeout=10 * 60)
for node in self.fuel_web.client.list_cluster_nodes(
cluster_id=cluster_id):
wait(lambda: self.fuel_web.is_node_discovered(node), timeout=60)
nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id)
for node in nodes:
self.fuel_web.wait_node_is_discovered(node, timeout=10 * 60)
self.show_step(5)
self.fuel_web.update_nodes(
@ -535,7 +534,8 @@ class UpgradeSmoke(DataDrivenUpgradeBase):
)
self.fuel_web.client.delete_cluster(cluster_id)
wait(lambda: not any([cluster['id'] == cluster_id for cluster in
self.fuel_web.client.list_clusters()]))
self.fuel_web.client.list_clusters()]),
timeout_msg='Failed to delete cluster id={}'.format(cluster_id))
self.env.bootstrap_nodes(devops_nodes)
self.show_step(3)

View File

@ -14,7 +14,6 @@
import os
import traceback
from devops.error import TimeoutError
from devops.helpers.helpers import wait
from proboscis import test
from proboscis import asserts
@ -196,13 +195,10 @@ class CreateDeployEnvironmentCli(test_cli_base.CommandLine):
res['exit_code'] == 0)
with self.env.d_env.get_admin_remote() as remote:
try:
wait(lambda:
remote.execute("fuel env | awk '{print $1}'"
" | tail -n 1 | grep '^.$'")
['exit_code'] == 1, timeout=60 * 10)
except TimeoutError:
raise TimeoutError(
"cluster {0} was not deleted".format(cluster_id))
wait(lambda:
remote.execute("fuel env | awk '{print $1}'"
" | tail -n 1 | grep '^.$'")
['exit_code'] == 1, timeout=60 * 10,
timeout_msg='cluster {0} was not deleted'.format(cluster_id))
self.env.make_snapshot("review_fuel_cli_one_node_deploy")