Fix filling_root test

As the number of running resources on the controller may not equal the count
measured at the beginning of the test, let's use the 'Check pacemaker status' sanity OSTF test.

Change-Id: I6ad59702fb04d5f7b2fb97cc257b763c53ee2fc7
Closes-Bug: #1584097
This commit is contained in:
Maksym Strukov 2016-07-08 18:16:58 +03:00
parent 8a584b9305
commit 951d250b1d
2 changed files with 24 additions and 39 deletions

View File

@@ -15,6 +15,8 @@ from xml.etree import ElementTree
from fuelweb_test.helpers.ssh_manager import SSHManager
ssh_manager = SSHManager()
def get_pacemaker_nodes_attributes(cibadmin_status_xml):
"""Parse 'cibadmin --query --scope status'.
@@ -103,7 +105,7 @@ def parse_pcs_status_xml(remote_ip):
:param remote_ip: remote IP address
:return: nested dictionary with node-fqdn and attribute name as keys
"""
pcs_status_dict = SSHManager().execute_on_remote(
pcs_status_dict = ssh_manager.execute_on_remote(
remote_ip, 'pcs status xml')['stdout_str']
return pcs_status_dict
@@ -116,7 +118,7 @@ def get_pacemaker_resource_name(remote_ip, resource_name):
:param resource_name: resource name string
:return: string with proper resource name
"""
cib = SSHManager().execute_on_remote(
cib = ssh_manager.execute_on_remote(
remote_ip, 'cibadmin -Q --scope resources')['stdout_str']
root = ElementTree.fromstring(cib)

View File

@@ -18,17 +18,19 @@ from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers.checkers import check_file_exists
from fuelweb_test.helpers.utils import run_on_remote_get_results
from fuelweb_test.helpers.pacemaker import get_pacemaker_nodes_attributes
from fuelweb_test.helpers.pacemaker import get_pcs_nodes
from fuelweb_test.helpers.pacemaker import parse_pcs_status_xml
from fuelweb_test.helpers.ssh_manager import SSHManager
from system_test import logger
from system_test import deferred_decorator
from system_test import action
from system_test.helpers.decorators import make_snapshot_if_step_fail
ssh_manager = SSHManager()
# pylint: disable=no-member
# noinspection PyUnresolvedReferences
@@ -138,13 +140,12 @@ class FillRootActions(object):
self.primary_controller_fqdn = str(
self.fuel_web.fqdn(self.primary_controller))
primary_ctrl = \
self.primary_controller.get_ip_address_by_network_name('admin')
pcs_status = parse_pcs_status_xml(primary_ctrl)
nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
self.primary_controller)
pcs_status = parse_pcs_status_xml(nail_node['ip'])
with self.fuel_web.get_ssh_for_node(
self.primary_controller.name) as remote:
root_free = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')['stdout_str']
@@ -199,10 +200,13 @@ class FillRootActions(object):
self.primary_controller.name)
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='fallocate -l {}M /root/bigfile'.format(
cmd='fallocate -l {}M /root/bigfile && sync'.format(
self.primary_controller_space_to_filled)
)
check_file_exists(node['ip'], '/root/bigfile')
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='ls /root/bigfile',
assert_ec_equal=[0])
@deferred_decorator([make_snapshot_if_step_fail])
@action
@@ -230,10 +234,13 @@ class FillRootActions(object):
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='fallocate -l {}M /root/bigfile2'.format(
cmd='fallocate -l {}M /root/bigfile2 && sync'.format(
controller_space_to_filled)
)
check_file_exists(node['ip'], '/root/bigfile2')
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='ls /root/bigfile2',
assert_ec_equal=[0])
@deferred_decorator([make_snapshot_if_step_fail])
@action
@@ -275,10 +282,9 @@ class FillRootActions(object):
"Checking for 'running_resources "
"attribute have '0' value")
primary_ctrl = \
self.primary_controller.get_ip_address_by_network_name(
'admin')
pcs_status = parse_pcs_status_xml(primary_ctrl)
nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
self.primary_controller)
pcs_status = parse_pcs_status_xml(nail_node['ip'])
pcs_attribs = get_pcs_nodes(pcs_status)
return pcs_attribs[self.primary_controller_fqdn][
@@ -348,22 +354,6 @@ class FillRootActions(object):
return '#health_disk' not in pcs_attribs[
self.primary_controller_fqdn]
def check_started_resources():
logger.info(
"Checking for 'running_resources' attribute "
"have {} value on node {}".format(
self.slave_node_running_resources,
self.primary_controller_fqdn))
primary_ctrl = \
self.primary_controller.get_ip_address_by_network_name(
'admin')
pcs_status = parse_pcs_status_xml(primary_ctrl)
pcs_attribs = get_pcs_nodes(pcs_status)
return pcs_attribs[self.primary_controller_fqdn][
'resources_running'] == self.slave_node_running_resources
wait(checking_health_disk_attribute_is_not_present,
timeout=self.pcs_check_timeout,
timeout_msg="Attribute #health_disk was appeared "
@@ -371,11 +361,4 @@ class FillRootActions(object):
self.primary_controller_fqdn,
self.pcs_check_timeout))
wait(check_started_resources,
timeout=self.pcs_check_timeout,
timeout_msg="Attribute 'running_resources' "
"doesn't have {} value "
"on node {} in {} seconds".format(
self.slave_node_running_resources,
self.primary_controller_fqdn,
self.pcs_check_timeout))
self.fuel_web.assert_ha_services_ready(self.cluster_id)