Pytest: Add maintenance test cases

Convert the following robot test cases to pytest test cases
1. 15-Verify-Add-Delete-Controller.robot
2. 45-No-Power-Off-Option-GUI-Compute.robot
3. 47-Verify-Host-Powerdown-Powerup.robot
4. 50-Verify-No-Poweroff-Option-Controller-Gui.robot
5. 63-Time-Stamp-Correct-Alarms-After-Time-Zone-Updated.robot
6. 132-Pull-Management-Cable-Active-Controller.robot

Signed-off-by: Dongqi Chen <chen.dq@neusoft.com>
Change-Id: I200568225578a57b0c3292abb6dcdfc8f5fe6250
Dongqi Chen 2021-07-15 10:34:10 +08:00
parent a68778f64d
commit a1bd40226e
8 changed files with 478 additions and 1 deletion


@@ -2,4 +2,4 @@
 host=review.opendev.org
 port=29418
 project=starlingx/test.git
-defaultbranch=r/stx.4.0
+defaultbranch=devel


@@ -0,0 +1,27 @@
from pytest import fixture, skip

from testfixtures.resource_mgmt import *
from keywords import system_helper
from utils.clients import ssh
from utils.tis_log import LOG

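# Default network/subnet parameters, presumably shared by network-provisioning
# tests in this suite (they are not referenced by the fixtures below).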
NETWORK_NAME = "network"
SUBNET_NAME = "subnet"
SUBNET_RANGE = "192.168.1.0/24"
IP_VERSION = 4


@fixture(scope='session')
def no_aio_system():
    LOG.fixture_step("(Session) Skip if AIO system")
    if system_helper.is_aio_system():
        skip('skip if AIO system')


@fixture(scope='session')
def no_bare_metal():
    LOG.fixture_step("(Session) Skip if bare metal")
    con_ssh = ssh.ControllerClient.get_active_controller()
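    # On x86, dmesg reports "Booting paravirtualized kernel on KVM" inside a VM
    # and "... on bare hardware" on a physical server, so this line tells the
    # two environments apart.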
    output = con_ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
    if 'KVM' not in output and 'bare hardware' in output:
        skip('not supported on bare metal')


@@ -0,0 +1,43 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to check the system response when a compute's management network goes down.
#
# Author(s): Dongqi.Chen <dongqix.chen@intel.com>
#
###
import time

from pytest import mark

from consts.auth import HostLinuxUser
from keywords import system_helper, host_helper
from utils.clients.ssh import ControllerClient


@mark.maintenance
def test_host_reboot_with_network_down(no_aio_system):
    """
    65-Host-Reboot-With-Network-Down.robot

    Args:
        no_aio_system:

    Returns:

    """
    con_ssh = ControllerClient.get_active_controller()
    compute = system_helper.get_computes()[0]
    mgmt_ip = system_helper.get_host_values(host=compute, fields='mgmt_ip')[0]
    with host_helper.ssh_to_host(compute) as node_ssh:
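        # "grep -B 1 <ip>" also prints the line above the address line; dropping
        # the line that contains the IP itself leaves the interface name.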
cmd = "ifconfig | grep -B 1 {} | grep -v {} | awk -F ':' '{{print $1}}'".format(mgmt_ip, mgmt_ip)
host_dev = node_ssh.exec_cmd(cmd)[1]
node_ssh.send("sudo ifconfig {} down;sleep 5;sudo ifconfig {} up".format(host_dev, host_dev))
node_ssh.expect('Password:')
node_ssh.send(HostLinuxUser.get_password())
time.sleep(60)
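    # Nudge the controller-side session and drain any buffered output before
    # polling the host state.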
    con_ssh.send('echo receive msg')
    con_ssh.expect('.*')
    host_helper.wait_for_hosts_ready(compute, con_ssh=con_ssh)


@@ -0,0 +1,96 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to validate alarm and log timestamps after the timezone is modified.
#
# Author(s): Dongqi.Chen <dongqix.chen@intel.com>
#
###
import datetime

from pytest import mark

from consts.auth import Tenant
from utils import cli
from utils.clients.ssh import ControllerClient

con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin_platform')
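# Note: the two handles above are resolved at import time, so this module
# assumes an active controller connection already exists when it is collected.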


def get_files_time():
    # Filter out constantly-rotating logs before awk drops the filename column.
    cmd = "ls -l --time-style=long-iso /var/log/ | grep -v daemon-ocf.log " \
          "| grep -v sm-scheduler.log | grep -v auth.log | awk '{print $6,$7}'"
    result = con_ssh.exec_sudo_cmd(cmd)[1]
    return str.split(result, "\n")


def get_logs_internal_times():
    intcmd = "head -1 /var/log/*.log | cut -d \",\" -d \".\" -f 1 | sed 's/T/ /' |"
    grepcmd = "grep -v = | grep -v + | grep -v [a-z] | sed '/^$/d'"
    cmd = intcmd + grepcmd
    result = con_ssh.exec_sudo_cmd(cmd)[1]
    return str.split(result, "\n")


def get_alarm_timestamp():
    args = "--uuid |grep controller-0 | awk 'FNR==1 {print $2}'| cut -d \"|\" -f 1 | sed '/^$/d'"
    uuid = cli.fm('alarm-list', args, ssh_client=con_ssh, auth_info=auth_info)[1]
    args1 = "{}|grep -w timestamp | awk '{{print $4}}' | sed 's/T/ /'| cut -d \".\" -f 1".format(uuid)
    initial_alarm_time = cli.fm('alarm-show', args1, ssh_client=con_ssh, auth_info=auth_info)[1]
    return initial_alarm_time


def change_system_timezone(utc_system_time):
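    # Re-point /etc/localtime at the Mexico/General zoneinfo (UTC-6/-5) so the
    # local time diverges from the default UTC.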
con_ssh.exec_sudo_cmd("rm /etc/localtime")
con_ssh.exec_sudo_cmd("ln -s /usr/share/zoneinfo/Mexico/General /etc/localtime")
time = con_ssh.exec_cmd("date +'%Y-%m-%d %H:%M:%S'")[1]
assert time != utc_system_time


def compare_file_times(time_difference, logs_time, logs_time_modified):
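    # Directory listings only carry minute resolution, so allow roughly an
    # hour of slack (3800 s) on either side of the measured timezone offset.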
    for index in range(0, 10):
        result = (datetime.datetime.strptime(logs_time[index], '%Y-%m-%d %H:%M') -
                  datetime.datetime.strptime(logs_time_modified[index], '%Y-%m-%d %H:%M')).seconds
        assert time_difference - 3800 < result < time_difference + 3800


def compare_logs_times(internal_log_times_1, internal_log_times_2):
    for index in range(0, 10):
        result = (datetime.datetime.strptime(internal_log_times_1[index], '%Y-%m-%d %H:%M:%S') -
                  datetime.datetime.strptime(internal_log_times_2[index], '%Y-%m-%d %H:%M:%S')).seconds
        assert result <= 500


@mark.maintenance
def test_time_stamp_correct_alarms_after_time_zone_updated(no_simplex):
    """
    63-Time-Stamp-Correct-Alarms-After-Time-Zone-Updated.robot

    Args:
        no_simplex:

    Returns:

    """
    utc_system_time = con_ssh.exec_cmd("date +'%Y-%m-%d %H:%M:%S'")[1]
    logs_time = get_files_time()
    internal_log_times_1 = get_logs_internal_times()
    initial_alarm_time = get_alarm_timestamp()
    change_system_timezone(utc_system_time)
    logs_time_modified = get_files_time()
    internal_log_times_2 = get_logs_internal_times()
    mx_system_time = con_ssh.exec_cmd("date +'%Y-%m-%d %H:%M:%S'")[1]
    modified_alarm_time = get_alarm_timestamp()
    time_difference = (datetime.datetime.strptime(utc_system_time, '%Y-%m-%d %H:%M:%S') -
                       datetime.datetime.strptime(mx_system_time, '%Y-%m-%d %H:%M:%S')).seconds
    alarms_time_diff = (datetime.datetime.strptime(initial_alarm_time, '%Y-%m-%d %H:%M:%S') -
                        datetime.datetime.strptime(modified_alarm_time, '%Y-%m-%d %H:%M:%S')).seconds
    assert alarms_time_diff >= time_difference
    compare_file_times(time_difference, logs_time, logs_time_modified)
    compare_logs_times(internal_log_times_1, internal_log_times_2)
    con_ssh.exec_sudo_cmd("rm /etc/localtime")
    con_ssh.exec_sudo_cmd("ln -s /usr/share/zoneinfo/UTC /etc/localtime")


@@ -0,0 +1,231 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to verify deleting and re-adding a controller node.
#
# Author(s): Dongqi.Chen <dongqix.chen@intel.com>
#
###
import subprocess
import time

from pytest import mark

from testfixtures.horizon import admin_home_pg, driver
from consts.auth import Tenant
from keywords import host_helper, system_helper, storage_helper, container_helper, kube_helper
from utils import exceptions, cli, table_parser
from utils.clients.ssh import ControllerClient
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG


def wait_for_host_delete_status(host):
    LOG.info("waiting for {} to delete".format(host))
    end_time = time.time() + 300
    exists = system_helper.host_exists(host)
    while time.time() < end_time:
        if not exists:
            LOG.info("{} has been deleted".format(host))
            return 0
        time.sleep(20)
        exists = system_helper.host_exists(host)
    err_msg = "Timed out waiting for {} to delete".format(host)
    raise exceptions.VMTimeout(err_msg)


def wait_for_host_install_status(host):
    LOG.info("waiting for {} install_state status: completed".format(host))
    end_time = time.time() + 2400
    current_status = system_helper.get_host_values(host, "install_state")[0]
    while time.time() < end_time:
        if current_status == "completed":
            LOG.info("host status has reached completed")
            return 0
        time.sleep(30)
        current_status = system_helper.get_host_values(host, "install_state")[0]
    err_msg = "Timed out waiting for {} install_state status: completed. {} " \
              "install_state status: {}".format(host, host, current_status)
    raise exceptions.VMTimeout(err_msg)


def wait_for_stor_configure_status():
    LOG.info("waiting for controller-1 storage state: configured")
    end_time = time.time() + 300
    current_status = storage_helper.get_host_stors("controller-1", field="state")[0]
    while time.time() < end_time:
        if current_status == "configured":
            LOG.info("controller-1 storage state has reached configured")
            return 0
        time.sleep(20)
        current_status = storage_helper.get_host_stors("controller-1", field="state")[0]
    err_msg = "Timed out waiting for controller-1 storage state: configured."
    raise exceptions.VMTimeout(err_msg)


def install_virtual_node(host):
    """
    Add the given virtual node to the system and wait for it to install.

    Args:
        host:

    Returns:

    """
    LOG.info("install {} virtual start".format(host))
cmd = "virsh list | grep {} | awk '{{print$2}}'".format(host)
obj = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
virsh_name = str(obj.stdout.readline(), encoding="utf-8").replace('\n', '')
cmd = "virsh -c qemu:///system domiflist {} | grep br2 | awk '{{print $5}}'".format(virsh_name)
obj = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
assert obj, "failed to get domain {}".format(virsh_name)
mac_address = str(obj.stdout.readline(), encoding="utf-8")
cli.system('host-add', ' -n {} -p controller -m {}'.format(host, mac_address))
wait_for_host_install_status(host)


def unlock_host(host, con_ssh):
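    # An stx-openstack apply that is still in flight would block host-unlock,
    # so abort it before unlocking.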
    application_status = container_helper.get_apps(application="stx-openstack")[0]
    if application_status == "applying":
        container_helper.abort_app("stx-openstack")
    host_helper.unlock_host(host, con_ssh=con_ssh, available_only=False,
                            check_hypervisor_up=False, check_webservice_up=False,
                            check_subfunc=False, check_containers=False)


def create_storage_function(host, con_ssh):
    """
    Enable the host's /dev/sdb disk as a Ceph Object Storage Device (OSD).

    Returns:

    """
    LOG.info("create storage stor start")
    disk_uuid = storage_helper.get_host_disks(host, **{"device_node": "/dev/sdb"})[0]
    tier_uuid = storage_helper.get_storage_tiers("ceph_cluster", **{"name": "storage"})[0]
    cli.system("host-stor-add", " {} {} --tier-uuid {}".format(host, disk_uuid, tier_uuid),
               ssh_client=con_ssh, auth_info=Tenant.get('admin_platform'))


def reinstall_virtual_controller():
    """
    Reinstall the deleted controller

    Returns:

    """
    install_virtual_node("controller-1")
    con_ssh = ControllerClient.get_active_controller()
    auth_info = Tenant.get('admin_platform')
    # Configuring OAM Network and Cluster Host Interface for controller-1
    # Provide OAM Network Interface
    LOG.info("Provide OAM Network Interface")
    table_ = table_parser.table(cli.system("interface-network-list", "controller-0",
                                           ssh_client=con_ssh,
                                           auth_info=auth_info)[1])
    # Get OAM interface
    oam_if = table_parser.get_values(table_, "ifname", **{"network_name": "oam"})[0]
    args = " -n oam0 -c platform controller-1 {}".format(oam_if)
    cli.system('host-if-modify', args, ssh_client=con_ssh,
               auth_info=auth_info)
    cli.system("interface-network-assign", "controller-1 oam0 oam", ssh_client=con_ssh,
               auth_info=auth_info)
    # Setup Cluster-host Interfaces
    LOG.info("Setup Cluster-host Interfaces")
    cli.system("interface-network-assign", "controller-1 mgmt0 cluster-host", ssh_client=con_ssh,
               auth_info=auth_info)
    if system_helper.is_aio_duplex():
        # Duplex
        LOG.info("Configure data interfaces for controller-1.")
        # Get Interface UUID
        data0ifuuid = host_helper.get_host_interfaces("controller-1", field="uuid",
                                                      show_all=True, **{"ports": "[u'eth1000']"})[0]
        data1ifuuid = host_helper.get_host_interfaces("controller-1", field="uuid",
                                                      show_all=True, **{"ports": "[u'eth1001']"})[0]
        # Add Interface To Data Network
        args0 = "-m 1500 -n data0 -c data controller-1 {}".format(data0ifuuid)
        args1 = "-m 1500 -n data1 -c data controller-1 {}".format(data1ifuuid)
        cli.system('host-if-modify', args0, ssh_client=con_ssh, auth_info=auth_info)
        cli.system('host-if-modify', args1, ssh_client=con_ssh, auth_info=auth_info)
        cli.system("interface-datanetwork-assign", "controller-1 {} {}"
                   .format(data0ifuuid, "physnet0"),
                   ssh_client=con_ssh, auth_info=auth_info)
        cli.system("interface-datanetwork-assign", "controller-1 {} {}"
                   .format(data1ifuuid, "physnet1"),
                   ssh_client=con_ssh, auth_info=auth_info)
        LOG.info("Set up disk partition for nova-local volume group")
        rootfs = system_helper.get_host_values("controller-1", "rootfs_device")[0]
        uuid = storage_helper.get_host_disks("controller-1", **{"device_node": rootfs})[0]
        # Add an OSD on controller-1 for Ceph
        disk_uuid = storage_helper.get_host_disks("controller-1", **{"device_node": "/dev/sdb"})[0]
        cli.system("host-stor-add", " controller-1 {}".format(disk_uuid),
                   ssh_client=con_ssh, auth_info=auth_info)
        # Set up disk partition for nova-local volume group
        args = " -t lvm_phys_vol controller-1 {} 100".format(uuid)
        out = cli.system('host-disk-partition-add', args, ssh_client=con_ssh, auth_info=auth_info)[1]
        new_uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid")
        # Add Local Volume Group
        cli.system("host-lvg-add", "controller-1 nova-local", ssh_client=con_ssh,
                   auth_info=auth_info)
        # Add Physical Volume
        cli.system("host-pv-add", "controller-1 nova-local {}".format(new_uuid), ssh_client=con_ssh,
                   auth_info=auth_info)
        # Enable Containerized Services
        labels = ["openstack-control-plane", "openstack-compute-node", "openvswitch", "sriov"]
        host_helper.assign_host_labels("controller-1", labels, unlock=False)
        unlock_host("controller-1", con_ssh)
    else:
        # MN-Local
        host_helper.assign_host_labels("controller-1", ["openstack-control-plane"], unlock=False)
        unlock_host("controller-1", con_ssh)
        if not system_helper.is_storage_system():
            # Add OSD to tier
            create_storage_function("controller-1", con_ssh)
            wait_for_stor_configure_status()
    # check ceph status
    tuple_ = con_ssh.exec_sudo_cmd('ceph osd ls')
    for osd_id in tuple_[1].split('\n'):
        assert storage_helper.is_osd_up(osd_id, con_ssh), "OSD status is down"


@mark.maintenance
def test_verify_add_delete_controller(no_simplex, admin_home_pg, no_bare_metal):
    """
    15-Verify-Add-Delete-Controller.robot

    Args:
        no_simplex:
        admin_home_pg:
        no_bare_metal:

    Returns:

    """
    LOG.info("Go to Host Inventory")
    hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
    hostinventory_pg.go_to_target_page()
    standby = system_helper.get_standby_controller_name()
    if standby != 'controller-1':
        host_helper.swact_host('controller-1')
        standby = 'controller-1'
    hostinventory_pg.lock_host(standby)
    system_helper.wait_for_hosts_states(standby, timeout=360, check_interval=30,
                                        availability=['online'])
    with host_helper.ssh_to_host(standby) as host_ssh:
        LOG.info("Clear partition information")
        # Wipe the partition table on /dev/sda so the host boots from ipxe on reinstall
        host_ssh.exec_sudo_cmd("dd if=/dev/zero of=/dev/sda bs=512 count=1")
    hostinventory_pg.delete_host(standby)
    wait_for_host_delete_status(standby)
    reinstall_virtual_controller()
    storage_helper.wait_for_ceph_health_ok(timeout=900, check_interval=30)
    application_status = container_helper.get_apps(application="stx-openstack")[0]
    if application_status == "applying":
        container_helper.abort_app(app_name="stx-openstack")
    pods_status = kube_helper.wait_for_pods_healthy(namespace="openstack", timeout=20, fail_ok=True)
    if not pods_status:
        container_helper.remove_app(app_name="stx-openstack", applied_timeout=600)
        container_helper.apply_app(app_name="stx-openstack", applied_timeout=3600,
                                   check_interval=30, wait_for_alarm_gone=False)

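The three wait_for_*_status helpers above share the same poll-until-timeout shape; a minimal consolidation sketch, reusing only names already imported in that file (the generic wait_for helper itself is illustrative, not part of the framework):

    def wait_for(check, timeout, interval, err_msg):
        # Poll check() until it returns truthy or the deadline passes.
        end_time = time.time() + timeout
        while time.time() < end_time:
            if check():
                return 0
            time.sleep(interval)
        raise exceptions.VMTimeout(err_msg)

    def wait_for_host_delete_status(host):
        return wait_for(lambda: not system_helper.host_exists(host),
                        timeout=300, interval=20,
                        err_msg="Timed out waiting for {} to delete".format(host))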

@@ -0,0 +1,70 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that no power-off/power-down options are offered for unlocked controller and compute nodes via the GUI.
#
# Author(s): Dongqi.Chen <dongqix.chen@intel.com>
#
###
from pytest import mark
from selenium.webdriver.common import by

from testfixtures.horizon import admin_home_pg, driver
from keywords import system_helper
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG


@mark.maintenance
def test_verify_host_powerdown_powerup(no_simplex, admin_home_pg):
    """
    45-No-Power-Off-Option-GUI-Compute.robot
    47-Verify-Host-Powerdown-Powerup.robot
    50-Verify-No-Poweroff-Option-Controller-Gui.robot

    Args:
        no_simplex:
        admin_home_pg:

    Returns:

    """
    secondary_actions_locator = (
        by.By.CSS_SELECTOR,
        'td.actions_column *.btn-group > ul > li > a, button')
    secondary_actions_opener_locator = (
        by.By.CSS_SELECTOR,
        'td.actions_column *.btn-group > *.btn:nth-child(2)')
    LOG.info("Go to Host Inventory")
    hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
    hostinventory_pg.go_to_target_page()
    computes = system_helper.get_computes()
    controllers = system_helper.get_controllers()
    for controller in controllers:
        assert hostinventory_pg.get_host_info(controller, 'Admin State') == 'Unlocked'
        assert hostinventory_pg.get_host_info(controller, 'Operational State') == 'Enabled'
        row = hostinventory_pg._get_row_with_host_name(controller)
        row._get_element(*secondary_actions_opener_locator).click()
        elements = row._get_elements(*secondary_actions_locator)
        for element in elements:
            LOG.info(element.text)
            assert 'Power-Off Host' != element.text
            assert 'Power-Down Host' != element.text
        row._get_element(*secondary_actions_opener_locator).click()
    if not system_helper.is_aio_system():
        for compute in computes:
            assert hostinventory_pg.get_host_info(compute, 'Admin State') == 'Unlocked'
            assert hostinventory_pg.get_host_info(compute, 'Operational State') == 'Enabled'
            row = hostinventory_pg._get_row_with_host_name(compute)
            row._get_element(*secondary_actions_opener_locator).click()
            elements = row._get_elements(*secondary_actions_locator)
            for element in elements:
                LOG.info(element.text)
                assert 'Power-Off Host' != element.text
                assert 'Power-Down Host' != element.text
            row._get_element(*secondary_actions_opener_locator).click()

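The controller and compute loops in this test are identical apart from the host list; a small helper could fold them together (a sketch only, with an illustrative name and parameters):

    def _assert_no_power_off_actions(pg, hosts, opener_locator, actions_locator):
        # Open each host's row-actions dropdown and verify no power-off entries exist.
        for host in hosts:
            assert pg.get_host_info(host, 'Admin State') == 'Unlocked'
            assert pg.get_host_info(host, 'Operational State') == 'Enabled'
            row = pg._get_row_with_host_name(host)
            row._get_element(*opener_locator).click()
            for element in row._get_elements(*actions_locator):
                assert element.text not in ('Power-Off Host', 'Power-Down Host')
            row._get_element(*opener_locator).click()  # close the dropdown again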

@@ -39,6 +39,11 @@ class HostsTable(tables.TableRegion):
     def unlock_host(self, unlock_button, row):
         unlock_button.click()
 
+    @tables.bind_row_action('delete')
+    def delete_host(self, delete_button, row):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver)
+
     @tables.bind_row_anchor_column('Host Name')
     def go_to_host_detail_page(self, row_link, row):
         row_link.click()
@@ -133,6 +138,11 @@ class HostInventoryPage(basepage.BasePage):
         row = self._get_row_with_host_name(name)
         self.hosts_table(name).unlock_host(row)
 
+    def delete_host(self, name):
+        row = self._get_row_with_host_name(name)
+        confirm_form = self.hosts_table(name).delete_host(row)
+        confirm_form.submit()
+
     def is_host_present(self, name):
         return bool(self._get_row_with_host_name(name))
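
Note the split between the two new methods: the table-level delete_host clicks the row action and returns the confirmation modal without submitting it, while the page-level delete_host submits that form. This is what lets the add/delete-controller test above drive the whole deletion with a single hostinventory_pg.delete_host(standby) call.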