Merge "Upgrade rework"

This commit is contained in:
Jenkins 2016-07-12 16:18:09 +00:00 committed by Gerrit Code Review
commit 8a584b9305
4 changed files with 383 additions and 384 deletions

View File

@ -482,21 +482,33 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
if assert_ec_equal is None: if assert_ec_equal is None:
assert_ec_equal = [0] assert_ec_equal = [0]
result = remote.execute(cmd) result = remote.execute(cmd)
if result['exit_code'] not in assert_ec_equal:
error_details = {
'command': cmd,
'host': remote.host,
'stdout': result['stdout'],
'stderr': result['stderr'],
'exit_code': result['exit_code']}
error_msg = (err_msg or "Unexpected exit_code returned:" result['stdout_str'] = ''.join(result['stdout']).strip()
" actual {0}, expected {1}." result['stdout_len'] = len(result['stdout'])
.format(error_details['exit_code'], result['stderr_str'] = ''.join(result['stderr']).strip()
' '.join(map(str, assert_ec_equal)))) result['stderr_len'] = len(result['stderr'])
log_msg = ("{0} Command: '{1}' Details: {2}".format(error_msg,
cmd, details_log = (
error_details)) "Host: {host}\n"
"Command: '{cmd}'\n"
"Exit code: {code}\n"
"STDOUT:\n{stdout}\n"
"STDERR:\n{stderr}".format(
host=remote.host, cmd=cmd, code=result['exit_code'],
stdout=result['stdout_str'], stderr=result['stderr_str']
))
if result['exit_code'] not in assert_ec_equal:
error_msg = (
err_msg or
"Unexpected exit_code returned: actual {0}, expected {1}."
"".format(
result['exit_code'],
' '.join(map(str, assert_ec_equal))))
log_msg = (
"{0} Command: '{1}' "
"Details:\n{2}".format(
error_msg, cmd, details_log))
logger.error(log_msg) logger.error(log_msg)
if raise_on_assert: if raise_on_assert:
raise Exception(log_msg) raise Exception(log_msg)
@ -504,11 +516,6 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
if clear: if clear:
remote.clear() remote.clear()
result['stdout_str'] = ''.join(result['stdout'])
result['stdout_len'] = len(result['stdout'])
result['stderr_str'] = ''.join(result['stderr'])
result['stderr_len'] = len(result['stderr'])
if jsonify: if jsonify:
try: try:
result['stdout_json'] = json_deserialize(result['stdout_str']) result['stdout_json'] = json_deserialize(result['stdout_str'])

View File

@ -22,18 +22,17 @@ from six.moves.urllib.error import HTTPError
# pylint: enable=import-error # pylint: enable=import-error
from fuelweb_test.helpers.decorators import log_snapshot_after_test from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests import base_test_case from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
@test(groups=["clone_env_for_os_upgrade", "os_upgrade"]) @test(groups=["clone_env_for_os_upgrade"],
class TestCloneEnv(base_test_case.TestBasic): depends_on_groups=["upgrade_ceph_ha_restore"],
enabled=False)
class TestCloneEnv(TestBasic):
snapshot = 'upgrade_ha_ceph_for_all_ubuntu_neutron_vlan' snapshot = 'upgrade_ha_ceph_for_all_ubuntu_neutron_vlan'
@test( @test(groups=["test_clone_environment"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_environment"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_environment(self): def test_clone_environment(self):
"""Test clone environment """Test clone environment
@ -117,7 +116,8 @@ class TestCloneEnv(base_test_case.TestBasic):
network["vlan_start"]) network["vlan_start"])
@test( @test(
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan], depends_on_groups=['upgrade_old_nodes'],
# TODO(astepanov) maintain names changes later
groups=["test_clone_nonexistent_cluster"]) groups=["test_clone_nonexistent_cluster"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_nonexistent_cluster(self): def test_clone_nonexistent_cluster(self):
@ -143,9 +143,7 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_clone_wo_name_in_body"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_wo_name_in_body"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_wo_name_in_body(self): def test_clone_wo_name_in_body(self):
"""Test clone without name in POST body """Test clone without name in POST body
@ -175,9 +173,7 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_clone_wo_release_id_in_body"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_wo_release_id_in_body"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_wo_release_id_in_body(self): def test_clone_wo_release_id_in_body(self):
"""Test clone without release id in POST body """Test clone without release id in POST body
@ -204,9 +200,7 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_clone_with_empty_body"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_with_empty_body"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_with_empty_body(self): def test_clone_with_empty_body(self):
"""Test clone with empty body """Test clone with empty body
@ -229,16 +223,14 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_clone_with_nonexistent_release_id"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_with_nonexistent_release_id"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_with_nonexistent_release_id(self): def test_clone_with_nonexistent_release_id(self):
"""Test clone with nonexistent release id in POST body """Test clone with nonexistent release id in POST body
Scenario: Scenario:
1. Revert snapshot "upgrade_ha_ceph_for_all_ubuntu_neutron_vlan" 1. Revert snapshot "upgrade_ha_ceph_for_all_ubuntu_neutron_vlan"
2. Try to clone environment with nonexistent 2. Try to clone environment with nonexistent
release id in POST body release id in POST body
3. Check status code 3. Check status code
""" """
@ -260,9 +252,7 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_clone_with_incorrect_release_id"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_clone_with_incorrect_release_id"])
@log_snapshot_after_test @log_snapshot_after_test
def test_clone_with_incorrect_release_id(self): def test_clone_with_incorrect_release_id(self):
"""Test clone with incorrect release id in POST body """Test clone with incorrect release id in POST body
@ -291,9 +281,7 @@ class TestCloneEnv(base_test_case.TestBasic):
else: else:
fail("Doesn't raise needed error") fail("Doesn't raise needed error")
@test( @test(groups=["test_double_clone_environment"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["test_double_clone_environment"])
@log_snapshot_after_test @log_snapshot_after_test
def test_double_clone_environment(self): def test_double_clone_environment(self):
"""Test double clone environment """Test double clone environment

View File

@ -22,19 +22,17 @@ from six.moves.urllib.error import HTTPError
# pylint: enable=import-error # pylint: enable=import-error
from fuelweb_test.helpers.decorators import log_snapshot_after_test from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.tests import base_test_case
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
@test(groups=["reassign_node_for_os_upgrade", "os_upgrade"]) @test(groups=["reassign_node_for_os_upgrade", "os_upgrade"],
class TestReassignNode(base_test_case.TestBasic): depends_on_groups=["upgrade_ceph_ha_restore"],
enabled=False)
class TestReassignNode(TestBasic):
snapshot = 'upgrade_ha_ceph_for_all_ubuntu_neutron_vlan' snapshot = 'upgrade_ha_ceph_for_all_ubuntu_neutron_vlan'
@test( @test(groups=["reassign_node_to_cloned_environment"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["reassign_node_to_cloned_environment"])
@log_snapshot_after_test @log_snapshot_after_test
def reassign_node_to_cloned_environment(self): def reassign_node_to_cloned_environment(self):
"""Test reassign node """Test reassign node
@ -113,9 +111,7 @@ class TestReassignNode(base_test_case.TestBasic):
) )
self.fuel_web.assert_task_success(task) self.fuel_web.assert_task_success(task)
@test( @test(groups=["reassign_node_to_nonexistent_cluster"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["reassign_node_to_nonexistent_cluster"])
@log_snapshot_after_test @log_snapshot_after_test
def reassign_node_to_nonexistent_cluster(self): def reassign_node_to_nonexistent_cluster(self):
"""Test reassign node to nonexistent cluster """Test reassign node to nonexistent cluster
@ -149,9 +145,7 @@ class TestReassignNode(base_test_case.TestBasic):
"to non-existing" "to non-existing"
"cluster 123456".format(controller_node["id"])) "cluster 123456".format(controller_node["id"]))
@test( @test(groups=["reassign_node_with_empty_body"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["reassign_node_with_empty_body"])
@log_snapshot_after_test @log_snapshot_after_test
def reassign_node_with_empty_body(self): def reassign_node_with_empty_body(self):
"""Test reassign node with empty body """Test reassign node with empty body
@ -188,9 +182,7 @@ class TestReassignNode(base_test_case.TestBasic):
fail("Doesn't raise HTTP 400 error on request" fail("Doesn't raise HTTP 400 error on request"
"to reassigning node with empty body") "to reassigning node with empty body")
@test( @test(groups=["reassign_node_with_incorrect_node"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["reassign_node_with_incorrect_node"])
@log_snapshot_after_test @log_snapshot_after_test
def reassign_node_with_incorrect_node(self): def reassign_node_with_incorrect_node(self):
"""Test reassign node with incorrect node in POST body """Test reassign node with incorrect node in POST body
@ -231,9 +223,7 @@ class TestReassignNode(base_test_case.TestBasic):
fail("Doesn't raise HTTP 400 error on request" fail("Doesn't raise HTTP 400 error on request"
"to reassigning node with incorrect node_id") "to reassigning node with incorrect node_id")
@test( @test(groups=["reassign_nonexistent_node_to_cloned_environment"])
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["reassign_nonexistent_node_to_cloned_environment"])
@log_snapshot_after_test @log_snapshot_after_test
def reassign_nonexistent_node_to_cloned_environment(self): def reassign_nonexistent_node_to_cloned_environment(self):
"""Test reassign node with nonexistent node in POST body """Test reassign node with nonexistent node in POST body

View File

@ -1,4 +1,4 @@
# Copyright 2015 Mirantis, Inc. # Copyright 2016 Mirantis, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain # not use this file except in compliance with the License. You may obtain
@ -12,225 +12,140 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import time from proboscis.asserts import assert_equal, assert_not_equal
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_true from proboscis.asserts import assert_true
from proboscis import test from proboscis import test
from proboscis import SkipTest from proboscis import SkipTest
from fuelweb_test.helpers import checkers from fuelweb_test import logger
from fuelweb_test.helpers.decorators import log_snapshot_after_test from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers.utils import install_pkg from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.tests import base_test_case from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test import settings from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade import \
DataDrivenUpgradeBase
@test(groups=["prepare_os_upgrade"]) @test(groups=["os_upgrade"])
class PrepareOSupgrade(base_test_case.TestBasic): class TestOSupgrade(DataDrivenUpgradeBase):
@staticmethod
def check_release_requirements():
if OPENSTACK_RELEASE_UBUNTU not in OPENSTACK_RELEASE:
raise SkipTest('{0} not in {1}'.format(
OPENSTACK_RELEASE_UBUNTU, OPENSTACK_RELEASE))
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_9], def minimal_check(self, seed_cluster_id, nwk_check=False):
groups=["ha_ceph_for_all_ubuntu_neutron_vlan"]) def next_step():
@log_snapshot_after_test return self.current_log_step + 1
def ha_ceph_for_all_ubuntu_neutron_vlan(self):
"""Deploy cluster with ha mode, ceph for all, neutron vlan
Scenario: if nwk_check:
1. Create cluster self.show_step(next_step())
2. Add 3 nodes with controller role self.fuel_web.verify_network(seed_cluster_id)
3. Add 3 nodes with compute and ceph OSD roles
4. Deploy the cluster
5. Run ostf
6. Make snapshot
Duration 50m self.show_step(next_step())
Snapshot ha_ceph_for_all_ubuntu_neutron_vlan self.fuel_web.run_single_ostf_test(
""" cluster_id=seed_cluster_id, test_sets=['sanity'],
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: test_name=('fuel_health.tests.sanity.test_sanity_identity'
raise SkipTest('Openstack release is not Ubuntu') '.SanityIdentityTest.test_list_users'))
self.check_run('ha_ceph_for_all_ubuntu_neutron_vlan') def check_ceph_health(self, ip):
self.env.revert_snapshot("ready_with_9_slaves") ceph_health = self.ssh_manager.execute_on_remote(
ip=ip, cmd="ceph health")["stdout_str"]
data = { # There are an issue with PG calculation - LP#1464656
'volumes_ceph': True, try:
'images_ceph': True, assert_true("HEALTH_OK" in ceph_health,
'ephemeral_ceph': True, "Ceph health is not ok! Inspect output below:\n"
'objects_ceph': True, "{!r}".format(ceph_health))
'volumes_lvm': False, except AssertionError:
'net_provider': 'neutron', logger.warning("Ceph health is not ok! trying to check LP#1464656")
'net_segment_type': settings.NEUTRON_SEGMENT['vlan'] if "HEALTH_WARN" in ceph_health and \
} "too many PGs per OSD" in ceph_health:
logger.info("Known issue in ceph - see LP#1464656 for details")
else:
raise
cluster_id = self.fuel_web.create_cluster( @property
name=self.__class__.__name__, def orig_cluster_id(self):
mode=settings.DEPLOYMENT_MODE_HA, return self.fuel_web.client.get_cluster_id('prepare_upgrade_ceph_ha')
settings=data
)
self.fuel_web.update_nodes( @test(depends_on_groups=['upgrade_ceph_ha_restore'],
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['compute', 'ceph-osd']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan",
is_make=True)
@test(groups=["os_upgrade"],
enabled=False)
class TestOSupgrade(base_test_case.TestBasic):
@test(groups=["upgrade_ha_ceph_for_all_ubuntu_neutron_vlan"])
@log_snapshot_after_test
def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
"""Upgrade master node ha mode, ceph for all, neutron vlan
Scenario:
1. Revert snapshot with ha mode, ceph for all, neutron vlan env
2. Run upgrade on master
3. Check that upgrade was successful
"""
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE:
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
self.env.revert_snapshot('ha_ceph_for_all_ubuntu_neutron_vlan')
cluster_id = self.fuel_web.get_last_created_cluster()
# TODO: Upgrade procedure were changed and upgrade to 9.0 not
# implemented yet. This should be replaced with actual code when it
# will be ready
# self.env.admin_actions.upgrade_master_node()
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.wait_nodes_get_online_state(
self.env.d_env.nodes().slaves[:6])
self.fuel_web.assert_fuel_version(settings.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
is_make=True)
@test(depends_on=[upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
groups=["prepare_before_os_upgrade"])
@log_snapshot_after_test
def prepare_before_os_upgrade(self):
"""Make prepare actions before os upgrade
Scenario:
1. Revert snapshot upgraded with ceph, neutron vlan
2. yum update
3. pip install pyzabbix
4. yum install fuel-octane
5. Create mirrors
"""
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE:
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('prepare_before_os_upgrade')
self.env.revert_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan")
with self.env.d_env.get_admin_remote() as remote:
remote.execute("yum -y update")
remote.execute("pip install pyzabbix")
install_pkg(remote, "fuel-octane")
cmd = (
"sed -i 's/DEBUG=\"no\"/DEBUG=\"yes\"/' {}".format(
'/etc/fuel-createmirror/config/ubuntu.cfg'
)
)
remote.execute(cmd)
remote.execute("/usr/bin/fuel-createmirror")
self.env.make_snapshot("prepare_before_os_upgrade", is_make=True)
@test(depends_on=[prepare_before_os_upgrade],
groups=["os_upgrade_env"]) groups=["os_upgrade_env"])
@log_snapshot_after_test @log_snapshot_after_test
def os_upgrade_env(self): def os_upgrade_env(self):
"""Octane clone target environment """Octane clone target environment
Scenario: Scenario:
1. Revert snapshot prepare_before_os_upgrade 1. Revert snapshot upgrade_ceph_ha_restore
2. run octane upgrade-env <target_env_id> 2. Run "octane upgrade-env <orig_env_id>"
3. Ensure that new cluster was created with correct release
""" """
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: self.check_release_requirements()
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('os_upgrade_env') self.check_run('os_upgrade_env')
self.env.revert_snapshot("prepare_before_os_upgrade") self.env.revert_snapshot("upgrade_ceph_ha_restore", skip_timesync=True)
self.install_octane()
cluster_id = self.fuel_web.get_last_created_cluster() self.ssh_manager.execute_on_remote(
ip=self.env.get_admin_node_ip(),
cmd="octane upgrade-env {0}".format(self.orig_cluster_id),
err_msg="'upgrade-env' command failed, inspect logs for details")
with self.env.d_env.get_admin_remote() as remote: new_cluster_id = self.fuel_web.get_last_created_cluster()
octane_upgrade_env = remote.execute( assert_not_equal(self.orig_cluster_id, new_cluster_id,
"octane upgrade-env {0}".format(cluster_id) "Cluster IDs are the same: {!r} and {!r}".format(
) self.orig_cluster_id, new_cluster_id))
assert_equal(self.fuel_web.get_cluster_release_id(new_cluster_id),
cluster_id = self.fuel_web.get_last_created_cluster() self.fuel_web.client.get_release_id(
release_name='Liberty on Ubuntu 14.04'))
assert_equal(0, octane_upgrade_env['exit_code'])
assert_equal(cluster_id,
int(octane_upgrade_env['stdout'][0].split()[0]))
self.env.make_snapshot("os_upgrade_env", is_make=True) self.env.make_snapshot("os_upgrade_env", is_make=True)
@test(depends_on=[os_upgrade_env], @test(depends_on=[os_upgrade_env], groups=["upgrade_first_cic"])
groups=["upgrade_first_cic"])
@log_snapshot_after_test @log_snapshot_after_test
def upgrade_first_cic(self): def upgrade_first_cic(self):
"""Upgrade first controller """Upgrade first controller
Scenario: Scenario:
1. Revert snapshot os_upgrade_env 1. Revert snapshot os_upgrade_env
2. run octane upgrade-node --isolated <seed_env_id> <node_id> 2. Select cluster for upgrade and upgraded cluster
3. Select controller for upgrade
4. Run "octane upgrade-node --isolated <seed_env_id> <node_id>"
5. Check tasks status after upgrade run completion
6. Run minimal OSTF sanity check (user list) on target cluster
""" """
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: self.check_release_requirements()
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('upgrade_first_cic') self.check_run('upgrade_first_cic')
self.env.revert_snapshot("os_upgrade_env")
target_cluster_id = self.fuel_web.client.get_cluster_id( self.show_step(1, initialize=True)
'TestOSupgrade' self.env.revert_snapshot("os_upgrade_env")
) self.install_octane()
self.show_step(2)
seed_cluster_id = self.fuel_web.get_last_created_cluster() seed_cluster_id = self.fuel_web.get_last_created_cluster()
self.show_step(3)
controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
target_cluster_id, ["controller"] self.orig_cluster_id, ["controller"])
) self.show_step(4)
with self.env.d_env.get_admin_remote() as remote: self.ssh_manager.execute_on_remote(
octane_upgrade_node = remote.execute( ip=self.ssh_manager.admin_ip,
"octane upgrade-node --isolated {0} {1}".format( cmd="octane upgrade-node --isolated "
seed_cluster_id, controllers[-1]["id"]) "{0} {1}".format(seed_cluster_id, controllers[-1]["id"]),
) err_msg="octane upgrade-node failed")
assert_equal(0, octane_upgrade_node['exit_code'],
"octane upgrade-node returns non zero" self.show_step(5)
"status code,"
"current result {}".format(octane_upgrade_node))
tasks_started_by_octane = [ tasks_started_by_octane = [
task for task in self.fuel_web.client.get_tasks() task for task in self.fuel_web.client.get_tasks()
if task['cluster'] == seed_cluster_id if task['cluster'] == seed_cluster_id]
]
for task in tasks_started_by_octane: for task in tasks_started_by_octane:
self.fuel_web.assert_task_success(task) self.fuel_web.assert_task_success(task)
self.show_step(6)
self.minimal_check(seed_cluster_id=seed_cluster_id)
self.env.make_snapshot("upgrade_first_cic", is_make=True) self.env.make_snapshot("upgrade_first_cic", is_make=True)
@test(depends_on=[upgrade_first_cic], @test(depends_on=[upgrade_first_cic],
@ -241,77 +156,72 @@ class TestOSupgrade(base_test_case.TestBasic):
Scenario: Scenario:
1. Revert snapshot upgrade_first_cic 1. Revert snapshot upgrade_first_cic
2. run octane upgrade-db <target_env_id> <seed_env_id> 2. Select cluster for upgrade and upgraded cluster
3. Select controller for db upgrade
4. Collect from db IDs for upgrade (used in checks)
5. Run "octane upgrade-db <orig_env_id> <seed_env_id>"
6. Check upgrade status
""" """
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: self.check_release_requirements()
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('upgrade_db') self.check_run('upgrade_db')
self.env.revert_snapshot("upgrade_first_cic")
target_cluster_id = self.fuel_web.client.get_cluster_id( self.show_step(1, initialize=True)
'TestOSupgrade' self.env.revert_snapshot("upgrade_first_cic", skip_timesync=True)
) self.install_octane()
target_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
target_cluster_id, ["controller"] self.show_step(2)
)[0]
seed_cluster_id = self.fuel_web.get_last_created_cluster() seed_cluster_id = self.fuel_web.get_last_created_cluster()
controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
seed_cluster_id, ["controller"]
)[0]
with self.env.d_env.get_ssh_to_remote( self.show_step(3)
target_controller["ip"]) as remote: orig_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
target_ids = remote.execute( self.orig_cluster_id, ["controller"])[0]
'mysql cinder <<< "select id from volumes;"; ' seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
'mysql glance <<< "select id from images"; ' seed_cluster_id, ["controller"])[0]
'mysql neutron <<< "(select id from networks) '
'UNION (select id from routers) '
'UNION (select id from subnets)"; '
'mysql keystone <<< "(select id from project) '
'UNION (select id from user)"'
)["stdout"]
with self.env.d_env.get_admin_remote() as remote: mysql_req = (
octane_upgrade_db = remote.execute( 'mysql cinder <<< "select id from volumes;"; '
"octane upgrade-db {0} {1}".format( 'mysql glance <<< "select id from images"; '
target_cluster_id, seed_cluster_id) 'mysql neutron <<< "(select id from networks) '
) 'UNION (select id from routers) '
'UNION (select id from subnets)"; '
'mysql keystone <<< "(select id from project) '
'UNION (select id from user)"')
assert_equal(0, octane_upgrade_db['exit_code'], self.show_step(4)
"octane upgrade-db returns non zero" target_ids = self.ssh_manager.execute_on_remote(
"status code," ip=orig_controller["ip"], cmd=mysql_req)['stdout']
"current result is {}".format(octane_upgrade_db))
with self.env.d_env.get_ssh_to_remote(controller["ip"]) as remote: self.show_step(5)
stdout = remote.execute("crm resource status")["stdout"] self.ssh_manager.execute_on_remote(
seed_ids = remote.execute( ip=self.ssh_manager.admin_ip,
'mysql cinder <<< "select id from volumes;"; ' cmd="octane upgrade-db {0} {1}".format(
'mysql glance <<< "select id from images"; ' self.orig_cluster_id, seed_cluster_id),
'mysql neutron <<< "(select id from networks) ' err_msg="octane upgrade-db failed")
'UNION (select id from routers) '
'UNION (select id from subnets)"; '
'mysql keystone <<< "(select id from project) '
'UNION (select id from user)"'
)["stdout"]
while stdout: self.show_step(6)
current = stdout.pop(0)
crm_status = self.ssh_manager.execute_on_remote(
ip=seed_controller["ip"], cmd="crm resource status")['stdout']
while crm_status:
current = crm_status.pop(0)
if "vip" in current: if "vip" in current:
assert_true("Started" in current) assert_true("Started" in current)
elif "master_p" in current: elif "master_p" in current:
next_element = stdout.pop(0) next_element = crm_status.pop(0)
assert_true("Masters: [ node-" in next_element) assert_true("Masters: [ node-" in next_element)
elif any(x in current for x in ["ntp", "mysql", "dns"]): elif any(x in current for x in ["ntp", "mysql", "dns"]):
next_element = stdout.pop(0) next_element = crm_status.pop(0)
assert_true("Started" in next_element) assert_true("Started" in next_element)
elif any(x in current for x in ["nova", "cinder", "keystone", elif any(x in current for x in ["nova", "cinder", "keystone",
"heat", "neutron", "glance"]): "heat", "neutron", "glance"]):
next_element = stdout.pop(0) next_element = crm_status.pop(0)
assert_true("Stopped" in next_element) assert_true("Stopped" in next_element)
seed_ids = self.ssh_manager.execute_on_remote(
ip=seed_controller["ip"], cmd=mysql_req)['stdout']
assert_equal(sorted(target_ids), sorted(seed_ids), assert_equal(sorted(target_ids), sorted(seed_ids),
"Objects in target and seed dbs are different") "Objects in target and seed dbs are different")
@ -325,149 +235,253 @@ class TestOSupgrade(base_test_case.TestBasic):
Scenario: Scenario:
1. Revert snapshot upgrade_db 1. Revert snapshot upgrade_db
2. run octane upgrade-ceph <target_env_id> <seed_env_id> 2. Select cluster for upgrade and upgraded cluster
3. Run octane upgrade-ceph <orig_env_id> <seed_env_id>
4. Check CEPH health on seed env
""" """
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: self.check_release_requirements()
raise SkipTest('Openstack release is not Ubuntu')
self.check_run('upgrade_ceph') self.check_run('upgrade_ceph')
self.show_step(1, initialize=True)
self.env.revert_snapshot("upgrade_db") self.env.revert_snapshot("upgrade_db")
self.install_octane()
target_cluster_id = self.fuel_web.client.get_cluster_id( self.show_step(2)
'TestOSupgrade'
)
seed_cluster_id = self.fuel_web.get_last_created_cluster() seed_cluster_id = self.fuel_web.get_last_created_cluster()
controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
seed_cluster_id, ["controller"]
)[0]
with self.env.d_env.get_admin_remote() as remote: seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
octane_upgrade_ceph = remote.execute( seed_cluster_id, ["controller"])[0]
"octane upgrade-ceph {0} {1}".format(
target_cluster_id, seed_cluster_id)
)
assert_equal(0, octane_upgrade_ceph['exit_code'], self.show_step(3)
"octane upgrade-ceph returns non zero status code," self.ssh_manager.execute_on_remote(
"current result is {}".format(octane_upgrade_ceph)) ip=self.ssh_manager.admin_ip,
cmd="octane upgrade-ceph {0} {1}".format(
self.orig_cluster_id, seed_cluster_id),
err_msg="octane upgrade-ceph failed")
with self.env.d_env.get_ssh_to_remote(controller["ip"]) as remote: self.show_step(4)
ceph_health = remote.execute("ceph health")["stdout"][0][:-1] self.check_ceph_health(seed_controller['ip'])
assert_equal("HEALTH_OK", ceph_health)
self.env.make_snapshot("upgrade_ceph", is_make=True) self.env.make_snapshot("upgrade_ceph", is_make=True)
@test(depends_on=[upgrade_ceph], @test(depends_on=[upgrade_ceph],
groups=["upgrade_control_plane"]) groups=["upgrade_controllers"])
@log_snapshot_after_test @log_snapshot_after_test
def upgrade_control_plane(self): def upgrade_controllers(self):
"""Upgrade control plane """Upgrade control plane and remaining controllers
Scenario: Scenario:
1. Revert snapshot upgrade_ceph 1. Revert snapshot upgrade_ceph
2. run octane upgrade-control <target_env_id> <seed_env_id> 2. Select cluster for upgrade and upgraded cluster
3. run octane upgrade-node <seed_cluster_id> <node_id> <node_id> 3. Run octane upgrade-control <orig_env_id> <seed_env_id>
4. Check cluster consistency
5. Collect old controllers for upgrade
6. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>
7. Check tasks status after upgrade run completion
8. Run network verification on target cluster
9. Run minimal OSTF sanity check (user list) on target cluster
""" """
if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE: self.check_release_requirements()
raise SkipTest('Openstack release is not Ubuntu') self.check_run('upgrade_controllers')
self.check_run('upgrade_control_plane') self.show_step(1, initialize=True)
self.env.revert_snapshot("upgrade_ceph") self.env.revert_snapshot("upgrade_ceph")
self.install_octane()
target_cluster_id = self.fuel_web.client.get_cluster_id( self.show_step(2)
'TestOSupgrade'
)
seed_cluster_id = self.fuel_web.get_last_created_cluster() seed_cluster_id = self.fuel_web.get_last_created_cluster()
with self.env.d_env.get_admin_remote() as remote: self.show_step(3)
octane_upgrade_control = remote.execute( self.ssh_manager.execute_on_remote(
"octane upgrade-control {0} {1}".format( ip=self.ssh_manager.admin_ip,
target_cluster_id, seed_cluster_id) cmd="octane upgrade-control {0} {1}".format(
) self.orig_cluster_id, seed_cluster_id),
err_msg="octane upgrade-control failed")
assert_equal(0, octane_upgrade_control['exit_code'],
"octane upgrade-control returns non zero status code,"
"current result is {}".format(octane_upgrade_control))
self.show_step(4)
controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles( controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
seed_cluster_id, ["controller"] seed_cluster_id, ["controller"])
)
old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
target_cluster_id, ["compute"]
)
old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
target_cluster_id, ["controller"]
)
ping_ips = [] old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
for node in controllers + old_computes: self.orig_cluster_id, ["controller"])
for data in node["network_data"]:
if data["name"] == "management": old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
ping_ips.append(data["ip"].split("/")[0]) self.orig_cluster_id, ["compute"])
def collect_management_ips(node_list):
result = []
for item in node_list:
for data in item["network_data"]:
if data["name"] == "management":
result.append(data["ip"].split("/")[0])
return result
ping_ips = collect_management_ips(controllers + old_computes)
ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id)) ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))
non_ping_ips = [] non_ping_ips = collect_management_ips(old_controllers)
for node in old_controllers:
for data in node["network_data"]: ping_cmd = "ping -W 1 -i 1 -s 56 -c 1 -w 10 {host}"
if data["name"] == "management":
non_ping_ips.append(data["ip"].split("/")[0])
for node in controllers + old_computes: for node in controllers + old_computes:
self.ssh_manager.execute(node["ip"], "ip -s -s neigh flush all") self.ssh_manager.execute_on_remote(
ip=node["ip"], cmd="ip -s -s neigh flush all")
for ip in ping_ips: for ip in ping_ips:
assert_true(checkers.check_ping(node["ip"], ip), self.ssh_manager.execute_on_remote(
"Can not ping {0} from {1}" ip=node["ip"],
cmd=ping_cmd.format(host=ip),
err_msg="Can not ping {0} from {1}"
"need to check network" "need to check network"
" connectivity".format(ip, node["ip"])) " connectivity".format(ip, node["ip"]))
for ip in non_ping_ips: for ip in non_ping_ips:
assert_false(checkers.check_ping(node["ip"], ip), self.ssh_manager.execute_on_remote(
"Patch ports from old controllers" ip=node["ip"],
"isn't removed") cmd=ping_cmd.format(host=ip),
err_msg="Patch ports from old controllers isn't removed",
assert_ec_equal=[1, 2]) # No reply, Other errors
time.sleep(180) # TODO need to remove crm = self.ssh_manager.execute_on_remote(
# after fix of https://bugs.launchpad.net/fuel/+bug/1499696 ip=controllers[0]["ip"],
cmd="crm resource status")["stdout"]
with self.env.d_env.get_ssh_to_remote(controllers[0]["ip"]) as remote: while crm:
stdout = remote.execute("crm resource status")["stdout"] current = crm.pop(0)
while stdout:
current = stdout.pop(0)
if "vip" in current: if "vip" in current:
assert_true("Started" in current) assert_true("Started" in current)
elif "master_p" in current: elif "master_p" in current:
next_element = stdout.pop(0) next_element = crm.pop(0)
assert_true("Masters: [ node-" in next_element) assert_true("Masters: [ node-" in next_element)
elif any(x in current for x in ["ntp", "mysql", "dns"]): elif any(x in current for x in ["ntp", "mysql", "dns",
next_element = stdout.pop(0) "nova", "cinder", "keystone",
assert_true("Started" in next_element)
elif any(x in current for x in ["nova", "cinder", "keystone",
"heat", "neutron", "glance"]): "heat", "neutron", "glance"]):
next_element = stdout.pop(0) next_element = crm.pop(0)
assert_true("Started" in next_element) assert_true("Started" in next_element)
with self.env.d_env.get_admin_remote() as remote: # upgrade controllers part
octane_upgrade_node = remote.execute( self.show_step(5)
"octane upgrade-node {0} {1} {2}".format( seed_cluster_id = self.fuel_web.get_last_created_cluster()
seed_cluster_id, old_controllers[0]["id"],
old_controllers[1]["id"]) self.show_step(6)
) self.ssh_manager.execute_on_remote(
assert_equal(0, octane_upgrade_node['exit_code'], ip=self.ssh_manager.admin_ip,
"octane upgrade-node returns non zero" cmd="octane upgrade-node {0} {1}".format(
"status code," seed_cluster_id,
"current result {}".format(octane_upgrade_node)) " ".join([str(ctrl["id"]) for ctrl in old_controllers])),
err_msg="octane upgrade-node failed")
self.show_step(7)
tasks_started_by_octane = [ tasks_started_by_octane = [
task for task in self.fuel_web.client.get_tasks() task for task in self.fuel_web.client.get_tasks()
if task['cluster'] == seed_cluster_id if task['cluster'] == seed_cluster_id]
]
for task in tasks_started_by_octane: for task in tasks_started_by_octane:
self.fuel_web.assert_task_success(task) self.fuel_web.assert_task_success(task)
self.env.make_snapshot("upgrade_control_plane", is_make=True) self.show_step(8)
self.show_step(9)
self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)
self.env.make_snapshot("upgrade_controllers", is_make=True)
@test(depends_on=[upgrade_controllers], groups=["upgrade_ceph_osd"])
@log_snapshot_after_test
def upgrade_ceph_osd(self):
    """Upgrade ceph osd

    Scenario:
        1. Revert snapshot upgrade_all_controllers
        2. Select cluster for upgrade and upgraded cluster
        3. Run octane upgrade-osd <target_env_id> <seed_env_id>
        4. Check CEPH health on seed env
        5. run network verification on target cluster
        6. run minimal OSTF sanity check (user list) on target cluster
    """
    self.check_release_requirements()
    self.check_run('upgrade_ceph_osd')

    self.show_step(1, initialize=True)
    self.env.revert_snapshot("upgrade_controllers")
    self.install_octane()

    self.show_step(2)
    seed_cluster_id = self.fuel_web.get_last_created_cluster()
    # Any controller of the seed env can be used for the CEPH health check.
    seed_controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        seed_cluster_id, ["controller"]
    )[0]

    self.show_step(3)
    self.ssh_manager.execute_on_remote(
        ip=self.ssh_manager.admin_ip,
        cmd="octane upgrade-osd --admin-password {0} {1}".format(
            KEYSTONE_CREDS['password'],
            self.orig_cluster_id),
        err_msg="octane upgrade-osd failed"
    )

    self.show_step(4)
    self.check_ceph_health(seed_controller['ip'])

    # minimal_check performs both scenario steps 5 and 6: network
    # verification (nwk_check=True) plus the minimal OSTF sanity check.
    # Emit the step markers so the log matches the docstring scenario,
    # consistent with the sibling upgrade tests.
    self.show_step(5)
    self.show_step(6)
    self.minimal_check(seed_cluster_id=seed_cluster_id, nwk_check=True)

    self.env.make_snapshot("upgrade_ceph_osd", is_make=True)
@test(depends_on=[upgrade_ceph_osd], groups=["upgrade_old_nodes"])
@log_snapshot_after_test
def upgrade_old_nodes(self):
    """Upgrade all non controller nodes

    Scenario:
        1. Revert snapshot upgrade_all_controllers
        2. Select cluster for upgrade and upgraded cluster
        3. Collect nodes for upgrade
        4. Run octane upgrade-node $SEED_ID <ID>
        5. run network verification on target cluster
        6. run OSTF check
        7. Drop old cluster
    """
    self.check_release_requirements()
    self.check_run('upgrade_old_nodes')

    self.show_step(1, initialize=True)
    self.env.revert_snapshot("upgrade_ceph_osd")
    self.install_octane()

    self.show_step(2)
    seed_cluster_id = self.fuel_web.get_last_created_cluster()

    self.show_step(3)
    # Upgrades every node still assigned to the original cluster
    # (controllers/OSDs were migrated by the earlier test steps).
    # TODO(astepanov): validate, that only correct nodes acquired
    old_nodes = self.fuel_web.client.list_cluster_nodes(
        self.orig_cluster_id)

    self.show_step(4)
    self.ssh_manager.execute_on_remote(
        ip=self.ssh_manager.admin_ip,
        cmd="octane upgrade-node {0} {1}".format(
            seed_cluster_id,
            " ".join([str(node["id"]) for node in old_nodes])),
        err_msg="octane upgrade-node failed"
    )

    self.show_step(5)
    self.fuel_web.verify_network(seed_cluster_id)

    self.show_step(6)
    self.fuel_web.run_ostf(seed_cluster_id)

    self.show_step(7)
    self.fuel_web.delete_env_wait(self.orig_cluster_id)