Detach ceph-ha test from upgrade file

Next upgrade refactor step: it is now possible to move each test suite into a separate file.
This refactor will simplify future changes.

Change-Id: I7587dfe038c9ca9a64e12b721b8cf83e66d60ddc
Vladimir Khlyunev 2016-08-11 13:11:02 +03:00
parent 80361961dc
commit dee67482f7
5 changed files with 238 additions and 211 deletions
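
Proboscis collects tests at import time: any module that gets imported contributes its @test-decorated classes and methods to the global test plan, which is what makes one-file-per-suite possible. A minimal sketch of a standalone suite module, using only names that appear in this commit (the class and its group names are hypothetical):

    from proboscis import test

    from fuelweb_test.helpers.decorators import log_snapshot_after_test
    from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \
        DataDrivenUpgradeBase


    @test
    class UpgradeExample(DataDrivenUpgradeBase):
        def __init__(self):
            super(UpgradeExample, self).__init__()
            self.snapshot_name = "upgrade_example"

        @test(groups=['upgrade_example'],
              depends_on_groups=['upgrade_ceph_ha_backup'])
        @log_snapshot_after_test
        def upgrade_example(self):
            """Hypothetical scenario; ordering across files is expressed
            through group names, so the split needs no cross-file imports."""
            self.check_run(self.snapshot_name)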


@@ -655,28 +655,13 @@ Restart tests
Upgrade tests
=============
Test Data-Driven Upgrade Base
-----------------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base
    :members:
Test Data-Driven Upgrade
------------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade
    :members:
Test Data-Driven Upgrade - Network templates
--------------------------------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_ceph_ha
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_net_tmpl
    :members:
Test Data-Driven Upgrade - Plugins engine
-----------------------------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_plugin
    :members:
Upgrade Master tool
-------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.upgrader_tool
    :members:
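
For reference, a dedicated section for the new module would follow the same automodule pattern used throughout this index (the heading below is hypothetical):

    Test Data-Driven Upgrade - Ceph HA
    ----------------------------------
    .. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_ceph_ha
        :members: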


@@ -15,12 +15,14 @@
# pylint: disable=line-too-long
from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade # noqa
from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_ceph_ha # noqa
from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_net_tmpl # noqa
from fuelweb_test.tests.tests_upgrade import test_data_driven_upgrade_plugin # noqa
from fuelweb_test.tests.tests_upgrade import upgrader_tool # noqa
__all__ = [
'test_data_driven_upgrade',
'test_data_driven_upgrade_ceph_ha',
'test_data_driven_upgrade_net_tmpl',
'test_data_driven_upgrade_plugin',
'upgrader_tool'
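
With each suite registered via these imports, proboscis can run one file's tests in isolation. A minimal runner sketch (the script name is hypothetical; --group is proboscis' standard filter flag):

    # run_example.py -- hypothetical entry point
    from proboscis import TestProgram

    # importing the package pulls in every suite module listed above
    import fuelweb_test.tests.tests_upgrade  # noqa

    if __name__ == '__main__':
        # invoke as: python run_example.py --group=upgrade_ceph_ha_tests
        TestProgram().run_and_exit()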


@@ -72,30 +72,6 @@ class UpgradePrepare(DataDrivenUpgradeBase):
"""
super(self.__class__, self).prepare_upgrade_smoke()
@test(groups=['upgrade_ceph_ha_backup'],
depends_on=[SetupEnvironment.prepare_release])
@log_snapshot_after_test
def upgrade_ceph_ha_backup(self):
"""Prepare HA, ceph for all cluster using previous version of Fuel.
Nailgun password should be changed via KEYSTONE_PASSWORD env variable
Scenario:
1. Create a cluster with NeutronVLAN and Ceph for all (replica factor 3)
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 nodes with ceph-osd role
5. Verify networks
6. Deploy cluster
7. Install fuel-octane package
8. Create backup file using 'octane fuel-backup'
9. Download the backup to the host
Duration: TODO
Snapshot: upgrade_ceph_ha_backup
"""
super(self.__class__, self).prepare_upgrade_ceph_ha()
@test(groups=['upgrade_smoke_tests'])
class UpgradeSmoke(DataDrivenUpgradeBase):
@@ -372,128 +348,6 @@ class UpgradeSmoke(DataDrivenUpgradeBase):
self.fuel_web.run_ostf(cluster_id)
@test(groups=['upgrade_ceph_ha_tests'])
class UpgradeCephHA(DataDrivenUpgradeBase):
def __init__(self):
super(UpgradeCephHA, self).__init__()
self.source_snapshot_name = "upgrade_ceph_ha_backup"
self.snapshot_name = "upgrade_ceph_ha_restore"
self.backup_name = "backup_ceph_ha.tar.gz"
self.repos_backup_name = "repos_backup_ceph_ha.tar.gz"
@test(groups=['upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_restore(self):
"""Reinstall Fuel and restore data with Tun+Ceph+HA cluster
Scenario:
1. Revert "upgrade_ceph_ha_backup" snapshot
2. Reinstall Fuel master using iso given in ISO_PATH
3. Install fuel-octane package
4. Upload the backup back to the reinstalled Fuel master node
5. Restore master node using 'octane fuel-restore'
6. Verify networks for restored cluster
7. Run OSTF for restored cluster
Snapshot: upgrade_ceph_ha_restore
Duration: TODO
"""
self.check_run(self.snapshot_name)
assert_true(os.path.exists(self.repos_local_path))
assert_true(os.path.exists(self.local_path))
intermediate_snapshot = 'ceph_ha_before_restore'
if not self.env.d_env.has_snapshot(intermediate_snapshot):
self.show_step(1, initialize=True)
assert_true(
self.env.revert_snapshot(self.source_snapshot_name),
"The test can not use given environment - snapshot "
"'upgrade_ceph_ha_backup' does not exists")
self.show_step(2)
self.reinstall_master_node()
self.env.make_snapshot(intermediate_snapshot)
else:
self.env.d_env.revert(intermediate_snapshot)
self.env.resume_environment()
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.do_restore(self.backup_path, self.local_path,
self.repos_backup_path, self.repos_local_path)
self.fuel_web.change_default_network_settings()
self.show_step(6)
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
# Live migration test could fail
# https://bugs.launchpad.net/fuel/+bug/1471172
# https://bugs.launchpad.net/fuel/+bug/1604749
self.fuel_web.run_ostf(cluster_id, should_fail=1)
self.env.make_snapshot(self.snapshot_name, is_make=True)
self.cleanup()
@test(groups=['upgrade_ceph_ha_reboot_ctrl'],
depends_on_groups=['upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_reboot_ctrl(self):
"""Ensure that controller receives correct boot order from cobbler
Scenario:
1. Revert "upgrade_ceph_ha_restore" snapshot.
2. Warm restart of a controller.
3. Wait until HA services become ready.
4. Run OSTF.
Duration: 20m
"""
self.show_step(1)
self.env.revert_snapshot(self.snapshot_name)
self.show_step(2)
cluster_id = self.fuel_web.get_last_created_cluster()
n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id=cluster_id,
roles=['controller'])
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
self.fuel_web.warm_restart_nodes([d_ctrls[0]])
self.show_step(3)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.show_step(4)
self.fuel_web.run_ostf(cluster_id)
@test(groups=['upgrade_ceph_ha_scale_ceph'],
depends_on_groups=['upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_scale_ceph(self):
""" Add 1 ceph node to existing cluster after upgrade
Scenario:
1. Revert "upgrade_ceph_ha_restore" snapshot.
2. Add 1 ceph node
3. Verify networks
4. Deploy cluster
5. Run OSTF
"""
self.show_step(1, initialize=True)
self.env.revert_snapshot(self.snapshot_name)
self.show_step(2)
cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[8:9])
self.fuel_web.update_nodes(cluster_id, {'slave-09': ['ceph-osd']})
self.show_step(3)
self.fuel_web.verify_network(cluster_id)
self.show_step(4)
# LP 1562736 get_devops_node_by_nailgun_node is not working
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(5)
self.fuel_web.run_ostf(cluster_id)
@test(groups=['upgrade_no_cluster_tests'])
class UpgradeNoCluster(DataDrivenUpgradeBase):
def __init__(self):


@@ -380,54 +380,6 @@ class DataDrivenUpgradeBase(TestBasic):
self.repos_backup_path, self.repos_local_path)
self.env.make_snapshot("upgrade_smoke_backup", is_make=True)
def prepare_upgrade_ceph_ha(self):
self.backup_name = "backup_ceph_ha.tar.gz"
self.repos_backup_name = "repos_backup_ceph_ha.tar.gz"
self.check_run("upgrade_ceph_ha_backup")
self.env.revert_snapshot("ready", skip_timesync=True)
intermediate_snapshot = "prepare_upgrade_ceph_ha_before_backup"
assert_not_equal(
settings.KEYSTONE_CREDS['password'], 'admin',
"Admin password was not changed, aborting execution")
cluster_settings = {
'net_provider': settings.NEUTRON,
'net_segment_type': settings.NEUTRON_SEGMENT['vlan'],
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'objects_ceph': True,
'ephemeral_ceph': True,
'osd_pool_size': '3'
}
cluster_settings.update(self.cluster_creds)
if not self.env.d_env.has_snapshot(intermediate_snapshot):
self.deploy_cluster(
{'name': self.prepare_upgrade_ceph_ha.__name__,
'settings': cluster_settings,
'nodes':
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute'],
'slave-06': ['ceph-osd'],
'slave-07': ['ceph-osd'],
'slave-08': ['ceph-osd']}
}
)
self.env.make_snapshot(intermediate_snapshot)
self.env.revert_snapshot(intermediate_snapshot)
self.do_backup(self.backup_path, self.local_path,
self.repos_backup_path, self.repos_local_path)
self.env.make_snapshot("upgrade_ceph_ha_backup", is_make=True)
def prepare_upgrade_no_cluster(self):
self.backup_name = "backup_no_cluster.tar.gz"
self.repos_backup_name = "repos_backup_no_cluster.tar.gz"


@@ -0,0 +1,234 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis import test
from proboscis.asserts import assert_true, assert_not_equal
from fuelweb_test import settings
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base import \
DataDrivenUpgradeBase
@test
class UpgradeCephHA(DataDrivenUpgradeBase):
def __init__(self):
super(UpgradeCephHA, self).__init__()
self.source_snapshot_name = "upgrade_ceph_ha_backup"
self.backup_snapshot_name = self.source_snapshot_name
self.snapshot_name = "upgrade_ceph_ha_restore"
self.backup_name = "backup_ceph_ha.tar.gz"
self.repos_backup_name = "repos_backup_ceph_ha.tar.gz"
assert_not_equal(
settings.KEYSTONE_CREDS['password'], 'admin',
"Admin password was not changed, aborting execution")
@test(groups=['prepare_upgrade_ceph_ha_before_backup'],
depends_on=[SetupEnvironment.prepare_release])
@log_snapshot_after_test
def prepare_upgrade_ceph_ha_before_backup(self):
"""Prepare HA, ceph for all cluster using previous version of Fuel.
Nailgun password should be changed via KEYSTONE_PASSWORD env variable
Scenario:
1. Create a cluster with NeutronVLAN and Ceph for all (replica factor 3)
2. Add 3 nodes with controller role
3. Add 2 nodes with compute role
4. Add 3 nodes with ceph-osd role
5. Verify networks
6. Deploy cluster
Duration: TODO
Snapshot: prepare_upgrade_ceph_ha_before_backup
"""
self.check_run("prepare_upgrade_ceph_ha_before_backup")
self.env.revert_snapshot("ready", skip_timesync=True)
cluster_settings = {
'net_provider': settings.NEUTRON,
'net_segment_type': settings.NEUTRON_SEGMENT['vlan'],
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'objects_ceph': True,
'ephemeral_ceph': True,
'osd_pool_size': '3'
}
cluster_settings.update(self.cluster_creds)
self.show_step(1)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.show_step(6)
self.deploy_cluster(
{'name': self.prepare_upgrade_ceph_ha_before_backup.__name__,
'settings': cluster_settings,
'nodes':
{'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute'],
'slave-06': ['ceph-osd'],
'slave-07': ['ceph-osd'],
'slave-08': ['ceph-osd']}
})
self.env.make_snapshot(
"prepare_upgrade_ceph_ha_before_backup", is_make=True)
@test(groups=['upgrade_ceph_ha_backup'],
depends_on_groups=['prepare_upgrade_ceph_ha_before_backup'])
@log_snapshot_after_test
def upgrade_ceph_ha_backup(self):
"""Create upgrade backup file for ceph HA cluster
Scenario:
1. Revert "prepare_upgrade_ceph_ha_before_backup" snapshot
2. Install fuel-octane package
3. Create backup file using 'octane fuel-backup'
4. Download the backup to the host
Snapshot: upgrade_ceph_ha_backup
"""
self.check_run(self.backup_snapshot_name)
self.show_step(1)
self.env.revert_snapshot("prepare_upgrade_ceph_ha_before_backup",
skip_timesync=True)
self.show_step(2)
self.show_step(3)
self.show_step(4)
self.do_backup(self.backup_path, self.local_path,
self.repos_backup_path, self.repos_local_path)
self.env.make_snapshot(self.backup_snapshot_name, is_make=True)
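# NOTE: do_backup() lives in test_data_driven_upgrade_base and is not part of
# this commit; judging by scenario steps 2-4 it roughly performs the following
# (the octane flags and remote-client calls here are assumptions):
#
#     with self.env.d_env.get_admin_remote() as remote:
#         remote.check_call("yum install -y fuel-octane")              # step 2
#         remote.check_call(
#             "octane fuel-backup --to {0}".format(self.backup_path))  # step 3
#         remote.download(self.backup_path, self.local_path)           # step 4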
@test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_restore(self):
"""Reinstall Fuel and restore data with Tun+Ceph+HA cluster
Scenario:
1. Revert "upgrade_ceph_ha_backup" snapshot
2. Reinstall Fuel master using iso given in ISO_PATH
3. Install fuel-octane package
4. Upload the backup back to the reinstalled Fuel master node
5. Restore master node using 'octane fuel-restore'
6. Verify networks for restored cluster
7. Run OSTF for restored cluster
Snapshot: upgrade_ceph_ha_restore
Duration: TODO
"""
self.check_run(self.snapshot_name)
assert_true(
os.path.exists(self.local_path),
"Data backup file was not found at {!r}".format(self.local_path))
assert_true(
os.path.exists(self.repos_local_path),
"Repo backup file was not found at {!r}".format(
self.repos_local_path))
intermediate_snapshot = 'ceph_ha_before_restore'
if not self.env.d_env.has_snapshot(intermediate_snapshot):
self.show_step(1)
self.revert_backup()
self.show_step(2)
self.reinstall_master_node()
self.env.make_snapshot(intermediate_snapshot)
else:
self.env.d_env.revert(intermediate_snapshot)
self.env.resume_environment()
self.show_step(3)
self.show_step(4)
self.show_step(5)
self.do_restore(self.backup_path, self.local_path,
self.repos_backup_path, self.repos_local_path)
self.show_step(6)
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.verify_network(cluster_id)
self.show_step(7)
# Live migration test could fail
# https://bugs.launchpad.net/fuel/+bug/1471172
# https://bugs.launchpad.net/fuel/+bug/1604749
self.fuel_web.run_ostf(cluster_id, should_fail=1)
self.env.make_snapshot(self.snapshot_name, is_make=True)
self.cleanup()
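# NOTE: do_restore() (steps 3-5) mirrors do_backup(); a sketch under the same
# assumptions:
#
#     with self.env.d_env.get_admin_remote() as remote:
#         remote.check_call("yum install -y fuel-octane")             # step 3
#         remote.upload(self.local_path, self.backup_path)            # step 4
#         remote.check_call(
#             "octane fuel-restore --from {0} --admin-password {1}".format(
#                 self.backup_path,
#                 settings.KEYSTONE_CREDS['password']))               # step 5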
@test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_reboot_ctrl'],
depends_on_groups=['upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_reboot_ctrl(self):
"""Ensure that controller receives correct boot order from cobbler
Scenario:
1. Revert "upgrade_ceph_ha_restore" snapshot.
2. Warm restart of a controller.
3. Wait until HA services become ready.
4. Run OSTF.
Duration: 20m
"""
self.show_step(1)
self.env.revert_snapshot(self.snapshot_name)
self.show_step(2)
cluster_id = self.fuel_web.get_last_created_cluster()
n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id=cluster_id,
roles=['controller'])
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
self.fuel_web.warm_restart_nodes([d_ctrls[0]])
self.show_step(3)
self.fuel_web.assert_ha_services_ready(cluster_id)
self.show_step(4)
self.fuel_web.run_ostf(cluster_id)
@test(groups=['upgrade_ceph_ha_tests', 'upgrade_ceph_ha_scale_ceph'],
depends_on_groups=['upgrade_ceph_ha_restore'])
@log_snapshot_after_test
def upgrade_ceph_ha_scale_ceph(self):
""" Add 1 ceph node to existing cluster after upgrade
Scenario:
1. Revert "upgrade_ceph_ha_restore" snapshot.
2. Add 1 ceph node
3. Verify networks
4. Deploy cluster
5. Run OSTF
"""
self.show_step(1)
self.env.revert_snapshot(self.snapshot_name)
self.show_step(2)
cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[8:9])
self.fuel_web.update_nodes(cluster_id, {'slave-09': ['ceph-osd']})
self.show_step(3)
self.fuel_web.verify_network(cluster_id)
self.show_step(4)
# LP 1562736 get_devops_node_by_nailgun_node is not working
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(5)
self.fuel_web.run_ostf(cluster_id)