Fix docstrings for sphinx, fix snapshot names for updates
Some fixes for the 7.0 branch:

- fix docstrings to match Sphinx requirements
- remove the outdated 'tests_os_patching' module
- fix snapshot names for 6.1 -> 7.0 rollback/update jobs
- add separate groups for each of the rollback/update jobs to reduce the
  number of "skipped" test cases

Change-Id: I4569db85099e4e3eb09b8b0d0a069fc243176f4c
Closes-Bug: #1468392
Related-Bug: #1470168

Parent: 686d8b5d63
Commit: 92054440a4
@@ -97,7 +97,7 @@ Test Neutron

 Test Neutron Vlan Ceph Mongo
 ----------------------------
-.. automodule:: fuelweb_test.tests.gd_based_tests.test_neutron
+.. automodule:: fuelweb_test.tests.gd_based_tests.test_neutron_vlan_ceph_mongo
     :members:

@@ -140,15 +140,6 @@ Zabbix tests
     :members:


-Openstack patching tests
-========================
-
-Openstack patching tests
-------------------------
-.. automodule:: fuelweb_test.tests.tests_os_patching.test_os_patching
-    :members:
-
-
 Patching tests
 ==============

@@ -40,9 +40,8 @@ from time import sleep

 @logwrap
 def check_cinder_status(remote):
-    """Parse output and return False
-    if any enabled service is down.
-    'cinder service-list' stdout example:
+    """Parse output and return False if any enabled service is down.
+    'cinder service-list' stdout example:
     | cinder-scheduler | node-1.test.domain.local | nova | enabled | up |
     | cinder-scheduler | node-2.test.domain.local | nova | enabled | down |
     """
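A minimal sketch of the check this docstring describes, assuming the
'cinder service-list' rows look like the example above (illustration only, not
the repository's implementation):

    def any_enabled_service_down(stdout_lines):
        for line in stdout_lines:
            # Split a row like
            # "| cinder-scheduler | node-1 | nova | enabled | up |"
            columns = [col.strip() for col in line.split('|') if col.strip()]
            if len(columns) >= 5 and columns[3] == 'enabled' \
                    and columns[4] == 'down':
                return True
        return False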
@@ -922,8 +921,7 @@ def get_file_size(remote, file_name, file_path):

 @logwrap
 def check_ping(remote, host, deadline=10, size=56, timeout=1, interval=1):
-    """Check network connectivity from
-    remote to host using ICMP (ping)
+    """Check network connectivity from remote to host using ICMP (ping)
     :param remote: SSHClient
     :param host: string IP address or host/domain name
     :param deadline: time in seconds before ping exits
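A hypothetical usage sketch for check_ping (the wrapper name and retry policy
below are assumptions, not code from this change):

    def wait_host_pingable(remote, host, retries=3):
        """Retry check_ping a few times before failing a test step."""
        for _ in range(retries):
            if check_ping(remote, host, deadline=30, interval=2):
                return True
        return False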
@@ -464,19 +464,21 @@ def duration(func):
     """Measuring execution time of the decorated method in context of a test.

     settings.TIMESTAT_PATH_YAML contains file name for collected data.
-    Data are stored in the following format:
+    Data are stored to YAML file in the following format:

     <name_of_system_test_method>:
         <name_of_decorated_method>_XX: <seconds>

-    , where:
-    <name_of_system_test_method> - The name of the system test method started
-                                   by proboscis,
-    <name_of_decorated_method> - Name of the method to which this decorator
-                                 is implemented. _XX is a number of the method
-                                 call while test is running, from _00 to _99.
-    <seconds> - Time in seconds with floating point, consumed by the decorated
-                method.
+    - name_of_system_test_method: Name of the system test method started
+      by proboscis;
+    - name_of_decorated_method: Name of the method to which this decorator
+      is implemented. _XX is a number of the method
+      call while test is running, from _00 to _99
+    - seconds: Time in seconds with floating point, consumed by the
+      decorated method

     Thus, different tests can call the same decorated method multiple times
     and get the separate measurement for each call.
     """
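The docstring above only describes the stored YAML layout; as an illustration,
a stripped-down decorator with the same idea could look like the sketch below
(names assumed, PyYAML for storage; the real helper also numbers repeated
calls with the _XX suffix and takes the path from settings.TIMESTAT_PATH_YAML):

    import functools
    import time

    import yaml

    def duration_sketch(stats_path='timestat.yaml'):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                start = time.time()
                try:
                    return func(*args, **kwargs)
                finally:
                    elapsed = time.time() - start
                    try:
                        with open(stats_path) as stats_file:
                            stats = yaml.safe_load(stats_file) or {}
                    except IOError:
                        stats = {}
                    # One mapping per test, one entry per decorated call.
                    stats.setdefault('current_test', {})[func.__name__] = elapsed
                    with open(stats_path, 'w') as stats_file:
                        yaml.safe_dump(stats, stats_file)
            return wrapper
        return decorator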
@@ -63,12 +63,14 @@ class BaseActions(object):
         :param copy_from: path to copy file from
         :param copy_to: path to copy file to
         For ex.:
-            to copy from container to master node use:
-                copy_from = container:path_from
-                copy_to = path_to
-            to copy from master node to container use:
-                copy_from = path_from
-                copy_to = container:path_to
+
+            - to copy from container to master node use:
+                copy_from = container:path_from
+                copy_to = path_to
+            - to copy from master node to container use:
+                copy_from = path_from
+                copy_to = container:path_to
+
         :return:
             Standard output from console
         """
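A hypothetical illustration of the copy_from/copy_to convention described
above (the method name copy_files and the paths are assumptions; only the
"container:path" prefix rule comes from the docstring):

    actions = BaseActions()  # constructor signature assumed

    # container -> master node
    actions.copy_files(copy_from='nailgun:/etc/nailgun/settings.yaml',
                       copy_to='/tmp/settings.yaml')

    # master node -> container
    actions.copy_files(copy_from='/tmp/settings.yaml',
                       copy_to='nailgun:/etc/nailgun/settings.yaml')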
@@ -366,9 +368,11 @@ class FuelPluginBuilder(object):
         :param old_file: a path to the file content from to be changed
         :param new_file: a path to the new file to ve created with new content
         :param element: tuple with path to element to be changed
+
             for example: ['root_elem', 'first_elem', 'target_elem']
             if there are a few elements with equal names use integer
             to identify which element should be used
+
         :return: nothing
         """
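A rough sketch of the operation this docstring describes, assuming PyYAML and
treating integers in the element path as list indexes (illustration only, not
the FuelPluginBuilder code):

    import yaml

    def change_yaml_element(old_file, new_file, element_path, new_value):
        with open(old_file) as source:
            data = yaml.safe_load(source)
        node = data
        for key in element_path[:-1]:
            node = node[key]  # dict key or list index
        node[element_path[-1]] = new_value
        with open(new_file, 'w') as target:
            yaml.safe_dump(data, target, default_flow_style=False)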
@@ -27,7 +27,7 @@ class PuppetTestGenerator:
     You should give constructor following arguments:

     - local_modules_path* Path to puppet modules which will be scanned for
       test files
     - tests_directory_path* Output directory where files will be written
     - debug level
     """
@@ -277,15 +277,15 @@ class NeutronGre(TestBasic):
     def step_5_run_cluster_haproxy_controller(self):
         """Execute cluster-haproxy task on controller, create snapshot
         Depends:
-        "run_virtual_ips_controller"
+        "run_tasks_end_with_vips"

         Scenario:
             1. Revert snapshot "run_virtual_ips_controller"
             2. Get cluster id
-            4. Execute cluster-haproxy task on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute cluster-haproxy task on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_5_run_cluster_haproxy_controller"
         """
@@ -344,10 +344,10 @@ class NeutronGre(TestBasic):
         Scenario:
             1. Revert snapshot "step 5 run cluster haproxy controller"
             2. Get cluster id
-            4. Execute openstack-haproxy task on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute openstack-haproxy task on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_6_run_openstack_haproxy_controller"
         """
@@ -426,10 +426,10 @@ class NeutronGre(TestBasic):
         Scenario:
             1. Revert snapshot "step_6_run_openstack_haproxy_controller"
             2. Get cluster id
-            4. Execute openstack-controller task on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute openstack-controller task on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_7_run_openstack_controller"
         """
@@ -483,10 +483,10 @@ class NeutronGre(TestBasic):
         Scenario:
             1. Revert snapshot "step_7_run_openstack_controller"
             2. Get cluster id
-            4. Executecontroller_remaining_tasks on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Executecontroller_remaining_tasks on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_8_run_controller_remaining_tasks"
         """
@@ -573,10 +573,10 @@ class NeutronGre(TestBasic):
         Scenario:
             1. Revert snapshot "step_8_run_controller_remaining_task"
             2. Get cluster id
-            4. Execute top-role-compute task on computes
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute top-role-compute task on computes
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_9_run_top_role_compute"
         """
@@ -651,10 +651,10 @@ class NeutronGre(TestBasic):
         Scenario:
             1. Revert snapshot "step_9_run_top_role_compute"
             2. Get cluster id
-            4. Execute top-role-cinder task on cinder nodes
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute top-role-cinder task on cinder nodes
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_10_run_top_role_cinder"
         """
@@ -553,7 +553,7 @@ class NeutronVlanCephMongo(TestBasic):
             3. Execute openstack-haproxy task on primary controller
             4. Verify that task was finished with success.
             5. Assert task execution
-            . Create snapshot
+            6. Create snapshot

         Snapshot: "step_8_run_openstack_haproxy_primary_controller"
         """
@@ -989,10 +989,10 @@ class NeutronVlanCephMongo(TestBasic):
         Scenario:
             1. Revert snapshot "step 13 run cluster haproxy controller"
             2. Get cluster id
-            4. Execute openstack-haproxy task on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute openstack-haproxy task on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_14_run_openstack_haproxy_controller"
         """
@@ -1081,10 +1081,10 @@ class NeutronVlanCephMongo(TestBasic):
         Scenario:
             1. Revert snapshot "step_14_run_openstack_haproxy_controller"
             2. Get cluster id
-            4. Execute openstack-controller task on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute openstack-controller task on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_15_run_openstack_controller"
         """
@@ -1143,10 +1143,10 @@ class NeutronVlanCephMongo(TestBasic):
         Scenario:
             1. Revert snapshot "step_15_run_openstack_controller"
             2. Get cluster id
-            4. Execute controller_remaining_tasks on controller
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute controller_remaining_tasks on controller
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_16_run_controller_remaining_tasks"
         """
@@ -1233,10 +1233,10 @@ class NeutronVlanCephMongo(TestBasic):
         Scenario:
             1. Revert snapshot "step_16_run_controller_remaining_tasks"
             2. Get cluster id
-            4. Execute top-role-compute task on computes
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute top-role-compute task on computes
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_17_run_top_role_compute"
         """
@@ -1312,10 +1312,10 @@ class NeutronVlanCephMongo(TestBasic):
         Scenario:
             1. Revert snapshot "step_17_run_top_role_compute"
             2. Get cluster id
-            4. Execute top-role-ceph-osd task on cinder nodes
-            5. Verify that task was finished with success.
-            6. Assert task execution
-            7. Create snapshot
+            3. Execute top-role-ceph-osd task on cinder nodes
+            4. Verify that task was finished with success.
+            5. Assert task execution
+            6. Create snapshot

         Snapshot: "step_18_run_top_role_ceph_osd"
         """
@@ -216,7 +216,7 @@ class ExamplePlugin(TestBasic):
             7. Deploy the cluster
             8. Run network verification
             9. Check plugin health
-            10 Add 2 nodes with controller role
+            10. Add 2 nodes with controller role
             11. Deploy cluster
             12. Check plugin health
             13. Run OSTF
@@ -157,17 +157,17 @@ class RebootPlugin(TestBasic):
         """Check deployment is failed by reboot task plugin.

         Scenario:
             1. Revert snapshot with 3 nodes
             2. Download and install fuel-plugin-builder
             3. Create plugin with reboot task,
                set timeout for reboot task as 1 second
             4. Build and copy plugin from container nailgun
             5. Install plugin to fuel
             6. Create cluster and enable plugin
             7. Provision nodes
             8. Deploy cluster
             9. Check deployment was failed by reboot task
             10. Check error msg at the logs

         Duration 15m
         """
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-""" Launchers for Tempest scenarios
+"""Launchers for Tempest scenarios

 To launch these Fuel-tests, you should specify several properties in global
 environment.
@@ -27,16 +27,18 @@ Another important variable is name of snapshot (variable: SNAPSHOT) which
 Tempest will verify.

 Optional properties:
-    TEMPEST_PATH - path to Tempest (default: './tempest')
-    TEMPEST_XML_LOG_FILE - path to file which will store results of
-                           verification in JUnit XML format
-                           (default: './logs/$EXEC_NUMBER_tempest.xml')
+
+    - TEMPEST_PATH: path to Tempest (default: './tempest')
+    - TEMPEST_XML_LOG_FILE: path to file which will store results of
+      verification in JUnit XML format
+      (default: './logs/$EXEC_NUMBER_tempest.xml')

 Cheat:
-    TEMPEST_GOD_MODE - if you specify this variable, fuel-tests will be
-    marked as failed (will raise exception) only when xml log file is
-    missed(don't matter Tempest scenarios are finished successfully or
-    some of them are crashed).
+
+    - TEMPEST_GOD_MODE: if you specify this variable, fuel-tests will be
+      marked as failed (will raise exception) only when xml log file is
+      missed(don't matter Tempest scenarios are finished successfully or
+      some of them are crashed).

 """
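For illustration, the kind of environment a job could export before running
these launchers; the variable names come from the docstring above, the values
are made up:

    import os

    os.environ.setdefault('SNAPSHOT', 'ready_with_5_slaves')
    os.environ.setdefault('TEMPEST_PATH', './tempest')
    os.environ.setdefault('TEMPEST_XML_LOG_FILE', './logs/1_tempest.xml')
    # Only fail on a missing XML report, as described under "Cheat":
    # os.environ['TEMPEST_GOD_MODE'] = 'true'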
@@ -164,15 +166,19 @@ class TestByTempest(base_test_case.TestBasic):

         Scenario:
             1. Revert cluster(snapshot) which Tempest will test.
-            2. Prepare Tempest
-                2.1 Discover nailgun node ip and cluster id (if Tempest
-                    configuration file is not presented)
-                2.2 Modify environment
+            2. Prepare Tempest:
+
+               - Discover nailgun node ip and cluster id (if Tempest
+                 configuration file is not presented)
+               - Modify environment
+
             3. Validate cluster with set of Tempest-tests

         Specific test variable:

             TEMPEST_TEST_SET - name of Tempest tests set, which will be
             launched. Allowed names:

             - full (used by default)
             - smoke
             - baremetal
@@ -206,10 +212,12 @@ class TestByTempest(base_test_case.TestBasic):

         Scenario:
             1. Revert cluster(snapshot) which Tempest will test.
-            2. Prepare Tempest
-                2.1 Discover nailgun node ip and cluster id (if Tempest
-                    configuration file is not presented)
-                2.2 Modify environment
+            2. Prepare Tempest:
+
+               - Discover nailgun node ip and cluster id (if Tempest
+                 configuration file is not presented)
+               - Modify environment
+
             3. Validate cluster with list of Tempest-tests

         Specific test variable:
@@ -684,14 +684,14 @@ class CheckCephPartitionsAfterReboot(TestBasic):
             3. Add 1 node with compute and Ceph OSD roles
             4. Add 1 node with Ceph OSD role
             5. Deploy the cluster
-            7. Check Ceph status
-            8. Read current partitions
-            9. Warm-reboot Ceph nodes
-            10. Read partitions again
-            11. Check Ceph health
-            12. Cold-reboot Ceph nodes
-            13. Read partitions again
-            14. Check Ceph health
+            6. Check Ceph status
+            7. Read current partitions
+            8. Warm-reboot Ceph nodes
+            9. Read partitions again
+            10. Check Ceph health
+            11. Cold-reboot Ceph nodes
+            12. Read partitions again
+            13. Check Ceph health

         Duration 40m
         Snapshot check_ceph_partitions_after_reboot
@@ -45,7 +45,7 @@ class TestHaVLAN(TestBasic):
             4. Set up cluster to use Network VLAN manager with 8 networks
             5. Deploy the cluster
             6. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             7. Run network verification
             8. Run OSTF
             9. Create snapshot
@@ -153,7 +153,7 @@ class TestHaFlat(TestBasic):
             3. Add 2 nodes with compute roles
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Run verify networks
             7. Run OSTF
             8. Make snapshot
@@ -459,7 +459,7 @@ class BackupRestoreHa(TestBasic):
             1. Revert snapshot "deploy_ha_flat"
             2. Backup master
             3. Check backup
-            4 Run OSTF
+            4. Run OSTF
             5. Add 1 node with compute role
             6. Restore master
             7. Check restore
@@ -54,7 +54,7 @@ class OneNodeDeploy(TestBasic):
             2. Add 1 node with controller role
             3. Deploy the cluster
             4. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs

         Duration 20m

@@ -101,7 +101,7 @@ class HAOneControllerFlat(HAOneControllerFlatBase):
             3. Add 1 node with compute role
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Verify networks
             7. Verify network configuration on controller
             8. Run OSTF
@@ -192,7 +192,7 @@ class HAOneControllerFlat(HAOneControllerFlatBase):
             3. Add 1 node with compute role
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Block first VLAN
             7. Run Verify network and assert it fails
             8. Restore first VLAN
@@ -240,11 +240,11 @@ class HAOneControllerFlat(HAOneControllerFlatBase):
             3. Add 1 node with compute role
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Add 1 node with role compute
             7. Deploy changes
             8. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             9. Verify services list on compute nodes
             10. Run OSTF

@@ -313,7 +313,7 @@ class HAOneControllerVlan(TestBasic):
             4. Set up cluster to use Network VLAN manager with 8 networks
             5. Deploy the cluster
             6. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             7. Run network verification
             8. Run OSTF

@@ -371,8 +371,7 @@ class HAOneControllerVlan(TestBasic):
             3. Deploy the cluster
             4. Run network verification
             5. Run OSTF
-            6. Ssh to the base-os node and check
-               /etc/astute.yaml link source.
+            6. Ssh to the base-os node and check /etc/astute.yaml link source.
             7. Make snapshot.

         Snapshot: deploy_base_os_node
@@ -580,7 +579,7 @@ class HAOneControllerCinder(TestBasic):
             4. Add 1 node with cinder role
             5. Deploy the cluster
             6. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             7. Run OSTF

         Duration 30m
@@ -991,7 +990,7 @@ class HAOneControllerFlatUSB(HAOneControllerFlatBase):
             3. Add 1 node with compute role
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Verify networks
             7. Verify network configuration on controller
             8. Run OSTF
@@ -1031,10 +1030,10 @@ class ProvisioningScripts(TestBasic):
         Scenario:
             1. Deploy master node
             2. Check sizes of the files
-            1. Create cluster
-            2. Add 1 node with controller role
-            3. Deploy the cluster
-            4. Check sizes of the files again
+            3. Create cluster
+            4. Add 1 node with controller role
+            5. Deploy the cluster
+            6. Check sizes of the files again

         Duration 45m
         """
@@ -39,9 +39,9 @@ class TestMultipleClusterNets(TestBasic):
             1. Revert snapshot with 5 slaves
             2. Check that slaves got IPs via DHCP from both admin/pxe networks
             3. Make environment snapshot

         Duration 6m
         Snapshot multiple_cluster_net_setup

         """

         if not MULTIPLE_NETWORKS:
@@ -870,9 +870,9 @@ class HeatHAOneController(TestBasic):
             2. Add 1 node with controller role and mongo
             3. Add 1 nodes with compute role
             4. Set Ceilometer install option
-            4. Deploy the cluster
-            5. Verify Heat, Ceilometer services
-            6. Run OSTF platform tests
+            5. Deploy the cluster
+            6. Verify Heat, Ceilometer services
+            7. Run OSTF platform tests

         Duration 40m
         Snapshot: deploy_heat_ha_one_controller_nova
@@ -958,13 +958,12 @@ class HeatHA(TestBasic):
             2. Add 3 node with controller role and mongo
             3. Add 1 nodes with compute role
             4. Set Ceilometer install option
-            4. Deploy the cluster
-            5. Verify Heat and Ceilometer services
-            6. Run OSTF platform tests
+            5. Deploy the cluster
+            6. Verify Heat and Ceilometer services
+            7. Run OSTF platform tests

         Duration 70m
         Snapshot: deploy_heat_ha
-
         """

         self.env.revert_snapshot("ready_with_5_slaves")
@@ -43,7 +43,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
         logger.debug("slave kernel is {0}".format(kernel))
         return kernel

-    @test(groups=["upgrade_ha_one_controller"])
+    @test(groups=["upgrade_ha_one_controller",
+                  "upgrade_ceph_ha_one_controller"])
     @log_snapshot_after_test
     def upgrade_ha_one_controller_env(self):
         """Upgrade ha one controller deployed cluster with ceph
@@ -59,12 +60,9 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
             8. Run OSTF

         """
-
-        # For upgrade jobs *from* 6.1, change snapshot name to
-        # "ceph_ha_one_controller_compact"
-        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
+        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
-        self.env.revert_snapshot("ceph_multinode_compact")
+        self.env.revert_snapshot('ceph_ha_one_controller_compact')

         cluster_id = self.fuel_web.get_last_created_cluster()

@@ -123,7 +121,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):

         self.env.make_snapshot("upgrade_ha_one_controller")

-    @test(groups=["upgrade_ha_one_controller_delete_node"])
+    @test(groups=["upgrade_ha_one_controller_delete_node",
+                  "upgrade_ceph_ha_one_controller"])
     @log_snapshot_after_test
     def upgrade_ha_one_controller_delete_node(self):
         """Upgrade ha 1 controller deployed cluster with ceph and
@@ -140,12 +139,9 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
             8. Run OSTF

         """
-
-        # For upgrade jobs *from* 6.1, change snapshot name to
-        # "ceph_ha_one_controller_compact"
-        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
+        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
-        self.env.revert_snapshot("ceph_multinode_compact")
+        self.env.revert_snapshot('ceph_ha_one_controller_compact')

         cluster_id = self.fuel_web.get_last_created_cluster()
         checkers.upload_tarball(self.env.d_env.get_admin_remote(),
@@ -190,7 +186,7 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
         self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
         self.env.make_snapshot("upgrade_ha_one_controller_delete_node")

-    @test(groups=["upgrade_ha"])
+    @test(groups=["upgrade_ha", "upgrade_neutron_gre_ha"])
     @log_snapshot_after_test
     def upgrade_ha_env(self):
         """Upgrade ha deployed cluster
@@ -285,7 +281,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
             cluster_id=cluster_id)
         self.env.make_snapshot("upgrade_ha")

-    @test(groups=["deploy_ha_after_upgrade"])
+    @test(groups=["deploy_ha_after_upgrade",
+                  "upgrade_ceph_ha_one_controller"])
     @log_snapshot_after_test
     def deploy_ha_after_upgrade(self):
         """Upgrade and deploy new ha cluster
@@ -300,12 +297,9 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
             7. Run OSTF

         """
-
-        # For upgrade jobs *from* 6.1, change snapshot name to
-        # "ceph_ha_one_controller_compact"
-        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
+        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
-        self.env.revert_snapshot("ceph_multinode_compact")
+        self.env.revert_snapshot('ceph_ha_one_controller_compact')

         cluster_id = self.fuel_web.get_last_created_cluster()
         available_releases_before = self.fuel_web.get_releases_list_for_os(
@@ -376,7 +370,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
             cluster_id=cluster_id)
         self.env.make_snapshot("deploy_ha_after_upgrade")

-    @test(groups=["upgrade_fuel_after_rollback"])
+    @test(groups=["upgrade_fuel_after_rollback",
+                  "rollback_neutron_gre"])
     @log_snapshot_after_test
     def upgrade_fuel_after_rollback(self):
         """Upgrade Fuel after rollback and deploy new cluster
@@ -481,7 +476,7 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
 class RollbackFuelMaster(base_test_data.TestBasic):
     """RollbackFuelMaster."""  # TODO documentation

-    @test(groups=["rollback_automatic_ha"])
+    @test(groups=["rollback_automatic_ha", "rollback_neutron_gre_ha"])
     @log_snapshot_after_test
     def rollback_automatically_ha_env(self):
         """Rollback manually ha deployed cluster
@@ -543,7 +538,8 @@ class RollbackFuelMaster(base_test_data.TestBasic):

         self.env.make_snapshot("rollback_automatic_ha")

-    @test(groups=["rollback_automatic_ha_one_controller"])
+    @test(groups=["rollback_automatic_ha_one_controller",
+                  "rollback_ceph_ha_one_controller"])
     @log_snapshot_after_test
     def rollback_automatically_ha_one_controller_env(self):
         """Rollback automatically ha one controller deployed cluster
@@ -560,10 +556,10 @@ class RollbackFuelMaster(base_test_data.TestBasic):
             9. Run OSTF

         """
-        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
+        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()

-        self.env.revert_snapshot("ceph_multinode_compact")
+        self.env.revert_snapshot('ceph_ha_one_controller_compact')
         cluster_id = self.fuel_web.get_last_created_cluster()

         _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
@@ -616,7 +612,8 @@ class RollbackFuelMaster(base_test_data.TestBasic):

         self.env.make_snapshot("rollback_automatic_ha_one_controller")

-    @test(groups=["rollback_automatically_delete_node"])
+    @test(groups=["rollback_automatically_delete_node",
+                  "rollback_neutron_gre"])
     @log_snapshot_after_test
     def rollback_automatically_delete_node(self):
         """Rollback automatically ha one controller deployed cluster
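The snapshot handling this file switches to follows one pattern throughout:
skip the test when the expected 6.1 snapshot is absent, otherwise revert to
it. A condensed sketch of that pattern (the test name below is made up; the
decorators and helper calls are the ones visible in the hunks above):

    @test(groups=["upgrade_ha_one_controller", "upgrade_ceph_ha_one_controller"])
    @log_snapshot_after_test
    def some_upgrade_test(self):
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
        self.env.revert_snapshot('ceph_ha_one_controller_compact')
        cluster_id = self.fuel_web.get_last_created_cluster()
        # ... upgrade steps ...

Listing a job-specific group such as "upgrade_ceph_ha_one_controller" next to
the generic one lets a CI job run only that group, which is what reduces the
number of "skipped" cases mentioned in the commit message.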
@@ -303,10 +303,9 @@ class VcenterDeploy(TestBasic):
             2. Configure CephOSD as backend for Glance and Cinder
             3. Add 3 nodes with Controller+CephOSD roles
             4. Add 2 nodes with compute role
-            6. Deploy the cluster
-            7. Run network verification
-            8. Run OSTF
-
+            5. Deploy the cluster
+            6. Run network verification
+            7. Run OSTF
         """
         self.env.revert_snapshot("ready_with_5_slaves")

@@ -1,13 +0,0 @@
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@ -1,372 +0,0 @@
|
|||
# Copyright 2014 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
from proboscis.asserts import assert_not_equal
|
||||
from proboscis.asserts import assert_true
|
||||
from proboscis import factory
|
||||
from proboscis import SkipTest
|
||||
from proboscis import test
|
||||
|
||||
from fuelweb_test.helpers import checkers
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.helpers import packages_fixture
|
||||
from fuelweb_test.helpers import utils
|
||||
from fuelweb_test import settings as hlp_data
|
||||
from fuelweb_test.tests.base_test_case import TestBasic
|
||||
from fuelweb_test import logger
|
||||
|
||||
|
||||
@test(groups=["os_patching"])
|
||||
class TestPatch(TestBasic):
|
||||
"""TestPatch.""" # TODO documentation
|
||||
|
||||
def __init__(self, snapshot):
|
||||
super(TestPatch, self).__init__()
|
||||
self.snapshot = snapshot
|
||||
|
||||
@test
|
||||
@log_snapshot_after_test
|
||||
def deploy_and_patch(self):
|
||||
"""Update OS on reverted env
|
||||
|
||||
Scenario:
|
||||
1. Revert environment
|
||||
2. Upload tarball
|
||||
3. Check that it uploaded
|
||||
4. Extract data
|
||||
5. Get available releases
|
||||
6. Run upgrade script
|
||||
7. Check that new release appears
|
||||
8. Put new release into cluster
|
||||
9. Run cluster update
|
||||
10. Get cluster net configuration
|
||||
11. Check that services are restarted
|
||||
12. Check that packages are updated
|
||||
13. Run OSTF
|
||||
14. Create snapshot
|
||||
|
||||
"""
|
||||
logger.info("snapshot name is {0}".format(self.snapshot))
|
||||
|
||||
if not self.env.manager.devops_env.has_snapshot(self.snapshot):
|
||||
logger.error('There is no shaphot found {0}'.format(self.snapshot))
|
||||
raise SkipTest('Can not find snapshot {0}'.format(self.snapshot))
|
||||
|
||||
# 1. Revert environment
|
||||
self.env.revert_snapshot(self.snapshot)
|
||||
|
||||
logger.info("Start upload upgrade archive")
|
||||
node_ssh = self.env.d_env.get_ssh_to_remote(
|
||||
self.fuel_web.admin_node_ip)
|
||||
|
||||
# 2. Upload tarball
|
||||
checkers.upload_tarball(
|
||||
node_ssh=node_ssh, tar_path=hlp_data.TARBALL_PATH,
|
||||
tar_target='/var/tmp')
|
||||
|
||||
logger.info("Archive should upload. "
|
||||
"Lets check that it exists on master node ...")
|
||||
# 3. Check that it uploaded
|
||||
checkers.check_tarball_exists(node_ssh, os.path.basename(
|
||||
hlp_data.TARBALL_PATH), '/var/tmp')
|
||||
|
||||
logger.info("Extract archive to the /var/tmp")
|
||||
|
||||
# 4. Extract data
|
||||
checkers.untar(node_ssh, os.path.basename(
|
||||
hlp_data.TARBALL_PATH), '/var/tmp')
|
||||
|
||||
logger.info("Get release ids for deployed operation"
|
||||
" system before upgrade.")
|
||||
|
||||
# Get cluster nodes
|
||||
nailgun_nodes = [
|
||||
self.fuel_web.get_nailgun_node_by_devops_node(node)
|
||||
for node in self.env.d_env.nodes().slaves
|
||||
if self.fuel_web.get_nailgun_node_by_devops_node(node)]
|
||||
|
||||
# Try to remember installed nova-packages before update
|
||||
p_version_before = {}
|
||||
for node in nailgun_nodes:
|
||||
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
|
||||
res = checkers.get_package_versions_from_node(
|
||||
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
|
||||
p_version_before[node["devops_name"]] = res
|
||||
|
||||
# 5. Get available releases
|
||||
available_releases_before = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
|
||||
logger.info('Time to run upgrade...')
|
||||
|
||||
# 6. Run upgrade script
|
||||
|
||||
checkers.run_script(node_ssh, '/var/tmp', 'upgrade.sh',
|
||||
password=hlp_data.KEYSTONE_CREDS['password'])
|
||||
logger.info('Check if the upgrade complete.')
|
||||
|
||||
checkers.wait_upgrade_is_done(node_ssh=node_ssh,
|
||||
phrase='*** UPGRADE DONE SUCCESSFULLY',
|
||||
timeout=600 * 10)
|
||||
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
|
||||
logger.info('release ids list after upgrade is {0}'.format(
|
||||
available_releases_after))
|
||||
# 7. Check that new release appears
|
||||
assert_true(
|
||||
len(available_releases_after) > len(available_releases_before),
|
||||
"There is no new release, release ids before {0},"
|
||||
" release ids after {1}". format(
|
||||
available_releases_before, available_releases_after))
|
||||
|
||||
release_version = hlp_data.RELEASE_VERSION
|
||||
logger.debug("Release version is {0}".format(release_version))
|
||||
|
||||
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
|
||||
res = utils.get_yaml_to_json(
|
||||
node_ssh,
|
||||
'/etc/puppet/{0}/manifests/ubuntu-versions.yaml'.format(
|
||||
release_version))
|
||||
res_packages = json.loads(res[0])
|
||||
logger.debug('what we have in res_packages {0}'.format(
|
||||
res_packages))
|
||||
else:
|
||||
res = utils.get_yaml_to_json(
|
||||
node_ssh,
|
||||
'/etc/puppet/{0}/manifests/centos-versions.yaml'.format(
|
||||
release_version))
|
||||
res_packages = json.loads(res[0])
|
||||
logger.debug('what we have in res_packages {0}'.format(
|
||||
res_packages))
|
||||
|
||||
cluster_id = self.fuel_web.get_last_created_cluster()
|
||||
logger.debug("Cluster id is {0}".format(cluster_id))
|
||||
|
||||
# 8. Put new release into cluster
|
||||
if release_version:
|
||||
added_release = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE,
|
||||
release_version=release_version)
|
||||
logger.debug("Does we have here release id ? {0}".format(
|
||||
release_version))
|
||||
else:
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
|
||||
# get nova pids on controller before update
|
||||
ssh_to_controller = self.fuel_web.get_ssh_for_node(
|
||||
[n["devops_name"] for n in nailgun_nodes
|
||||
if 'controller' in n['roles']][0])
|
||||
|
||||
nova_controller_services = ['nova-api', 'nova-cert',
|
||||
'nova-objectstore', 'nova-conductor',
|
||||
'nova-scheduler']
|
||||
|
||||
nova_pids_before = utils.nova_service_get_pid(
|
||||
ssh_to_controller, nova_controller_services)
|
||||
|
||||
logger.debug('Nova pids on controller before {0}'.format(
|
||||
nova_pids_before))
|
||||
|
||||
# 9. Run cluster update
|
||||
self.fuel_web.update_cluster(
|
||||
cluster_id=cluster_id,
|
||||
data={
|
||||
'pending_release_id': added_release[0],
|
||||
'release_id': self.fuel_web.get_cluster_release_id(
|
||||
cluster_id)})
|
||||
|
||||
logger.info('Huh all preparation for update are done.'
|
||||
'It is time to update cluster.')
|
||||
|
||||
self.fuel_web.run_update(cluster_id=cluster_id,
|
||||
timeout=hlp_data.UPDATE_TIMEOUT, interval=20)
|
||||
|
||||
# 10. Get cluster net configuration
|
||||
|
||||
cluster_net = self.fuel_web.client.get_cluster(
|
||||
cluster_id)['net_provider']
|
||||
|
||||
logger.debug('cluster net is {0}'.format(cluster_net))
|
||||
|
||||
# 11. Check is services are restarted
|
||||
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
|
||||
utils.check_if_service_restarted_ubuntu(
|
||||
ssh_to_controller, ["keystone'",
|
||||
"glance-registry'",
|
||||
"glance-api'",
|
||||
"heat-api-cfn'",
|
||||
"heat-engine'",
|
||||
"heat-api'",
|
||||
"heat-api-cloudwatch'"])
|
||||
else:
|
||||
utils.check_if_service_restarted_centos(
|
||||
ssh_to_controller, ["keystone",
|
||||
"glance-registry",
|
||||
"glance-api",
|
||||
"heat-api-cfn",
|
||||
"heat-engine",
|
||||
"heat-api",
|
||||
"heat-api-cloudwatch",
|
||||
"nova-novncproxy"])
|
||||
|
||||
# get nova pids on controller after update
|
||||
nova_pids_after = utils.nova_service_get_pid(
|
||||
ssh_to_controller, nova_controller_services)
|
||||
|
||||
logger.debug('Nova pids on controller before {0}'.format(
|
||||
nova_pids_before))
|
||||
|
||||
assert_not_equal(nova_pids_before, nova_pids_after)
|
||||
|
||||
# 12. Check is packages are updated
|
||||
|
||||
if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
|
||||
for package in packages_fixture.dep:
|
||||
packages_fixture.dep[package] = res_packages[package]
|
||||
logger.debug("Current state of dict is {0}".format(
|
||||
packages_fixture.dep))
|
||||
for key in packages_fixture.dep:
|
||||
res = checkers.get_package_versions_from_node(
|
||||
ssh_to_controller, name=key, os_type='Ubuntu')
|
||||
logger.debug('res_from_node is {0}'.format(res))
|
||||
assert_true(
|
||||
packages_fixture.dep[key] in res,
|
||||
"Wrong version of package {0}. "
|
||||
"Should be {1} but get {2}".format(
|
||||
key, packages_fixture.dep[key], res))
|
||||
else:
|
||||
for package in packages_fixture.rpm:
|
||||
packages_fixture.rpm[package] = res_packages[package]
|
||||
logger.debug("Current state of dict is {0}".format(
|
||||
packages_fixture.rpm))
|
||||
for key in packages_fixture.rpm:
|
||||
res = checkers.get_package_versions_from_node(
|
||||
ssh_to_controller, name=key,
|
||||
os_type=hlp_data.OPENSTACK_RELEASE)
|
||||
assert_true(
|
||||
packages_fixture.rpm[key] in res,
|
||||
"Wrong version of package {0}. "
|
||||
"Should be {1} but get {2}".format(
|
||||
key, packages_fixture.rpm[key], res))
|
||||
p_version_after = {}
|
||||
for node in nailgun_nodes:
|
||||
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
|
||||
res = checkers.get_package_versions_from_node(
|
||||
remote=remote, name="openstack",
|
||||
os_type=hlp_data.OPENSTACK_RELEASE)
|
||||
p_version_after[node["devops_name"]] = res
|
||||
|
||||
logger.info("packages before {0}".format(p_version_before))
|
||||
logger.info("packages after {0}".format(p_version_after))
|
||||
assert_true(p_version_before != p_version_after)
|
||||
|
||||
# 13. Run OSTF
|
||||
self.fuel_web.run_ostf(cluster_id=cluster_id)
|
||||
|
||||
# 14. Create snapshot
|
||||
self.env.make_snapshot('{0}_and_patch'.format(self.snapshot))
|
||||
|
||||
# TODO (tleontovich) enable if rollback will be available
|
||||
# @test(depends_on=[deploy_and_patch])
|
||||
@log_snapshot_after_test
|
||||
def deploy_and_rollback(self):
|
||||
"""Rollback/Downgrade os on reverted env
|
||||
|
||||
Scenario:
|
||||
1. Revert patched environment
|
||||
2. Get release ids
|
||||
2. Identify release id for rollback/downgrade
|
||||
3. Run rollback/downgrade
|
||||
4. Check that operation was successful
|
||||
5. Run OSTF
|
||||
|
||||
"""
|
||||
|
||||
logger.info("snapshot name is {0}".format(self.snapshot))
|
||||
|
||||
if not self.env.manager.devops_env.has_snapshot(
|
||||
'{0}_and_patch'.format(self.snapshot)):
|
||||
raise SkipTest('Can not find snapshot {0}'.format(self.snapshot))
|
||||
|
||||
self.env.revert_snapshot('{0}_and_patch'.format(self.snapshot))
|
||||
|
||||
logger.info("Get release ids for deployed operation"
|
||||
" system before rollback..")
|
||||
|
||||
# Get cluster nodes
|
||||
nailgun_nodes = [
|
||||
self.fuel_web.get_nailgun_node_by_devops_node(node)
|
||||
for node in self.env.d_env.nodes().slaves
|
||||
if self.fuel_web.get_nailgun_node_by_devops_node(node)]
|
||||
|
||||
logger.info("Find next nodes {0}".format(nailgun_nodes))
|
||||
|
||||
# Try to remember installed nova-packages before update
|
||||
p_version_before = {}
|
||||
for node in nailgun_nodes:
|
||||
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
|
||||
res = checkers.get_package_versions_from_node(
|
||||
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
|
||||
p_version_before[node["devops_name"]] = res
|
||||
|
||||
avail_release_ids = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
|
||||
logger.info('Available release ids before rollback {0}'.format(
|
||||
avail_release_ids))
|
||||
cluster_id = self.fuel_web.get_last_created_cluster()
|
||||
cluster_release_id = self.fuel_web.get_cluster_release_id(
|
||||
cluster_id)
|
||||
|
||||
logger.info('Time to run rollback...')
|
||||
|
||||
self.fuel_web.update_cluster(
|
||||
cluster_id=cluster_id,
|
||||
data={
|
||||
'pending_release_id': [i for i in avail_release_ids
|
||||
if i != cluster_release_id][0],
|
||||
'release_id': self.fuel_web.get_cluster_release_id(
|
||||
cluster_id)})
|
||||
|
||||
self.fuel_web.run_update(cluster_id=cluster_id,
|
||||
timeout=hlp_data.UPDATE_TIMEOUT, interval=20)
|
||||
|
||||
# Check packages after
|
||||
|
||||
p_version_after = {}
|
||||
for node in nailgun_nodes:
|
||||
remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
|
||||
res = checkers.get_package_versions_from_node(
|
||||
remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
|
||||
p_version_after[node["devops_name"]] = res
|
||||
|
||||
logger.info("packages after {0}".format(p_version_after))
|
||||
logger.info("packages before {0}".format(p_version_before))
|
||||
|
||||
# TODO tleontovich: Add assert for packages, when test repo will avail
|
||||
|
||||
self.fuel_web.run_ostf(cluster_id=cluster_id,)
|
||||
|
||||
self.env.make_snapshot('{0}_and_rollback'.format(self.snapshot))
|
||||
|
||||
|
||||
@factory
|
||||
def generate_patch_tests():
|
||||
snap = hlp_data.SNAPSHOT.split(",")
|
||||
return [TestPatch(s) for s in snap]
|
|
@@ -37,7 +37,7 @@ class TestHaNeutronFailover(TestHaFailoverBase):
             2. Add 3 nodes with controller roles
             3. Add 2 nodes with compute roles
             4. Deploy the cluster
-            8. Make snapshot
+            5. Make snapshot

         Duration 70m
         Snapshot prepare_ha_neutron
@@ -211,7 +211,7 @@ class TestHaNeutronFailover(TestHaFailoverBase):
     def ha_neutron_virtual_router(self):
         """Verify connection is present and
         downloading maintained by conntrackd
         after primary controller destroy

         Scenario:
             1. SSH to compute node
@@ -234,8 +234,8 @@ class TestHaNeutronFailover(TestHaFailoverBase):
             1. SSH to controller
             2. set 5 % package loss on br-mgmt
             3. run ostf
-        Duration
+
+        Duration
         """
         # TODO enable test when fencing will be implements
         super(self.__class__, self).ha_controller_loss_packages()
@@ -40,7 +40,7 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
             3. Add 1 node with compute role
             4. Deploy the cluster
             5. Validate cluster was set up correctly, there are no dead
                services, there are no errors in logs
             6. Verify networks
             7. Verify network configuration on controller
             8. Run OSTF
@@ -48,7 +48,6 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
             10. Run openstack verification

         Duration 1000m
-
         """
         self.env.revert_snapshot("ready_with_3_slaves")

@@ -147,6 +147,7 @@ class TestNeutronFailover(base_test_case.TestBasic):
             6. Check network connectivity from instance via
                dhcp namespace
             7. Run OSTF
+
         Duration 30m
         """
         self.env.revert_snapshot("deploy_ha_neutron")
@@ -35,19 +35,18 @@ class CephRestart(TestBasic):
     @log_snapshot_after_test
     def ceph_ha_one_controller_with_cinder_restart(self):
         """Restart cluster with ceph and cinder in ha mode

         Scenario:
-
             1. Create cluster in ha mode with 1 controller
             2. Add 1 node with controller and ceph OSD roles
             3. Add 1 node with compute role
             4. Add 2 nodes with cinder and ceph OSD roles
             5. Deploy the cluster
-            7. Warm restart
-            8. Check ceph status
+            6. Warm restart
+            7. Check ceph status

         Duration 90m
         Snapshot None

         """
         if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
             raise SkipTest()