From 24d98abd3ebd3035f4d763df1e7424c90a2b8b91 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Wed, 18 Dec 2019 09:48:08 +0100 Subject: [PATCH] Retire repository Fuel (from openstack namespace) and fuel-ccp (in x namespace) repositories are unused and ready to retire. This change removes all content from the repository and adds the usual README file to point out that the repository is retired following the process from https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project See also http://lists.openstack.org/pipermail/openstack-discuss/2019-December/011647.html Depends-On: https://review.opendev.org/699362 Change-Id: I658efd028f1b23bc21b327fc55d39a7fa5824ec1 --- .gitignore | 14 - LICENSE | 176 -- MAINTAINERS | 66 - MANIFEST.in | 3 - README.md | 123 -- README.rst | 10 + etc/ostf/ostf.conf | 144 -- etc/tools/prepare_database.sh | 8 - etc/tools/prepare_settings.sh | 17 - fuel_health/__init__.py | 0 fuel_health/ceilometermanager.py | 482 ------ fuel_health/cleanup.py | 244 --- fuel_health/cloudvalidation.py | 51 - fuel_health/common/__init__.py | 0 fuel_health/common/facts.py | 72 - fuel_health/common/log.py | 117 -- fuel_health/common/ssh.py | 233 --- fuel_health/common/test_mixins.py | 220 --- fuel_health/common/utils/__init__.py | 4 - fuel_health/common/utils/data_utils.py | 75 - fuel_health/common/utils/misc.py | 25 - fuel_health/config.py | 977 ----------- fuel_health/etc/heat_autoscaling_neutron.yaml | 75 - fuel_health/etc/heat_autoscaling_nova.yaml | 71 - .../heat_create_neutron_stack_template.yaml | 24 - .../etc/heat_create_nova_stack_template.yaml | 20 - .../heat_update_neutron_stack_template.yaml | 33 - .../etc/heat_update_nova_stack_template.yaml | 26 - .../etc/heat_wait_condition_neutron.yaml | 103 -- fuel_health/etc/heat_wait_condition_nova.yaml | 66 - fuel_health/etc/server.txt | 0 fuel_health/etc/test.conf | 171 -- fuel_health/exceptions.py | 190 --- fuel_health/glancemanager.py | 129 -- fuel_health/ha_base.py | 571 ------- fuel_health/heatmanager.py | 274 --- fuel_health/hooks.py | 20 - fuel_health/ironicmanager.py | 125 -- fuel_health/manager.py | 29 - fuel_health/muranomanager.py | 481 ------ fuel_health/neutronmanager.py | 192 --- fuel_health/nmanager.py | 1477 ----------------- fuel_health/saharamanager.py | 227 --- fuel_health/test.py | 143 -- fuel_health/tests/__init__.py | 0 fuel_health/tests/cloudvalidation/__init__.py | 30 - .../cloudvalidation/test_disk_space_db.py | 65 - .../cloudvalidation/test_disk_space_outage.py | 53 - .../tests/cloudvalidation/test_keystone.py | 62 - .../tests/cloudvalidation/test_logrotate.py | 53 - .../cloudvalidation/test_vm_auto_start.py | 59 - fuel_health/tests/configuration/__init__.py | 30 - .../tests/configuration/test_configuration.py | 129 -- fuel_health/tests/ha/__init__.py | 30 - fuel_health/tests/ha/test_haproxy.py | 94 -- .../tests/ha/test_mysql_replication.py | 163 -- fuel_health/tests/ha/test_mysql_status.py | 219 --- fuel_health/tests/ha/test_pacemaker_status.py | 173 -- fuel_health/tests/ha/test_rabbit.py | 124 -- fuel_health/tests/sanity/__init__.py | 29 - .../tests/sanity/test_sanity_ceilometer.py | 55 - .../tests/sanity/test_sanity_compute.py | 106 -- .../tests/sanity/test_sanity_glance.py | 60 - fuel_health/tests/sanity/test_sanity_heat.py | 36 - .../tests/sanity/test_sanity_identity.py | 55 - .../sanity/test_sanity_infrastructure.py | 167 -- .../tests/sanity/test_sanity_ironic.py | 148 -- .../tests/sanity/test_sanity_murano.py | 90 - .../tests/sanity/test_sanity_networking.py | 60 - 
.../tests/sanity/test_sanity_sahara.py | 183 -- fuel_health/tests/smoke/__init__.py | 29 - fuel_health/tests/smoke/test_create_flavor.py | 50 - fuel_health/tests/smoke/test_create_images.py | 132 -- fuel_health/tests/smoke/test_create_volume.py | 201 --- fuel_health/tests/smoke/test_dpdk.py | 155 -- .../tests/smoke/test_live_migration.py | 170 -- .../tests/smoke/test_neutron_actions.py | 243 --- ..._nova_create_instance_with_connectivity.py | 366 ---- .../tests/smoke/test_nova_image_actions.py | 185 --- fuel_health/tests/smoke/test_user_create.py | 133 -- fuel_health/tests/smoke/test_vcenter.py | 576 ------- fuel_health/tests/tests_platform/__init__.py | 31 - .../io.murano.apps.Simple/Classes/Simple.yaml | 30 - .../io.murano.apps.Simple/UI/ui.yaml | 53 - .../io.murano.apps.Simple/manifest.yaml | 11 - .../tests/tests_platform/test_ceilometer.py | 552 ------ fuel_health/tests/tests_platform/test_heat.py | 889 ---------- .../tests/tests_platform/test_murano_linux.py | 553 ------ .../tests/tests_platform/test_sahara.py | 172 -- fuel_plugin/__init__.py | 0 fuel_plugin/consts.py | 43 - fuel_plugin/ostf_adapter/__init__.py | 0 fuel_plugin/ostf_adapter/config.py | 106 -- fuel_plugin/ostf_adapter/logger.py | 87 - fuel_plugin/ostf_adapter/mixins.py | 343 ---- fuel_plugin/ostf_adapter/nailgun_hooks.py | 88 - .../ostf_adapter/nose_plugin/__init__.py | 31 - .../ostf_adapter/nose_plugin/nose_adapter.py | 164 -- .../nose_plugin/nose_discovery.py | 122 -- .../nose_plugin/nose_storage_plugin.py | 143 -- .../nose_plugin/nose_test_runner.py | 36 - .../ostf_adapter/nose_plugin/nose_utils.py | 237 --- fuel_plugin/ostf_adapter/server.py | 84 - fuel_plugin/ostf_adapter/storage/__init__.py | 0 fuel_plugin/ostf_adapter/storage/alembic.ini | 49 - .../ostf_adapter/storage/alembic_cli.py | 39 - fuel_plugin/ostf_adapter/storage/engine.py | 48 - fuel_plugin/ostf_adapter/storage/fields.py | 45 - .../ostf_adapter/storage/migrations/README | 1 - .../storage/migrations/__init__.py | 0 .../ostf_adapter/storage/migrations/env.py | 90 - .../storage/migrations/script.py.mako | 22 - .../versions/36e3fd684a9e_versioning.py | 44 - .../5133b1e66258_pid_field_for_testru.py | 38 - .../versions/53af7c2d9ccc_initial.py | 118 -- .../54904076d82d_list_of_excl_testset.py | 43 - fuel_plugin/ostf_adapter/storage/models.py | 404 ----- fuel_plugin/ostf_adapter/wsgi/__init__.py | 0 .../ostf_adapter/wsgi/access_control.py | 26 - fuel_plugin/ostf_adapter/wsgi/app.py | 68 - fuel_plugin/ostf_adapter/wsgi/controllers.py | 197 --- fuel_plugin/ostf_adapter/wsgi/hooks.py | 61 - fuel_plugin/ostf_adapter/wsgi/root.py | 31 - fuel_plugin/ostf_client/__init__.py | 0 fuel_plugin/ostf_client/client.py | 209 --- fuel_plugin/testing/__init__.py | 0 fuel_plugin/testing/fixture/__init__.py | 0 .../testing/fixture/dummy_tests/__init__.py | 0 .../fixture/dummy_tests/config_test.py | 29 - .../dependent_testsets/__init__.py | 0 .../dependent_testsets/gemini_first_test.py | 36 - .../dependent_testsets/gemini_second_test.py | 36 - .../deployment_types_tests/__init__.py | 0 .../alternative_depl_tags_test.py | 38 - .../ha_deployment_test.py | 71 - .../multinode_deployment_test.py | 58 - .../fixture/dummy_tests/general_test.py | 76 - .../fixture/dummy_tests/stopped_test.py | 48 - .../dummy_tests/test_environment_variables.py | 35 - .../fixture/dummy_tests/test_versioning.py | 59 - .../fixture/dummy_tests/test_with_error.py | 54 - fuel_plugin/testing/tests/__init__.py | 0 fuel_plugin/testing/tests/base.py | 498 ------ .../testing/tests/functional/__init__.py | 0 
fuel_plugin/testing/tests/functional/base.py | 140 -- fuel_plugin/testing/tests/functional/tests.py | 673 -------- .../testing/tests/integration/__init__.py | 0 .../tests/integration/test_models_methods.py | 507 ------ .../integration/test_wsgi_controllers.py | 306 ---- .../tests/integration/test_wsgi_interface.py | 76 - fuel_plugin/testing/tests/unit/__init__.py | 0 .../testing/tests/unit/test_nose_discovery.py | 253 --- .../testing/tests/unit/test_requirements.py | 19 - .../testing/tests/unit/test_results_logger.py | 75 - .../tests/unit/test_support_utilities.py | 206 --- ostf.service | 9 - pylintrc | 236 --- requirements.txt | 28 - run_tests.sh | 264 --- setup.cfg | 39 - setup.py | 28 - specs/fuel-ostf.spec | 140 -- test-requirements.txt | 7 - tools/test-setup.sh | 57 - tox.ini | 52 - 165 files changed, 10 insertions(+), 21472 deletions(-) delete mode 100644 .gitignore delete mode 100644 LICENSE delete mode 100644 MAINTAINERS delete mode 100644 MANIFEST.in delete mode 100644 README.md create mode 100644 README.rst delete mode 100644 etc/ostf/ostf.conf delete mode 100644 etc/tools/prepare_database.sh delete mode 100644 etc/tools/prepare_settings.sh delete mode 100644 fuel_health/__init__.py delete mode 100644 fuel_health/ceilometermanager.py delete mode 100755 fuel_health/cleanup.py delete mode 100644 fuel_health/cloudvalidation.py delete mode 100644 fuel_health/common/__init__.py delete mode 100644 fuel_health/common/facts.py delete mode 100644 fuel_health/common/log.py delete mode 100644 fuel_health/common/ssh.py delete mode 100644 fuel_health/common/test_mixins.py delete mode 100644 fuel_health/common/utils/__init__.py delete mode 100644 fuel_health/common/utils/data_utils.py delete mode 100644 fuel_health/common/utils/misc.py delete mode 100644 fuel_health/config.py delete mode 100644 fuel_health/etc/heat_autoscaling_neutron.yaml delete mode 100644 fuel_health/etc/heat_autoscaling_nova.yaml delete mode 100644 fuel_health/etc/heat_create_neutron_stack_template.yaml delete mode 100644 fuel_health/etc/heat_create_nova_stack_template.yaml delete mode 100644 fuel_health/etc/heat_update_neutron_stack_template.yaml delete mode 100644 fuel_health/etc/heat_update_nova_stack_template.yaml delete mode 100644 fuel_health/etc/heat_wait_condition_neutron.yaml delete mode 100644 fuel_health/etc/heat_wait_condition_nova.yaml delete mode 100644 fuel_health/etc/server.txt delete mode 100644 fuel_health/etc/test.conf delete mode 100644 fuel_health/exceptions.py delete mode 100644 fuel_health/glancemanager.py delete mode 100644 fuel_health/ha_base.py delete mode 100644 fuel_health/heatmanager.py delete mode 100644 fuel_health/hooks.py delete mode 100644 fuel_health/ironicmanager.py delete mode 100644 fuel_health/manager.py delete mode 100644 fuel_health/muranomanager.py delete mode 100644 fuel_health/neutronmanager.py delete mode 100644 fuel_health/nmanager.py delete mode 100644 fuel_health/saharamanager.py delete mode 100644 fuel_health/test.py delete mode 100644 fuel_health/tests/__init__.py delete mode 100644 fuel_health/tests/cloudvalidation/__init__.py delete mode 100644 fuel_health/tests/cloudvalidation/test_disk_space_db.py delete mode 100644 fuel_health/tests/cloudvalidation/test_disk_space_outage.py delete mode 100644 fuel_health/tests/cloudvalidation/test_keystone.py delete mode 100644 fuel_health/tests/cloudvalidation/test_logrotate.py delete mode 100644 fuel_health/tests/cloudvalidation/test_vm_auto_start.py delete mode 100644 fuel_health/tests/configuration/__init__.py delete mode 100644 
fuel_health/tests/configuration/test_configuration.py delete mode 100644 fuel_health/tests/ha/__init__.py delete mode 100644 fuel_health/tests/ha/test_haproxy.py delete mode 100644 fuel_health/tests/ha/test_mysql_replication.py delete mode 100644 fuel_health/tests/ha/test_mysql_status.py delete mode 100644 fuel_health/tests/ha/test_pacemaker_status.py delete mode 100644 fuel_health/tests/ha/test_rabbit.py delete mode 100644 fuel_health/tests/sanity/__init__.py delete mode 100644 fuel_health/tests/sanity/test_sanity_ceilometer.py delete mode 100644 fuel_health/tests/sanity/test_sanity_compute.py delete mode 100644 fuel_health/tests/sanity/test_sanity_glance.py delete mode 100644 fuel_health/tests/sanity/test_sanity_heat.py delete mode 100644 fuel_health/tests/sanity/test_sanity_identity.py delete mode 100644 fuel_health/tests/sanity/test_sanity_infrastructure.py delete mode 100644 fuel_health/tests/sanity/test_sanity_ironic.py delete mode 100644 fuel_health/tests/sanity/test_sanity_murano.py delete mode 100644 fuel_health/tests/sanity/test_sanity_networking.py delete mode 100644 fuel_health/tests/sanity/test_sanity_sahara.py delete mode 100644 fuel_health/tests/smoke/__init__.py delete mode 100644 fuel_health/tests/smoke/test_create_flavor.py delete mode 100644 fuel_health/tests/smoke/test_create_images.py delete mode 100644 fuel_health/tests/smoke/test_create_volume.py delete mode 100644 fuel_health/tests/smoke/test_dpdk.py delete mode 100644 fuel_health/tests/smoke/test_live_migration.py delete mode 100644 fuel_health/tests/smoke/test_neutron_actions.py delete mode 100644 fuel_health/tests/smoke/test_nova_create_instance_with_connectivity.py delete mode 100644 fuel_health/tests/smoke/test_nova_image_actions.py delete mode 100644 fuel_health/tests/smoke/test_user_create.py delete mode 100644 fuel_health/tests/smoke/test_vcenter.py delete mode 100644 fuel_health/tests/tests_platform/__init__.py delete mode 100644 fuel_health/tests/tests_platform/io.murano.apps.Simple/Classes/Simple.yaml delete mode 100644 fuel_health/tests/tests_platform/io.murano.apps.Simple/UI/ui.yaml delete mode 100644 fuel_health/tests/tests_platform/io.murano.apps.Simple/manifest.yaml delete mode 100644 fuel_health/tests/tests_platform/test_ceilometer.py delete mode 100644 fuel_health/tests/tests_platform/test_heat.py delete mode 100644 fuel_health/tests/tests_platform/test_murano_linux.py delete mode 100644 fuel_health/tests/tests_platform/test_sahara.py delete mode 100644 fuel_plugin/__init__.py delete mode 100644 fuel_plugin/consts.py delete mode 100644 fuel_plugin/ostf_adapter/__init__.py delete mode 100644 fuel_plugin/ostf_adapter/config.py delete mode 100644 fuel_plugin/ostf_adapter/logger.py delete mode 100644 fuel_plugin/ostf_adapter/mixins.py delete mode 100644 fuel_plugin/ostf_adapter/nailgun_hooks.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/__init__.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/nose_adapter.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/nose_discovery.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/nose_storage_plugin.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/nose_test_runner.py delete mode 100644 fuel_plugin/ostf_adapter/nose_plugin/nose_utils.py delete mode 100644 fuel_plugin/ostf_adapter/server.py delete mode 100644 fuel_plugin/ostf_adapter/storage/__init__.py delete mode 100644 fuel_plugin/ostf_adapter/storage/alembic.ini delete mode 100644 fuel_plugin/ostf_adapter/storage/alembic_cli.py delete mode 100644 
fuel_plugin/ostf_adapter/storage/engine.py delete mode 100644 fuel_plugin/ostf_adapter/storage/fields.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/README delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/__init__.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/env.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/script.py.mako delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/versions/36e3fd684a9e_versioning.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/versions/5133b1e66258_pid_field_for_testru.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/versions/53af7c2d9ccc_initial.py delete mode 100644 fuel_plugin/ostf_adapter/storage/migrations/versions/54904076d82d_list_of_excl_testset.py delete mode 100644 fuel_plugin/ostf_adapter/storage/models.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/__init__.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/access_control.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/app.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/controllers.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/hooks.py delete mode 100644 fuel_plugin/ostf_adapter/wsgi/root.py delete mode 100644 fuel_plugin/ostf_client/__init__.py delete mode 100644 fuel_plugin/ostf_client/client.py delete mode 100644 fuel_plugin/testing/__init__.py delete mode 100644 fuel_plugin/testing/fixture/__init__.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/__init__.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/config_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/__init__.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_first_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_second_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/__init__.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/alternative_depl_tags_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/ha_deployment_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/multinode_deployment_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/general_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/stopped_test.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/test_environment_variables.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/test_versioning.py delete mode 100644 fuel_plugin/testing/fixture/dummy_tests/test_with_error.py delete mode 100644 fuel_plugin/testing/tests/__init__.py delete mode 100644 fuel_plugin/testing/tests/base.py delete mode 100644 fuel_plugin/testing/tests/functional/__init__.py delete mode 100644 fuel_plugin/testing/tests/functional/base.py delete mode 100644 fuel_plugin/testing/tests/functional/tests.py delete mode 100644 fuel_plugin/testing/tests/integration/__init__.py delete mode 100644 fuel_plugin/testing/tests/integration/test_models_methods.py delete mode 100644 fuel_plugin/testing/tests/integration/test_wsgi_controllers.py delete mode 100644 fuel_plugin/testing/tests/integration/test_wsgi_interface.py delete mode 100644 fuel_plugin/testing/tests/unit/__init__.py delete mode 100644 fuel_plugin/testing/tests/unit/test_nose_discovery.py delete mode 100644 fuel_plugin/testing/tests/unit/test_requirements.py delete mode 100644 
fuel_plugin/testing/tests/unit/test_results_logger.py delete mode 100644 fuel_plugin/testing/tests/unit/test_support_utilities.py delete mode 100644 ostf.service delete mode 100644 pylintrc delete mode 100644 requirements.txt delete mode 100755 run_tests.sh delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 specs/fuel-ostf.spec delete mode 100644 test-requirements.txt delete mode 100755 tools/test-setup.sh delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index cc57eb1d..00000000 --- a/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -.idea -*.pyc -*.log -nosetests.xml -*.egg-info -/*.egg -.tox -build -dist -*.out -.coverage -.ropeproject -*.swp -test_run diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index ef1579db..00000000 --- a/MAINTAINERS +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: - For Fuel team structure and contribution policy, see [1]. - - This is repository level MAINTAINERS file. All contributions to this - repository must be approved by one or more Core Reviewers [2]. - If you are contributing to files (or create new directories) in - root folder of this repository, please contact Core Reviewers for - review and merge requests. - - If you are contributing to subfolders of this repository, please - check 'maintainers' section of this file in order to find maintainers - for those specific modules. - - It is mandatory to get +1 from one or more maintainers before asking - Core Reviewers for review/merge in order to decrease a load on Core Reviewers [3]. - Exceptions are when maintainers are actually cores, or when maintainers - are not available for some reason (e.g. on vacation). - - [1] https://specs.openstack.org/openstack/fuel-specs/policy/team-structure - [2] https://review.openstack.org/#/admin/groups/660,members - [3] http://lists.openstack.org/pipermail/openstack-dev/2015-August/072406.html - - Please keep this file in YAML format in order to allow helper scripts - to read this as a configuration data. - -maintainers: - -- fuel_plugin/: - - name: Artem Roma - email: aroma@mirantis.com - IRC: a_teem - -- fuel_health/: - - name: Vladimir Khlyunev - email: vkhlyunev@mirantis.com - IRC: vkhlyunev - - - name: Volodymyr Shypyguzov - email: vshypyguzov@mirantis.com - IRC: vshypyguzov - -- specs/: - - name: Mikhail Ivanov - email: mivanov@mirantis.com - IRC: mivanov - - - name: Artem Silenkov - email: asilenkov@mirantis.com - IRC: asilenkov - - - name: Alexander Tsamutali - email: atsamutali@mirantis.com - IRC: astsmtl - - - name: Daniil Trishkin - email: dtrishkin@mirantis.com - IRC: dtrishkin - - - name: Ivan Udovichenko - email: iudovichenko@mirantis.com - IRC: tlbr - - - name: Igor Yozhikov - email: iyozhikov@mirantis.com - IRC: IgorYozhikov diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 455d1943..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -recursive-include fuel_health * -recursive-include fuel_plugin * -include requirements.txt \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index 15746427..00000000 --- a/README.md +++ /dev/null @@ -1,123 +0,0 @@ -Team and repository tags -======================== - -[![Team and repository tags](http://governance.openstack.org/badges/fuel-ostf.svg)](http://governance.openstack.org/reference/tags/index.html) - - - -Fuel OSTF tests -=============== -After OpenStack installation via Fuel, it is very important to understand whether it was successful and if it is ready for work. Fuel-ostf provides a set of health checks to be run against from Fuel console check the proper operation of all system components in typical conditions. - -Details of Fuel OSTF tests -========================== -Tests are included to Fuel, so they will be accessible as soon as you install Fuel on your lab. Fuel ostf architecture is quite simple, it consists of two main packages: - - -On the other hand, there is some information necessary for test execution itself. There are several modules that gather information and parse them into objects which will be used in the tests themselves. -All information is gathered from Nailgun component. 
-
-Python REST API interface
-=========================
-Fuel-ostf module provides not only testing, but also RESTful interface, a means for interaction with the components.
-
-In terms of REST, all types of OSTF entities are managed by three HTTP verbs: GET, POST and PUT.
-
-The following basic URL is used to make requests to OSTF:
-
-    {ostf_host}:{ostf_port}/v1/{requested_entity}/{cluster_id}
-
-Currently, you can get information about testsets, tests and testruns via GET request on corresponding URLs for ostf_plugin.
-
-To get information about testsets, make the following GET request on:
-
-    {ostf_host}:{ostf_port}/v1/testsets/{cluster_id}
-
-To get information about tests, make GET request on:
-
-    {ostf_host}:{ostf_port}/v1/tests/{cluster_id}
-
-To get information about executed tests, make the following GET requests:
-
-for the whole set of testruns:
-
-    {ostf_host}:{ostf_port}/v1/testruns/
-
-for the particular testrun:
-
-    {ostf_host}:{ostf_port}/v1/testruns/{testrun_id}
-
-for the list of testruns executed on the particular cluster:
-
-    {ostf_host}:{ostf_port}/v1/testruns/last/{cluster_id}
-
-To start test execution, make the following POST request on this URL:
-
-    {ostf_host}:{ostf_port}/v1/testruns/
-
-
-The body must consist of JSON data structure with testsets and the list of tests belonging to it that must be executed. It should also have metadata with the information about the cluster (the key with the “cluster_id” name is used to store the parameter’s value):
-
-    [
-        {
-            "testset": "test_set_name",
-            "tests": ["module.path.to.test.1", ..., "module.path.to.test.n"],
-            "metadata": {"cluster_id": id}
-        },
-
-        ...,
-
-        {...}, # info for another testrun
-        {...},
-
-        ...,
-
-        {...}
-    ]
-
-If succeeded, OSTF adapter returns attributes of created testrun entities in JSON format. If you want to launch only one test, put its id into the list. To launch all tests, leave the list empty (by default). Example of the response:
-
-    [
-        {
-            "status": "running",
-            "testset": "sanity",
-            "meta": null,
-            "ended_at": "2014-12-12 15:31:54.528773",
-            "started_at": "2014-12-12 15:31:41.481071",
-            "cluster_id": 1,
-            "id": 1,
-            "tests": [.....info on tests.....]
-        },
-
-        ....
-    ]
-
-You can also stop and restart testruns. To do that, make a PUT request on testruns. The request body must contain the list of the testruns and tests to be stopped or restarted. Example:
-
-    [
-        {
-            "id": test_run_id,
-            "status": ("stopped" | "restarted"),
-            "tests": ["module.path.to.test.1", ..., "module.path.to.test.n"]
-        },
-
-        ...,
-
-        {...}, # info for another testrun
-        {...},
-
-        ...,
-
-        {...}
-    ]
-
-
-Testing
-==========
-There are next test targets that can be run to validate the code.
-
-    tox -e pep8 - style guidelines enforcement
-    tox -e py27 - unit and integration testing
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..86e34d67
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,10 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+Freenode.
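Since the REST workflow above survives only in this deleted README, a short client sketch may help readers of the old documentation. It is illustrative only: it assumes the [adapter] defaults from etc/ostf/ostf.conf (127.0.0.1:8777), a hypothetical cluster id of 1, and the request bodies quoted in the README ("sanity" is taken from the sample response; an empty "tests" list launches every test). Nothing in the sketch is added by this patch, which retires the service.

    # Illustrative sketch only; not part of this change, which removes the OSTF service.
    # Host and port follow the [adapter] defaults in etc/ostf/ostf.conf; the cluster id
    # is hypothetical.
    import requests

    OSTF_URL = "http://127.0.0.1:8777/v1"
    CLUSTER_ID = 1

    # List the test sets defined for a cluster.
    testsets = requests.get("{0}/testsets/{1}".format(OSTF_URL, CLUSTER_ID)).json()

    # Start the "sanity" test set; an empty "tests" list means "run every test".
    payload = [{"testset": "sanity",
                "tests": [],
                "metadata": {"cluster_id": CLUSTER_ID}}]
    testruns = requests.post("{0}/testruns/".format(OSTF_URL), json=payload).json()

    # Stop the testrun that was just created.
    stop_body = [{"id": testruns[0]["id"], "status": "stopped", "tests": []}]
    requests.put("{0}/testruns/".format(OSTF_URL), json=stop_body)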
diff --git a/etc/ostf/ostf.conf b/etc/ostf/ostf.conf deleted file mode 100644 index 288f1b68..00000000 --- a/etc/ostf/ostf.conf +++ /dev/null @@ -1,144 +0,0 @@ -[adapter] -server_host = 127.0.0.1 -server_port = 8777 -dbpath = postgresql+psycopg2://ostf:ostf@localhost/ostf -lock_dir = /var/lock -nailgun_host = 127.0.0.1 -nailgun_port = 8000 -log_file = /var/log/ostf.log -after_init_hook = False -auth_enable = False - -[keystone_authtoken] - -# -# Options defined in keystonemiddleware.auth_token -# - -# Prefix to prepend at the beginning of the path. Deprecated, -# use identity_uri. (string value) -#auth_admin_prefix= - -# Host providing the admin Identity API endpoint. Deprecated, -# use identity_uri. (string value) -#auth_host=127.0.0.1 - -# Port of the admin Identity API endpoint. Deprecated, use -# identity_uri. (integer value) -#auth_port=35357 - -# Protocol of the admin Identity API endpoint (http or https). -# Deprecated, use identity_uri. (string value) -#auth_protocol=https - -# Complete public Identity API endpoint (string value) -#auth_uri= - -# Complete admin Identity API endpoint. This should specify -# the unversioned root endpoint eg. https://localhost:35357/ -# (string value) -#identity_uri= - -# API version of the admin Identity API endpoint (string -# value) -#auth_version= - -# Do not handle authorization requests within the middleware, -# but delegate the authorization decision to downstream WSGI -# components (boolean value) -#delay_auth_decision=false - -# Request timeout value for communicating with Identity API -# server. (boolean value) -#http_connect_timeout= - -# How many times are we trying to reconnect when communicating -# with Identity API Server. (integer value) -#http_request_max_retries=3 - -# Single shared secret with the Keystone configuration used -# for bootstrapping a Keystone installation, or otherwise -# bypassing the normal authentication process. (string value) -#admin_token= - -# Keystone account username (string value) -#admin_user= - -# Keystone account password (string value) -#admin_password= - -# Keystone service account tenant name to validate user tokens -# (string value) -#admin_tenant_name=admin - -# Env key for the swift cache (string value) -#cache= - -# Required if Keystone server requires client certificate -# (string value) -#certfile= - -# Required if Keystone server requires client certificate -# (string value) -#keyfile= - -# A PEM encoded Certificate Authority to use when verifying -# HTTPs connections. Defaults to system CAs. (string value) -#cafile= - -# Verify HTTPS connections. (boolean value) -#insecure=false - -# Directory used to cache files related to PKI tokens (string -# value) -#signing_dir= - -# Optionally specify a list of memcached server(s) to use for -# caching. If left undefined, tokens will instead be cached -# in-process. (list value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers= - -# In order to prevent excessive effort spent validating -# tokens, the middleware caches previously-seen tokens for a -# configurable duration (in seconds). Set to -1 to disable -# caching completely. (integer value) -#token_cache_time=300 - -# Determines the frequency at which the list of revoked tokens -# is retrieved from the Identity service (in seconds). A high -# number of revocation events combined with a low cache -# duration may significantly reduce performance. 
(integer -# value) -#revocation_cache_time=300 - -# (optional) if defined, indicate whether token data should be -# authenticated or authenticated and encrypted. Acceptable -# values are MAC or ENCRYPT. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token -# data is encrypted and authenticated in the cache. If the -# value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -#memcache_security_strategy= - -# (optional, mandatory if memcache_security_strategy is -# defined) this string is used for key derivation. (string -# value) -#memcache_secret_key= - -# (optional) indicate whether to set the X-Service-Catalog -# header. If False, middleware will not ask for service -# catalog on token validation and will not set the X-Service- -# Catalog header. (boolean value) -#include_service_catalog=true - -# Used to control the use and type of token binding. Can be -# set to: "disabled" to not check token binding. "permissive" -# (default) to validate binding information if the bind type -# is of a form known to the server and ignore it if not. -# "strict" like "permissive" but if the bind type is unknown -# the token will be rejected. "required" any form of token -# binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string -# value) -#enforce_token_bind=permissive diff --git a/etc/tools/prepare_database.sh b/etc/tools/prepare_database.sh deleted file mode 100644 index c6c939a7..00000000 --- a/etc/tools/prepare_database.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -echo "Preparing pgpass file ${DB_ROOTPGPASS}" -echo "*:*:*:${OSTF_DB_ROOT}:${OSTF_DB_ROOTPW}" > ${OSTF_DB_ROOTPGPASS} -chmod 600 ${OSTF_DB_ROOTPGPASS} - -export PGPASSFILE=${OSTF_DB_ROOTPGPASS} -cat $PGPASSFIL diff --git a/etc/tools/prepare_settings.sh b/etc/tools/prepare_settings.sh deleted file mode 100644 index 261e5e46..00000000 --- a/etc/tools/prepare_settings.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -cat > ${CUSTOM_OSTF_CONFIG} < 0: - return True # All good. 
- LOG.debug("Waiting for while metrics will available.") - - if not fuel_health.test.call_until_true(check_status, 600, 10): - - self.fail("Timed out waiting to become alarm") - else: - return self.ceilometer_client.statistics.list(meter_name, q=query, - period=period) - - def wait_for_ceilo_objects(self, object_list, query, ceilo_obj_type): - for obj in object_list: - self.wait_for_object_sample(obj, query, ceilo_obj_type) - - def create_image_sample(self, image_id): - sample = self.ceilometer_client.samples.create( - resource_id=image_id, counter_name='image', counter_type='delta', - counter_unit='image', counter_volume=1, - resource_metadata={'user': 'example_metadata'}) - return sample - - def get_samples_count(self, meter_name, query): - return self.ceilometer_client.statistics.list( - meter_name=meter_name, q=query)[0].count - - def wait_samples_count(self, meter_name, query, count): - - def check_count(): - new_count = self.get_samples_count(meter_name, query) - return new_count > count - - if not fuel_health.test.call_until_true(check_count, 60, 1): - self.fail('Count of samples list isn\'t ' - 'greater than expected value') - - def check_event_type(self, event_type): - event_list = [event.event_type for event - in self.ceilometer_client.event_types.list()] - if event_type not in event_list: - self.fail('"{event_type}" not found in event type list.'.format( - event_type=event_type)) - - def check_event_message_id(self, events_list, instance_id): - for event in events_list: - try: - if next(x['value'] for x in event.traits - if x['name'] == "instance_id") == instance_id: - return event.message_id - except StopIteration: - self.fail('Trait "instance_id" not found in trait list.') - self.fail('No events found for "{instance_id}" instance.'.format( - instance_id=instance_id)) - - def check_traits(self, event_type, traits): - trait_desc = [desc.name for desc in - self.ceilometer_client.trait_descriptions.list( - event_type)] - for trait in traits: - if trait not in trait_desc: - self.fail('Trait "{trait}" not found in trait list.'.format( - trait=trait)) - - def identity_helper(self): - user_pass = rand_name("ceilo-user-pass") - user_name = rand_name("ceilo-user-update") - tenant_name = rand_name("ceilo-tenant-update") - tenant = self.identity_client.tenants.create(rand_name("ceilo-tenant")) - self.objects_for_delete.append(( - self.identity_client.tenants.delete, tenant)) - self.identity_client.tenants.update(tenant.id, name=tenant_name) - user = self.identity_client.users.create( - rand_name("ceilo-user"), user_pass, tenant.id) - self.objects_for_delete.append(( - self.identity_client.users.delete, user)) - self.identity_client.users.update(user, name=user_name) - role = self.identity_v3_client.roles.create(rand_name("ceilo-role")) - self.identity_v3_client.roles.update( - role, user=user.id, project=tenant.id) - self.identity_v3_client.roles.grant( - role, user=user.id, project=tenant.id) - self.objects_for_delete.append(( - self.identity_client.roles.delete, role)) - user_client = self.manager_class()._get_identity_client( - user_name, user_pass, tenant_name, 3) - trust = user_client.trusts.create( - self.identity_v3_client.user_id, user.id, [role.name], tenant.id) - self.objects_for_delete.append((user_client.trusts.delete, trust)) - group = self.identity_v3_client.groups.create(rand_name("ceilo-group")) - self.objects_for_delete.append(( - self.identity_v3_client.groups.delete, group)) - self.identity_v3_client.groups.update( - group, name=rand_name("ceilo-group-update")) - 
self.identity_v3_client.groups.delete(group) - user_client.trusts.delete(trust) - self.identity_v3_client.roles.revoke( - role, user=user.id, project=tenant.id) - self.identity_client.roles.delete(role) - self.identity_client.users.delete(user) - self.identity_client.tenants.delete(tenant) - return tenant, user, role, group, trust - - def neutron_helper(self): - net = self.neutron_client.create_network( - {"network": {"name": rand_name("ceilo-net")}})["network"] - self.addCleanup(self.cleanup_resources, - [(self.neutron_client.delete_network, net["id"])]) - self.neutron_client.update_network( - net["id"], {"network": {"name": rand_name("ceilo-net-update")}}) - - subnet = self.neutron_client.create_subnet( - {"subnet": {"name": rand_name("ceilo-subnet"), - "network_id": net["id"], - "ip_version": 4, - "cidr": "10.0.7.0/24"}})["subnet"] - self.addCleanup(self.cleanup_resources, - [(self.neutron_client.delete_subnet, subnet["id"])]) - self.neutron_client.update_subnet( - subnet["id"], {"subnet": {"name": rand_name("ceilo-subnet")}}) - - port = self.neutron_client.create_port({ - "port": {"name": rand_name("ceilo-port"), - "network_id": net["id"]}})['port'] - self.addCleanup(self.cleanup_resources, - [(self.neutron_client.delete_port, port["id"])]) - self.neutron_client.update_port( - port["id"], {"port": {"name": rand_name("ceilo-port-update")}}) - - router = self.neutron_client.create_router( - {"router": {"name": rand_name("ceilo-router")}})['router'] - self.addCleanup(self.cleanup_resources, - [(self.neutron_client.delete_router, router["id"])]) - self.neutron_client.update_router( - router["id"], - {"router": {"name": rand_name("ceilo-router-update")}}) - - external_network = self.find_external_network() - try: - body = { - "floatingip": { - "floating_network_id": external_network["id"] - } - } - fl_ip = self.neutron_client.create_floatingip(body)["floatingip"] - except neutron_exc.IpAddressGenerationFailureClient: - self.fail('No more IP addresses available on external network.') - self.addCleanup(self.cleanup_resources, - [(self.neutron_client.delete_floatingip, fl_ip["id"])]) - self.neutron_client.update_floatingip( - fl_ip["id"], {"floatingip": {"port_id": None}}) - - self.neutron_client.delete_floatingip(fl_ip["id"]) - self.neutron_client.delete_router(router["id"]) - self.neutron_client.delete_port(port["id"]) - self.neutron_client.delete_subnet(subnet["id"]) - self.neutron_client.delete_network(net["id"]) - - return net, subnet, port, router, fl_ip - - def sahara_helper(self, image_id, plugin_name, hadoop_version): - # Find flavor id for sahara instances - flavor_id = next( - flavor.id for flavor in - self.compute_client.flavors.list() if flavor.name == 'm1.small') - - private_net_id, floating_ip_pool = self.create_network_resources() - # Create json for node group - node_group = {'name': 'all-in-one', - 'flavor_id': flavor_id, - 'node_processes': ['nodemanager', 'datanode', - 'resourcemanager', 'namenode', - 'historyserver'], - 'count': 1, - 'auto_security_group': True} - if floating_ip_pool: - node_group['floating_ip_pool'] = floating_ip_pool - - # Create json for Sahara cluster - cluster_json = {'name': rand_name("ceilo-cluster"), - 'plugin_name': plugin_name, - 'hadoop_version': hadoop_version, - 'default_image_id': image_id, - 'cluster_configs': {'HDFS': {'dfs.replication': 1}}, - 'node_groups': [node_group], - 'net_id': private_net_id} - - # Create Sahara cluster - cluster = self.sahara_client.clusters.create(**cluster_json) - self.addCleanup( - self.delete_resource, - 
delete_method=lambda: self.sahara_client.clusters.delete( - cluster.id), - get_method=lambda: self.sahara_client.clusters.get(cluster.id)) - - # Wait for change cluster state for metric: cluster.update - def check_status(): - cluster_state = self.sahara_client.clusters.get(cluster.id).status - return cluster_state in ['Waiting', 'Active', 'Error'] - fuel_health.test.call_until_true(check_status, 300, 1) - - # Delete cluster - self.sahara_client.clusters.delete(cluster.id) - - return cluster - - def glance_helper(self): - image = self.glance_client.images.create( - name=rand_name('ostf-ceilo-image')) - self.objects_for_delete.append((self.glance_client.images.delete, - image.id)) - self.glance_client.images.update(image.id, data='data', - disk_format='qcow2', - container_format='bare') - self.glance_client.images.upload(image.id, 'upload_data') - self.glance_client.images.data(image.id) - self.glance_client.images.delete(image.id) - return image - - def volume_helper(self, instance): - device = '/dev/vdb' - # Create a volume - volume = self.volume_client.volumes.create( - name=rand_name('ost1_test-ceilo-volume'), size=1) - self.addCleanup( - self.delete_resource, - delete_method=lambda: self.volume_client.volumes.delete(volume), - get_method=lambda: self.volume_client.volumes.get(volume.id)) - # Wait for "Available" status of the volume - self.wait_for_resource_status( - self.volume_client.volumes, volume.id, 'available') - # Resize the volume - self.volume_client.volumes.extend(volume, 2) - self.wait_for_resource_status( - self.volume_client.volumes, volume.id, 'available') - # Create a volume snapshot - snapshot = self.volume_client.volume_snapshots.create( - volume.id, name=rand_name('ost1_test-')) - self.addCleanup( - self.delete_resource, - delete_method=lambda: self.volume_client.volume_snapshots.delete( - snapshot), - get_method=lambda: self.volume_client.volume_snapshots.get( - snapshot.id)) - # Wait for "Available" status of the snapshot - self.wait_for_resource_status( - self.volume_client.volume_snapshots, snapshot.id, 'available') - # Update the volume name - self.volume_client.volumes.update(volume, name="ost1_test-update") - # Attach the volume to the instance - self.volume_client.volumes.attach(volume.id, instance.id, device) - # Detach the volume from the instance - self.volume_client.volumes.detach(volume.id) - # Delete the volume snapshot - self.delete_resource( - delete_method=lambda: self.volume_client.volume_snapshots.delete( - snapshot), - get_method=lambda: self.volume_client.volume_snapshots.get( - snapshot.id)) - # Delete the volume - self.delete_resource( - delete_method=lambda: self.volume_client.volumes.delete(volume), - get_method=lambda: self.volume_client.volumes.get(volume.id)) - return volume, snapshot - - @staticmethod - def cleanup_resources(object_list): - for method, resource in object_list: - try: - method(resource) - except Exception: - LOG.exception("") - - @classmethod - def tearDownClass(cls): - if cls.manager.clients_initialized: - cls.cleanup_resources(cls.objects_for_delete) - super(CeilometerBaseTest, cls).tearDownClass() diff --git a/fuel_health/cleanup.py b/fuel_health/cleanup.py deleted file mode 100755 index a947e6b6..00000000 --- a/fuel_health/cleanup.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env python -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys -import time - -path = os.getcwd() -sys.path.append(path) - -import logging -import requests - -from fuel_health import exceptions -import fuel_health.nmanager - - -LOG = logging.getLogger(__name__) - - -class CleanUpClientManager(fuel_health.nmanager.OfficialClientManager): - """Manager that provides access to the official python clients for - calling various OpenStack APIs. - """ - - def wait_for_server_termination(self, server, ignore_error=False): - """Waits for server to reach termination.""" - start_time = int(time.time()) - while True: - try: - self._get_compute_client().servers.get(server) - except exceptions.NotFound: - return - - server_status = server.status - if server_status == 'ERROR' and not ignore_error: - raise exceptions.BuildErrorException(server_id=server.id) - - if int(time.time()) - start_time >= self.build_timeout: - raise exceptions.TimeoutException - - time.sleep(self.build_interval) - - -def cleanup(cluster_deployment_info): - """Function performs cleaning up for current cluster. - - Because clusters can be deployed in different way - function uses cluster_deployment_info argument which - contains list of deployment tags of needed cluster. - - This approach that consists in using one cleanup function - for all possible testsets is not so good because of - constant checking of component presence in deployment info. - - More better way is to create separate functions for each - set of tests so refactoring of this chunk of code is higly - appreciated. 
- """ - manager = CleanUpClientManager() - - if 'sahara' in cluster_deployment_info: - try: - sahara_client = manager._get_sahara_client() - if sahara_client is not None: - _delete_it(client=sahara_client.clusters, - log_message='Start sahara cluster deletion', - name='ostf-test-', delete_type='id') - _delete_it(client=sahara_client.cluster_templates, - log_message='Start sahara cluster' - ' template deletion', - delete_type='id') - _delete_it(client=sahara_client.node_group_templates, - log_message='Start sahara node' - ' group template deletion', - delete_type='id') - except Exception: - LOG.exception('Failed sahara cluster cleanup') - - if 'murano' in cluster_deployment_info: - try: - murano_client = manager._get_murano_client() - compute_client = manager._get_compute_client() - - if murano_client is not None: - endpoint = manager.config.murano.api_url + '/v1/' - headers = {'X-Auth-Token': murano_client.auth_token, - 'content-type': 'application/json'} - environments = requests.get(endpoint + 'environments', - headers=headers).json() - for e in environments["environments"]: - if e['name'].startswith('ostf_test-'): - try: - LOG.info('Start environment deletion.') - requests.delete('{0}environments/{1}'.format( - endpoint, e['id']), headers=headers) - except Exception: - LOG.exception('Failed to delete murano \ - environment') - - if compute_client is not None: - flavors = compute_client.flavors.list() - for flavor in flavors: - if 'ostf_test_Murano' in flavor.name: - try: - LOG.info('Start flavor deletion.') - compute_client.flavors.delete(flavor.id) - except Exception: - LOG.exception('Failed to delete flavor') - - except Exception: - LOG.exception('Failed murano cluster cleanup') - - if 'ceilometer' in cluster_deployment_info: - try: - ceilometer_client = manager._get_ceilometer_client() - if ceilometer_client is not None: - alarms = ceilometer_client.alarms.list() - for a in alarms: - if a.name.startswith('ost1_test-'): - try: - LOG.info('Start alarms deletion.') - ceilometer_client.alarms.delete(a.id) - except Exception as exc: - LOG.debug(exc) - except Exception as exc: - LOG.warning('Something wrong with ceilometer client. ' - 'Exception: {0}'.format(exc)) - - if 'heat' in cluster_deployment_info: - try: - heat_client = manager._get_heat_client() - if heat_client is not None: - stacks = heat_client.stacks.list() - for s in stacks: - if s.stack_name.startswith('ost1_test-'): - try: - LOG.info('Start stacks deletion.') - heat_client.stacks.delete(s.id) - except Exception: - LOG.exception('Failed stacks deletion') - except Exception: - LOG.exception('Failed during heat cluster deletion') - - if 'ironic' in cluster_deployment_info: - try: - ironic_client = manager._get_ironic_client() - if ironic_client is not None: - nodes = ironic_client.node.list() - for n in nodes: - if "NodeTest" in ironic_client.node.extra.items(): - try: - LOG.info('Start nodes deletion.') - ironic_client.node.delete(n.uuid) - except Exception as exc: - LOG.debug(exc) - except Exception as exc: - LOG.warning('Something wrong with ironic client. 
' - 'Exception: {0}'.format(exc)) - - instances_id = [] - servers = manager._get_compute_client().servers.list() - floating_ips = manager._get_compute_client().floating_ips.list() - - if servers: - for s in servers: - if s.name.startswith('ost1_test-'): - instances_id.append(s.id) - for f in floating_ips: - if f.instance_id in instances_id: - try: - LOG.info('Delete floating ip {0}'.format(f.ip)) - manager._get_compute_client().floating_ips.delete( - f.id) - except Exception: - LOG.exception('Failed during floating ip delete') - try: - LOG.info('Delete server with name {0}'.format(s.name)) - manager._get_compute_client().servers.delete(s.id) - except Exception: - LOG.exception("") - else: - LOG.info('No servers found') - - for s in servers: - try: - LOG.info('Wait for server terminations') - manager.wait_for_server_termination(s) - except Exception: - LOG.exception('Failure on waiting for server termination') - - _delete_it(manager._get_compute_client().keypairs, - 'Start keypair deletion') - _delete_it(manager._get_identity_client().users, 'Start deletion of users') - _delete_it(manager._get_identity_client().tenants, 'Start tenant deletion') - roles = manager._get_identity_client().roles.list() - if roles: - _delete_it(manager._get_identity_client().roles, - 'Start roles deletion') - else: - LOG.info('no roles') - _delete_it(manager._get_compute_client().images, 'Start images deletion') - _delete_it(manager._get_volume_client().volumes, 'Start volumes deletion') - _delete_it(manager._get_compute_client().flavors, 'start flavors deletion') - _delete_it(manager._get_volume_client().volume_types, - 'start deletion of volume types') - _delete_it(manager._get_compute_client().security_groups, - 'Start deletion of security groups', delete_type='id') - - -def _delete_it(client, log_message, name='ost1_test-', delete_type='name'): - try: - for item in client.list(): - try: - if item.name.startswith(name): - try: - LOG.info(log_message) - if delete_type == 'name': - client.delete(item) - else: - client.delete(item.id) - except Exception: - LOG.exception("") - except AttributeError: - if item.display_name.startswith(name): - client.delete(item) - except Exception: - LOG.exception("") - - -if __name__ == "__main__": - cleanup() diff --git a/fuel_health/cloudvalidation.py b/fuel_health/cloudvalidation.py deleted file mode 100644 index 131f5959..00000000 --- a/fuel_health/cloudvalidation.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -LOG = logging.getLogger(__name__) - -from fuel_health.common import ssh - -from fuel_health import nmanager - - -class CloudValidationTest(nmanager.OfficialClientTest): - """Base class for Cloud validation tests.""" - - @classmethod - def setUpClass(cls): - super(CloudValidationTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.controllers = cls.config.compute.online_controllers - cls.computes = cls.config.compute.online_computes - cls.usr = cls.config.compute.controller_node_ssh_user - cls.pwd = cls.config.compute.controller_node_ssh_password - cls.key = cls.config.compute.path_to_private_key - cls.timeout = cls.config.compute.ssh_timeout - - def setUp(self): - super(CloudValidationTest, self).setUp() - self.check_clients_state() - - def _run_ssh_cmd(self, host, cmd): - """Open SSH session with host and execute command.""" - try: - sshclient = ssh.Client(host, self.usr, self.pwd, - key_filename=self.key, timeout=self.timeout) - return sshclient.exec_longrun_command(cmd) - except Exception: - LOG.exception('Failure on ssh run cmd') - self.fail("%s command failed." % cmd) diff --git a/fuel_health/common/__init__.py b/fuel_health/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_health/common/facts.py b/fuel_health/common/facts.py deleted file mode 100644 index dc196ea6..00000000 --- a/fuel_health/common/facts.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import yaml - - -class Facts: - __default_config_path = '/var/lib/puppet/yaml/facts/' - - def __init__(self, config=None): - _config_path = config or self.__default_config_path - self.config = self._read_config(_config_path) - - @property - def amqp(self): - _amqp = self._get_rabbit_data() or self._get_qpid_data() - return _amqp - - @property - def amqp_user(self): - return 'nova' - - @property - def amqp_password(self): - return self.amqp['password'] - - def _read_config(self, path): - _file = None - for file in os.listdir(path): - if file.endswith('.yaml'): - _file = file - break - _file = open(os.path.join(path, _file)) - self._init_parser() - data = yaml.load(_file) - _file.close() - return data - - def _get_rabbit_data(self): - try: - return self.config['values']['rabbit'] - except KeyError: - return None - - def _get_qpid_data(self): - try: - return self.config['values']['qpid'] - except KeyError: - return None - - def _init_parser(self): - # Custom YAML constructs for ruby objects for puppet files parsing - def _construct_ruby_object(loader, suffix, node): - return loader.construct_yaml_map(node) - - def _construct_ruby_sym(loader, suffix, node): - return loader.construct_yaml_str(node) - - yaml.add_multi_constructor(u"!ruby/object:", _construct_ruby_object) - yaml.add_multi_constructor(u"!ruby/sym", _construct_ruby_sym) diff --git a/fuel_health/common/log.py b/fuel_health/common/log.py deleted file mode 100644 index b34015a4..00000000 --- a/fuel_health/common/log.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2013 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ConfigParser -import inspect -import logging -import logging.config -import os -import re - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - - -_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - -_loggers = {} - - -def getLogger(name='unknown'): - if len(_loggers) == 0: - loaded = _load_log_config() - getLogger.adapter = TestsAdapter if loaded else None - - if name not in _loggers: - logger = logging.getLogger(name) - if getLogger.adapter: - _loggers[name] = getLogger.adapter(logger, name) - else: - _loggers[name] = logger - - return _loggers[name] - - -def _load_log_config(): - conf_dir = os.environ.get('FUEL_LOG_CONFIG_DIR', None) - conf_file = os.environ.get('FUEL_LOG_CONFIG', None) - if not conf_dir or not conf_file: - return False - - log_config = os.path.join(conf_dir, conf_file) - try: - logging.config.fileConfig(log_config) - except ConfigParser.Error as exc: - raise cfg.ConfigFileParseError(log_config, str(exc)) - return True - - -class TestsAdapter(logging.LoggerAdapter): - - def __init__(self, logger, project_name): - self.logger = logger - self.project = project_name - self.regexp = re.compile(r"test_\w+\.py") - - def __getattr__(self, key): - return getattr(self.logger, key) - - def _get_test_name(self): - frames = inspect.stack() - for frame in frames: - binary_name = frame[1] - if self.regexp.search(binary_name) and 'self' in frame[0].f_locals: - return frame[0].f_locals.get('self').id() - elif frame[3] == '_run_cleanups': - # NOTE(myamazaki): method calling addCleanup - return frame[0].f_locals.get('self').case.id() - elif frame[3] in ['setUpClass', 'tearDownClass']: - # NOTE(myamazaki): setUpClass or tearDownClass - return "%s.%s.%s" % (frame[0].f_locals['cls'].__module__, - frame[0].f_locals['cls'].__name__, - frame[3]) - return None - - def process(self, msg, kwargs): - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - test_name = self._get_test_name() - if test_name: - extra.update({'testname': test_name}) - extra['extra'] = extra.copy() - - return msg, kwargs - - -class TestsFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - super(TestsFormatter, self).__init__() - self.default_format = _DEFAULT_LOG_FORMAT - self.testname_format =\ - "%(asctime)s %(levelname)8s [%(testname)s] %(message)s" - self.datefmt = _DEFAULT_LOG_DATE_FORMAT - - def format(self, record): - extra = record.__dict__.get('extra', None) - if extra and 'testname' in extra: - self._fmt = self.testname_format - else: - self._fmt = self.default_format - return logging.Formatter.format(self, record) diff --git a/fuel_health/common/ssh.py b/fuel_health/common/ssh.py deleted file mode 100644 index e32088f8..00000000 --- a/fuel_health/common/ssh.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import os -import select -import socket -import time -import warnings - -LOG = logging.getLogger(__name__) - -from fuel_health import exceptions - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - import paramiko - - -class Client(object): - - def __init__(self, host, username, password=None, timeout=300, pkey=None, - channel_timeout=70, look_for_keys=False, key_filename=None): - self.host = host - self.username = username - self.password = password - if isinstance(pkey, basestring): - if pkey: - pkey_file = self._get_key_from_file(pkey) - pkey = paramiko.RSAKey.from_private_key(pkey_file) - else: - pkey = None - self.pkey = pkey - self.look_for_keys = look_for_keys - self.key_filename = key_filename - self.timeout = int(timeout) - self.channel_timeout = float(channel_timeout) - self.buf_size = 1024 - - def _get_key_from_file(self, path): - f_path = os.popen('ls %s' % path, 'r').read().strip('\n') - file_key = file(f_path, 'r') - return file_key - - def _get_ssh_connection(self, sleep=1.5, backoff=1.01): - """Returns an ssh connection to the specified host.""" - _timeout = True - bsleep = sleep - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy( - paramiko.AutoAddPolicy()) - _start_time = time.time() - - while not self._is_timed_out(self.timeout, _start_time): - try: - ssh.connect(self.host, username=self.username, - password=self.password, - look_for_keys=self.look_for_keys, - key_filename=self.key_filename, - timeout=self.timeout, pkey=self.pkey) - _timeout = False - break - except (socket.error, - paramiko.AuthenticationException): - time.sleep(bsleep) - bsleep *= backoff - continue - if _timeout: - raise exceptions.SSHTimeout(host=self.host, - user=self.username, - password=self.password, - key_filename=self.key_filename) - return ssh - - def exec_longrun_command(self, cmd): - """Execute the specified command on the server. - - Unlike exec_command and exec_command_on_vm, this method allows - to start a process on VM in background and leave it alive - after it closes the session. - - :returns: data read from standard output of the command. - """ - s = self._get_ssh_connection() - _, stdout, stderr = s.exec_command(cmd) - res = stdout.read() - err_res = stderr.read() - s.close() - return res, err_res - - def _is_timed_out(self, timeout, start_time): - return (time.time() - timeout) > start_time - - def connect_until_closed(self): - """Connect to the server and wait until connection is lost.""" - try: - ssh = self._get_ssh_connection() - _transport = ssh.get_transport() - _start_time = time.time() - _timed_out = self._is_timed_out(self.timeout, _start_time) - while _transport.is_active() and not _timed_out: - time.sleep(5) - _timed_out = self._is_timed_out(self.timeout, _start_time) - ssh.close() - except (EOFError, paramiko.AuthenticationException, socket.error): - LOG.exception('Closed on connecting to server') - return - - def exec_command(self, command): - """Execute the specified command on the server. - - Note that this method is reading whole command outputs to memory, thus - shouldn't be used for large outputs. - - :returns: data read from standard output of the command. - :raises: SSHExecCommandFailed if command returns nonzero - status. The exception contains command status stderr content. 
- """ - ssh = self._get_ssh_connection() - transport = ssh.get_transport() - channel = transport.open_session() - channel.get_pty() - channel.fileno() # Register event pipe - channel.exec_command(command) - channel.shutdown_write() - out_data = [] - err_data = [] - - select_params = [channel], [], [], self.channel_timeout - while True: - ready = select.select(*select_params) - if not any(ready): - raise exceptions.TimeoutException( - "Command: '{0}' executed on host '{1}'.".format( - command, self.host)) - if not ready[0]: # If there is nothing to read. - continue - out_chunk = err_chunk = None - if channel.recv_ready(): - out_chunk = channel.recv(self.buf_size) - out_data += out_chunk, - if channel.recv_stderr_ready(): - err_chunk = channel.recv_stderr(self.buf_size) - err_data += err_chunk, - if channel.closed and not err_chunk and not out_chunk: - break - exit_status = channel.recv_exit_status() - if 0 != exit_status: - raise exceptions.SSHExecCommandFailed( - command=command, exit_status=exit_status, - strerror=''.join(err_data).join(out_data)) - return ''.join(out_data) - - def test_connection_auth(self): - """Returns true if ssh can connect to server.""" - try: - connection = self._get_ssh_connection() - connection.close() - except paramiko.AuthenticationException: - LOG.exception("") - return False - - return True - - def exec_command_on_vm(self, command, user, password, vm): - """Execute the specified command on the instance. - - Note that this method is reading whole command outputs to memory, thus - shouldn't be used for large outputs. - - :returns: data read from standard output of the command. - :raises: SSHExecCommandFailed if command returns nonzero - status. The exception contains command status stderr content. - """ - ssh = self._get_ssh_connection() - _intermediate_transport = ssh.get_transport() - _intermediate_channel = \ - _intermediate_transport.open_channel('direct-tcpip', - (vm, 22), - (self.host, 0)) - transport = paramiko.Transport(_intermediate_channel) - transport.start_client() - transport.auth_password(user, password) - channel = transport.open_session() - channel.exec_command(command) - exit_status = channel.recv_exit_status() - channel.shutdown_write() - out_data = [] - err_data = [] - LOG.debug("Run cmd {0} on vm {1}".format(command, vm)) - select_params = [channel], [], [], self.channel_timeout - while True: - ready = select.select(*select_params) - if not any(ready): - raise exceptions.TimeoutException( - "Command: '{0}' executed on host '{1}'.".format( - command, self.host)) - if not ready[0]: # If there is nothing to read. 
- continue - out_chunk = err_chunk = None - if channel.recv_ready(): - out_chunk = channel.recv(self.buf_size) - out_data += out_chunk, - if channel.recv_stderr_ready(): - err_chunk = channel.recv_stderr(self.buf_size) - err_data += err_chunk, - if channel.closed and not err_chunk and not out_chunk: - break - if 0 != exit_status: - LOG.warning( - 'Command {0} finishes with non-zero exit code {1}'.format( - command, exit_status)) - raise exceptions.SSHExecCommandFailed( - command=command, exit_status=exit_status, - strerror=''.join(err_data).join(out_data)) - LOG.debug('Current result {0} {1} {2}'.format( - command, err_data, out_data)) - return ''.join(out_data) - - def close_ssh_connection(self, connection): - connection.close() diff --git a/fuel_health/common/test_mixins.py b/fuel_health/common/test_mixins.py deleted file mode 100644 index 21b9f903..00000000 --- a/fuel_health/common/test_mixins.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import signal - -from fuel_health.common import log as logging - -LOG = logging.getLogger(__name__) - - -class FuelTestAssertMixin(object): - """Mixin class with a set of assert methods created to abstract - from unittest assertion methods and provide human - readable descriptions where possible - """ - def verify_response_status(self, status, - appl='Application', msg='', failed_step=''): - """Method provides human readable message - for the HTTP response status verification - - :param appl: the name of application requested - :param status: response status - :param msg: message to be used instead the default one - :failed_step: the step of the test scenario that has failed - """ - if status in [200, 201, 202]: - return - - human_readable_statuses = { - 400: ('Something changed in {appl} and request is no ' - 'longer recognized as valid. Please verify that you ' - 'are not sending an HTTP request to an HTTPS socket'), - 401: 'Unauthorized, please check Keystone and {appl} connectivity', - 403: ('Forbidden, please check if Keystone and {appl} ' - 'security policies have changed'), - 404: '{appl} server is running but the application is not found', - 500: '{appl} server is experiencing some problems', - 503: '{appl} server is experiencing problems' - } - - human_readable_status_groups = { - 3: ('Status {status}. Redirection. Please check that all {appl}' - ' proxy settings are correct'), - 4: ('Status {status}. Client error. Please verify that your {appl}' - ' configuration corresponds to the one defined in ' - 'Fuel configuration '), - 5: 'Status {status}. Server error. 
Please check {appl} logs' - } - - unknown_msg = '{appl} status - {status} is unknown' - - if status in human_readable_statuses: - status_msg = human_readable_statuses[status].format( - status=status, appl=appl) - else: - status_msg = human_readable_status_groups.get( - status / 100, unknown_msg).format(status=status, appl=appl) - - failed_step_msg = '' - if failed_step: - failed_step_msg = ('Step %s failed: ' % str(failed_step)) - - self.fail(''.join((failed_step_msg + - 'Status - {status} '.format( - status=status), status_msg, '\n', msg))) - - def verify_response_body(self, body, content='', msg='', failed_step=''): - """Method provides human readable message for the verification if - HTTP response body contains desired keyword - - :param body: response body - :param content: content type that should be present in response body - :param msg: message to be used instead the default one - """ - if content in body: - return - if failed_step: - msg = ('Step %s failed: ' % str(failed_step)) + msg - self.fail(msg) - - def verify_response_body_value(self, body_structure, value='', msg='', - failed_step=''): - """Method provides human readable message for verification if - HTTP response body element contains desired keyword. - - :param body_structure: body element value (e.g. body['name'], body); - :param value: expected value of body element (e.g. u'test-flavor'); - :param msg: message to be used instead of the default one. - """ - if type(body_structure) is dict: - if value in body_structure.values(): - return - else: - if body_structure == value: - return - failed_step_msg = '' - if failed_step: - failed_step_msg = ('Step {step} failed: {msg}{refer}'.format( - step=str(failed_step), - msg=msg, - refer=" Please refer to OpenStack" - " logs for more details.")) - self.fail(failed_step_msg) - - def verify_response_body_content(self, exp_content, act_content, msg='', - failed_step=''): - failed_step_msg = '' - if exp_content == act_content: - return - if failed_step: - failed_step_msg = ('Step %s failed: ' % str(failed_step)) - fail_msg = '{0} Actual value - {1}, \n {2}' - self.fail(fail_msg.format(failed_step_msg, act_content, msg)) - - def verify_elements_list(self, elements, attrs, msg='', failed_step=''): - """Method provides human readable message for the verification of - list of elements with specific parameters - :param elements: the list of elements from response - :param attrs: required attributes for each element - :param msg: message to be used instead the default one - :param failed_step: step with failed action - """ - msg = "Step {0} failed: {1}".format(failed_step, msg) - self.verify_response_true(elements, msg) - - for element in elements: - for attribute in attrs: - if not hasattr(element, attribute): - self.fail(msg) - - def verify_response_body_not_equal(self, exp_content, act_content, msg='', - failed_step=''): - failed_step_msg = '' - if exp_content != act_content: - return - if failed_step: - failed_step_msg = ('Step %s failed: ' % str(failed_step)) - fail_msg = '{0} {1}' - self.fail(fail_msg.format(failed_step_msg, msg)) - - def verify_response_true(self, resp, msg, failed_step=''): - if resp: - return - - failed_step_msg = '' - if failed_step: - failed_step_msg = ('Step %s failed: ' % str(failed_step)) - - message = "{0}{1}. Please refer to OpenStack logs for more details." 
- self.fail(message.format(failed_step_msg, msg)) - - def verify(self, secs, func, step='', msg='', action='', *args, **kwargs): - """Arguments: - :secs: timeout time; - :func: function to be verified; - :step: number of test step; - :msg: message that will be displayed if an exception occurs; - :action: action that is performed by the method. - """ - LOG.info("STEP:{0}, verify action: '{1}'".format(step, action)) - try: - with timeout(secs, action): - result = func(*args, **kwargs) - except Exception as exc: - LOG.exception(exc) - if type(exc) is AssertionError: - msg = str(exc) - self.fail("Step %s failed: " % step + msg + - " Please refer to OpenStack logs for more details.") - else: - return result - - -class TimeOutError(Exception): - def __init__(self): - Exception.__init__(self) - - -def _raise_TimeOut(sig, stack): - raise TimeOutError() - - -class timeout(object): - """Timeout context that will stop code running within context - if timeout is reached - - >>with timeout(2): - ... requests.get("http://msdn.com") - """ - def __init__(self, timeout, action): - self.timeout = timeout - self.action = action - - def __enter__(self): - signal.signal(signal.SIGALRM, _raise_TimeOut) - signal.alarm(self.timeout) - - def __exit__(self, exc_type, exc_val, exc_tb): - signal.alarm(0) # disable the alarm - if exc_type is not TimeOutError: - return False # never swallow other exceptions - else: - LOG.info("Timeout {timeout}s exceeded for {call}".format( - call=self.action, - timeout=self.timeout - )) - msg = ("Time limit exceeded while waiting for {call} to " - "finish.").format(call=self.action) - raise AssertionError(msg) diff --git a/fuel_health/common/utils/__init__.py b/fuel_health/common/utils/__init__.py deleted file mode 100644 index 38f3d38a..00000000 --- a/fuel_health/common/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -LAST_REBOOT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' -PING_IPV4_COMMAND = 'ping -c 3 ' -PING_IPV6_COMMAND = 'ping6 -c 3 ' -PING_PACKET_LOSS_REGEX = '(\d{1,3})\.?\d*\% packet loss' diff --git a/fuel_health/common/utils/data_utils.py b/fuel_health/common/utils/data_utils.py deleted file mode 100644 index 0959c9f2..00000000 --- a/fuel_health/common/utils/data_utils.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import random -import re -import urllib -import uuid - - -from fuel_health import exceptions - - -def rand_name(name='ost1_test-'): - return name + str(random.randint(1, 0x7fffffff)) - - -def rand_int_id(start=0, end=0x7fffffff): - return random.randint(start, end) - - -def build_url(host, port, api_version=None, path=None, - params=None, use_ssl=False): - """Build the request URL from given host, port, path and parameters.""" - - pattern = 'v\d\.\d' - if re.match(pattern, path): - message = 'Version should not be included in path.' 
- raise exceptions.InvalidConfiguration(message=message) - - if use_ssl: - url = "https://" + host - else: - url = "http://" + host - - if port is not None: - url += ":" + port - url += "/" - - if api_version is not None: - url += api_version + "/" - - if path is not None: - url += path - - if params is not None: - url += "?" - url += urllib.urlencode(params) - - return url - - -def arbitrary_string(size=4, base_text=None): - """Return size characters from base_text, repeating - the base_text infinitely if needed. - """ - if not base_text: - base_text = 'ost1_test-' - return ''.join(itertools.islice(itertools.cycle(base_text), size)) - - -def generate_uuid(): - return uuid.uuid4().hex diff --git a/fuel_health/common/utils/misc.py b/fuel_health/common/utils/misc.py deleted file mode 100644 index 4ba00453..00000000 --- a/fuel_health/common/utils/misc.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def singleton(cls): - """Simple wrapper for classes that should only have a single instance.""" - instances = {} - - def getinstance(): - if cls not in instances: - instances[cls] = cls() - return instances[cls] - return getinstance diff --git a/fuel_health/config.py b/fuel_health/config.py deleted file mode 100644 index e67e4eaf..00000000 --- a/fuel_health/config.py +++ /dev/null @@ -1,977 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# Copyright 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import print_function - -import os -import sys -import unittest2 -import yaml - -import keystoneclient -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg -import requests - -from fuel_health.common import log as logging -from fuel_health import exceptions - - -LOG = logging.getLogger(__name__) - -identity_group = cfg.OptGroup(name='identity', - title="Keystone Configuration Options") - -IdentityGroup = [ - cfg.StrOpt('catalog_type', - default='identity', - help="Catalog type of the Identity service."), - cfg.StrOpt('uri', - default='http://localhost/', - help="Full URI of the OpenStack Identity API (Keystone), v2"), - cfg.StrOpt('uri_v3', - help='Full URI of the OpenStack Identity API (Keystone), v3'), - cfg.StrOpt('strategy', - default='keystone', - help="Which auth method does the environment use? 
" - "(basic|keystone)"), - cfg.StrOpt('region', - default='RegionOne', - help="The identity region name to use."), - cfg.StrOpt('admin_username', - default='nova', - help="Administrative Username to use for" - "Keystone API requests."), - cfg.StrOpt('admin_tenant_name', - default='service', - help="Administrative Tenant name to use for Keystone API " - "requests."), - cfg.StrOpt('admin_password', - default='nova', - help="API key to use when authenticating as admin.", - secret=True), - cfg.BoolOpt('disable_ssl_certificate_validation', - default=False), -] - - -def register_identity_opts(conf): - conf.register_group(identity_group) - for opt in IdentityGroup: - conf.register_opt(opt, group='identity') - -master_node_group = cfg.OptGroup(name='master', - title='Master Node Options') - -MasterGroup = [ - cfg.StrOpt('keystone_password', - default='admin', - help='Default keystone password on master node'), - cfg.StrOpt('keystone_user', - default='admin', - help='Default keystone user on master node'), - cfg.StrOpt('master_node_ssh_user', - default='root'), - cfg.StrOpt('master_node_ssh_password', - default='r00tme', - help='ssh user pass of master node'), - cfg.IntOpt('ssh_timeout', - default=50, - help="Timeout in seconds to wait for authentication to " - "succeed."), -] - - -def register_master_opts(conf): - conf.register_group(master_node_group) - for opt in MasterGroup: - conf.register_opt(opt, group='master') - -compute_group = cfg.OptGroup(name='compute', - title='Compute Service Options') - -ComputeGroup = [ - cfg.BoolOpt('allow_tenant_isolation', - default=False, - help="Allows test cases to create/destroy tenants and " - "users. This option enables isolated test cases and " - "better parallel execution, but also requires that " - "OpenStack Identity API admin credentials are known."), - cfg.BoolOpt('allow_tenant_reuse', - default=True, - help="If allow_tenant_isolation is True and a tenant that " - "would be created for a given test already exists (such " - "as from a previously-failed run), re-use that tenant " - "instead of failing because of the conflict. 
Note that " - "this would result in the tenant being deleted at the " - "end of a subsequent successful run."), - cfg.StrOpt('image_ssh_user', - default="root", - help="User name used to authenticate to an instance."), - cfg.StrOpt('image_alt_ssh_user', - default="root", - help="User name used to authenticate to an instance using " - "the alternate image."), - cfg.BoolOpt('create_image_enabled', - default=True, - help="Does the test environment support snapshots?"), - cfg.IntOpt('build_interval', - default=10, - help="Time in seconds between build status checks."), - cfg.IntOpt('build_timeout', - default=500, - help="Timeout in seconds to wait for an instance to build."), - cfg.BoolOpt('run_ssh', - default=False, - help="Does the test environment support snapshots?"), - cfg.StrOpt('ssh_user', - default='root', - help="User name used to authenticate to an instance."), - cfg.IntOpt('ssh_timeout', - default=50, - help="Timeout in seconds to wait for authentication to " - "succeed."), - cfg.IntOpt('ssh_channel_timeout', - default=20, - help="Timeout in seconds to wait for output from ssh " - "channel."), - cfg.IntOpt('ip_version_for_ssh', - default=4, - help="IP version used for SSH connections."), - cfg.StrOpt('catalog_type', - default='compute', - help="Catalog type of the Compute service."), - cfg.StrOpt('path_to_private_key', - default='/root/.ssh/id_rsa', - help="Path to a private key file for SSH access to remote " - "hosts"), - cfg.ListOpt('controller_nodes', - default=[], - help="IP addresses of controller nodes"), - cfg.ListOpt('controller_names', - default=[], - help="FQDNs of controller nodes"), - cfg.ListOpt('online_controllers', - default=[], - help="ips of online controller nodes"), - cfg.ListOpt('online_controller_names', - default=[], - help="FQDNs of online controller nodes"), - cfg.ListOpt('compute_nodes', - default=[], - help="IP addresses of compute nodes"), - cfg.ListOpt('online_computes', - default=[], - help="IP addresses of online compute nodes"), - cfg.ListOpt('ceph_nodes', - default=[], - help="IP addresses of nodes with ceph-osd role"), - cfg.StrOpt('controller_node_ssh_user', - default='root', - help="ssh user of one of the controller nodes"), - cfg.StrOpt('amqp_pwd', - default='root', - help="amqp_pwd"), - cfg.StrOpt('controller_node_ssh_password', - default='r00tme', - help="ssh user pass of one of the controller nodes"), - cfg.StrOpt('image_name', - default="TestVM", - help="Valid secondary image reference to be used in tests."), - cfg.StrOpt('deployment_mode', - default="ha", - help="Deployments mode"), - cfg.StrOpt('deployment_os', - default="RHEL", - help="Deployments os"), - cfg.IntOpt('flavor_ref', - default=42, - help="Valid primary flavor to use in tests."), - cfg.StrOpt('libvirt_type', - default='qemu', - help="Type of hypervisor to use."), - cfg.BoolOpt('use_vcenter', - default=False, - help="Usage of vCenter"), -] - - -def register_compute_opts(conf): - conf.register_group(compute_group) - for opt in ComputeGroup: - conf.register_opt(opt, group='compute') - -image_group = cfg.OptGroup(name='image', - title="Image Service Options") - -ImageGroup = [ - cfg.StrOpt('api_version', - default='1', - help="Version of the API"), - cfg.StrOpt('catalog_type', - default='image', - help='Catalog type of the Image service.'), - cfg.StrOpt('http_image', - default='http://download.cirros-cloud.net/0.3.1/' - 'cirros-0.3.1-x86_64-uec.tar.gz', - help='http accessable image') -] - - -def register_image_opts(conf): - conf.register_group(image_group) - for opt in ImageGroup: 
- conf.register_opt(opt, group='image') - - -network_group = cfg.OptGroup(name='network', - title='Network Service Options') - -NetworkGroup = [ - cfg.StrOpt('catalog_type', - default='network', - help='Catalog type of the Network service.'), - cfg.StrOpt('tenant_network_cidr', - default="10.100.0.0/16", - help="The cidr block to allocate tenant networks from"), - cfg.StrOpt('network_provider', - default="nova_network", - help="Value of network provider"), - cfg.IntOpt('tenant_network_mask_bits', - default=29, - help="The mask bits for tenant networks"), - cfg.BoolOpt('tenant_networks_reachable', - default=True, - help="Whether tenant network connectivity should be " - "evaluated directly"), - cfg.BoolOpt('neutron_available', - default=False, - help="Whether or not neutron is expected to be available"), - cfg.StrOpt('private_net', - default="net04", - help="Private network name"), -] - - -def register_network_opts(conf): - conf.register_group(network_group) - for opt in NetworkGroup: - conf.register_opt(opt, group='network') - - -volume_group = cfg.OptGroup(name='volume', - title='Block Storage Options') - -VolumeGroup = [ - cfg.IntOpt('build_interval', - default=10, - help='Time in seconds between volume availability checks.'), - cfg.IntOpt('build_timeout', - default=180, - help='Timeout in seconds to wait for a volume to become' - 'available.'), - cfg.StrOpt('catalog_type', - default='volume', - help="Catalog type of the Volume Service"), - cfg.BoolOpt('cinder_node_exist', - default=True, - help="Allow to run tests if cinder exist"), - cfg.BoolOpt('cinder_vmware_node_exist', - default=True, - help="Allow to run tests if cinder-vmware exist"), - cfg.BoolOpt('ceph_exist', - default=True, - help="Allow to run tests if ceph exist"), - cfg.BoolOpt('multi_backend_enabled', - default=False, - help="Runs Cinder multi-backend test (requires 2 backends)"), - cfg.StrOpt('backend1_name', - default='BACKEND_1', - help="Name of the backend1 (must be declared in cinder.conf)"), - cfg.StrOpt('backend2_name', - default='BACKEND_2', - help="Name of the backend2 (must be declared in cinder.conf)"), - cfg.StrOpt('cinder_vmware_storage_az', - default='vcenter', - help="Name of storage availability zone for cinder-vmware."), -] - - -def register_volume_opts(conf): - conf.register_group(volume_group) - for opt in VolumeGroup: - conf.register_opt(opt, group='volume') - - -object_storage_group = cfg.OptGroup(name='object-storage', - title='Object Storage Service Options') - -ObjectStoreConfig = [ - cfg.StrOpt('catalog_type', - default='object-store', - help="Catalog type of the Object-Storage service."), - cfg.StrOpt('container_sync_timeout', - default=120, - help="Number of seconds to time on waiting for a container" - "to container synchronization complete."), - cfg.StrOpt('container_sync_interval', - default=5, - help="Number of seconds to wait while looping to check the" - "status of a container to container synchronization"), -] - - -def register_object_storage_opts(conf): - conf.register_group(object_storage_group) - for opt in ObjectStoreConfig: - conf.register_opt(opt, group='object-storage') - - -sahara = cfg.OptGroup(name='sahara', - title='Sahara Service Options') - -SaharaConfig = [ - cfg.StrOpt('api_url', - default='10.20.0.131', - help="IP of sahara service."), - cfg.StrOpt('port', - default=8386, - help="Port of sahara service."), - cfg.StrOpt('api_version', - default='1.1', - help="API version of sahara service."), - cfg.StrOpt('plugin', - default='vanilla', - help="Plugin name of sahara 
service."), - cfg.StrOpt('plugin_version', - default='1.1.2', - help="Plugin version of sahara service."), - cfg.StrOpt('tt_config', - default={'Task Tracker Heap Size': 515}, - help="Task Tracker config of sahara service."), -] - - -def register_sahara_opts(conf): - conf.register_group(sahara) - for opt in SaharaConfig: - conf.register_opt(opt, group='sahara') - - -murano_group = cfg.OptGroup(name='murano', - title='Murano API Service Options') - -MuranoConfig = [ - cfg.StrOpt('api_url', - default=None, - help="Murano API Service URL."), - cfg.StrOpt('api_url_management', - default=None, - help="Murano API Service management URL."), - cfg.BoolOpt('insecure', - default=False, - help="This parameter allow to enable SSL encription"), - cfg.StrOpt('agListnerIP', - default='10.100.0.155', - help="Murano SQL Cluster AG IP."), - cfg.StrOpt('clusterIP', - default='10.100.0.150', - help="Murano SQL Cluster IP."), -] - - -def register_murano_opts(conf): - conf.register_group(murano_group) - for opt in MuranoConfig: - conf.register_opt(opt, group='murano') - - -heat_group = cfg.OptGroup(name='heat', - title='Heat Options') - -HeatConfig = [ - cfg.StrOpt('endpoint', - default=None, - help="Heat API Service URL."), -] - - -fuel_group = cfg.OptGroup(name='fuel', - title='Fuel options') - -FuelConf = [ - cfg.StrOpt('fuel_version', - default=None, - help="Fuel version"), - cfg.StrOpt('dns', - default=None, - help="dns"), - cfg.BoolOpt('horizon_ssl', - default=False, - help='ssl usage'), - cfg.BoolOpt('ssl_data', - default=False), - cfg.BoolOpt('development_mode', - default=False) -] - - -def register_fuel_opts(conf): - conf.register_group(fuel_group) - [conf.register_opt(opt, group='fuel') for opt in FuelConf] - - -def register_heat_opts(conf): - conf.register_group(heat_group) - for opt in HeatConfig: - conf.register_opt(opt, group='heat') - - -ironic_group = cfg.OptGroup(name='ironic', - title='Bare Metal Service Options') - -IronicConfig = [ - cfg.StrOpt('online_conductors', - default=[], - help="Ironic online conductors"), -] - - -def register_ironic_opts(conf): - conf.register_group(ironic_group) - for opt in IronicConfig: - conf.register_opt(opt, group='ironic') - - -def process_singleton(cls): - """Wrapper for classes... To be instantiated only one time per process.""" - instances = {} - - def wrapper(*args, **kwargs): - LOG.info('INSTANCE %s' % instances) - pid = os.getpid() - if pid not in instances: - instances[pid] = cls(*args, **kwargs) - return instances[pid] - - return wrapper - - -@process_singleton -class FileConfig(object): - """Provides OpenStack configuration information.""" - - DEFAULT_CONFIG_DIR = os.path.join(os.path.abspath( - os.path.dirname(__file__)), 'etc') - - DEFAULT_CONFIG_FILE = "test.conf" - - def __init__(self): - """Initialize a configuration from a conf directory and conf file.""" - config_files = [] - - failsafe_path = "/etc/fuel/" + self.DEFAULT_CONFIG_FILE - - # Environment variables override defaults... 
- custom_config = os.environ.get('CUSTOM_FUEL_CONFIG') - LOG.info('CUSTOM CONFIG PATH %s' % custom_config) - if custom_config: - path = custom_config - else: - conf_dir = os.environ.get('FUEL_CONFIG_DIR', - self.DEFAULT_CONFIG_DIR) - conf_file = os.environ.get('FUEL_CONFIG', self.DEFAULT_CONFIG_FILE) - - path = os.path.join(conf_dir, conf_file) - - if not (os.path.isfile(path) or 'FUEL_CONFIG_DIR' - in os.environ or 'FUEL_CONFIG' in os.environ): - path = failsafe_path - - LOG.info("Using fuel config file %s" % path) - - if not os.path.exists(path): - msg = "Config file {0} not found".format(path) - print(RuntimeError(msg), file=sys.stderr) - else: - config_files.append(path) - - cfg.CONF([], project='fuel', default_config_files=config_files) - - register_compute_opts(cfg.CONF) - register_identity_opts(cfg.CONF) - register_network_opts(cfg.CONF) - register_master_opts(cfg.CONF) - register_volume_opts(cfg.CONF) - register_murano_opts(cfg.CONF) - register_heat_opts(cfg.CONF) - register_sahara_opts(cfg.CONF) - register_fuel_opts(cfg.CONF) - register_ironic_opts(cfg.CONF) - self.compute = cfg.CONF.compute - self.identity = cfg.CONF.identity - self.network = cfg.CONF.network - self.master = cfg.CONF.master - self.volume = cfg.CONF.volume - self.murano = cfg.CONF.murano - self.heat = cfg.CONF.heat - self.sahara = cfg.CONF.sahara - self.fuel = cfg.CONF.fuel - self.ironic = cfg.CONF.ironic - - -class ConfigGroup(object): - # USE SLOTS - - def __init__(self, opts): - self.parse_opts(opts) - - def parse_opts(self, opts): - for opt in opts: - name = opt.name - self.__dict__[name] = opt.default - - def __setattr__(self, key, value): - self.__dict__[key] = value - - def __getitem__(self, key): - return self.__dict__[key] - - def __setitem(self, key, value): - self.__dict__[key] = value - - def __repr__(self): - return u"{0} WITH {1}".format( - self.__class__.__name__, - self.__dict__) - - -@process_singleton -class NailgunConfig(object): - - identity = ConfigGroup(IdentityGroup) - compute = ConfigGroup(ComputeGroup) - image = ConfigGroup(ImageGroup) - master = ConfigGroup(MasterGroup) - network = ConfigGroup(NetworkGroup) - volume = ConfigGroup(VolumeGroup) - object_storage = ConfigGroup(ObjectStoreConfig) - murano = ConfigGroup(MuranoConfig) - sahara = ConfigGroup(SaharaConfig) - heat = ConfigGroup(HeatConfig) - fuel = ConfigGroup(FuelConf) - ironic = ConfigGroup(IronicConfig) - - def __init__(self, parse=True): - LOG.info('INITIALIZING NAILGUN CONFIG') - self.nailgun_host = os.environ.get('NAILGUN_HOST', None) - self.nailgun_port = os.environ.get('NAILGUN_PORT', None) - self.nailgun_url = 'http://{0}:{1}'.format(self.nailgun_host, - self.nailgun_port) - token = os.environ.get('NAILGUN_TOKEN') - self.cluster_id = os.environ.get('CLUSTER_ID', None) - self.req_session = requests.Session() - self.req_session.trust_env = False - self.req_session.verify = False - if token: - self.req_session.headers.update({'X-Auth-Token': token}) - if parse: - self.prepare_config() - - @property - def development_mode(self): - with open('/etc/nailgun/settings.yaml') as nailgun_opts: - nailgun_settings = yaml.safe_load(nailgun_opts) - self.fuel.development_mode = nailgun_settings['DEVELOPMENT'] - return nailgun_settings['DEVELOPMENT'] - - def prepare_config(self, *args, **kwargs): - try: - self._parse_meta() - LOG.info('parse meta successful') - self._parse_cluster_attributes() - LOG.info('parse cluster attr successful') - self._parse_nodes_cluster_id() - LOG.info('parse node cluster successful') - 
self._parse_networks_configuration() - LOG.info('parse network configuration successful') - self.set_endpoints() - LOG.info('set endpoints successful') - self.set_proxy() - LOG.info('set proxy successful') - self._parse_cluster_generated_data() - LOG.info('parse generated successful') - self._parse_vmware_attributes() - LOG.info('parse vmware attributes successful') - except exceptions.SetProxy as exc: - raise exc - except Exception: - LOG.exception('Something wrong with endpoints') - - def _parse_cluster_attributes(self): - api_url = '/api/clusters/%s/attributes' % self.cluster_id - response = self.req_session.get(self.nailgun_url + api_url) - LOG.info('RESPONSE %s STATUS %s' % (api_url, response.status_code)) - data = response.json() - - if self.development_mode: - LOG.info('RESPONSE FROM %s - %s' % (api_url, data)) - - access_data = data['editable']['access'] - common_data = data['editable']['common'] - - self.identity.admin_tenant_name = \ - ( - os.environ.get('OSTF_OS_TENANT_NAME') or - access_data['tenant']['value'] - ) - self.identity.admin_username = \ - ( - os.environ.get('OSTF_OS_USERNAME') or - access_data['user']['value'] - ) - self.identity.admin_password = \ - ( - os.environ.get('OSTF_OS_PASSWORD') or - access_data['password']['value'] - ) - self.compute.libvirt_type = common_data['libvirt_type']['value'] - # After removing vmware support we have no attribute use_vcenter - self.compute.use_vcenter = common_data.get('use_vcenter', {}).get( - 'value', False) - self.compute.auto_assign_floating_ip = common_data[ - 'auto_assign_floating_ip']['value'] - - api_url = '/api/clusters/%s' % self.cluster_id - cluster_data = self.req_session.get(self.nailgun_url + api_url).json() - network_provider = cluster_data.get('net_provider', 'nova_network') - self.network.network_provider = network_provider - release_id = cluster_data.get('release_id', 'failed to get id') - self.fuel.fuel_version = cluster_data.get( - 'fuel_version', 'failed to get fuel version') - LOG.info('Release id is {0}'.format(release_id)) - release_data = self.req_session.get( - self.nailgun_url + '/api/releases/{0}'.format(release_id)).json() - deployment_os = release_data.get( - 'operating_system', 'failed to get os') - LOG.info('Deployment os is {0}'.format(deployment_os)) - if deployment_os != 'RHEL': - storage = data['editable']['storage']['volumes_ceph']['value'] - self.volume.ceph_exist = storage - self.fuel.dns = data['editable']['external_dns'].get('value', None) - ssl_data = data['editable'].get('public_ssl', - {'horizon': {'value': False}}) - self.fuel.ssl_data = ssl_data['services']['value'] - self.fuel.horizon_ssl = ssl_data['horizon']['value'] - - def _parse_nodes_cluster_id(self): - api_url = '/api/nodes?cluster_id=%s' % self.cluster_id - response = self.req_session.get(self.nailgun_url + api_url) - LOG.info('RESPONSE %s STATUS %s' % (api_url, response.status_code)) - data = response.json() - # to make backward compatible - if 'objects' in data: - data = data['objects'] - controller_nodes = filter(lambda node: 'controller' in node['roles'], - data) - online_controllers = filter( - lambda node: 'controller' in node['roles'] and - node['online'] is True, data) - - cinder_nodes = [] - cinder_roles = ['cinder', 'cinder-block-device'] - for cinder_role in cinder_roles: - cinder_nodes.extend( - filter(lambda node: cinder_role in node['roles'], data)) - - cinder_vmware_nodes = filter(lambda node: 'cinder-vmware' in - node['roles'], data) - controller_ips = [] - controller_names = [] - public_ips = [] - 
online_controllers_ips = [] - online_controller_names = [] - for node in controller_nodes: - public_network = next(network for network in node['network_data'] - if network['name'] == 'public') - ip = public_network['ip'].split('/')[0] - public_ips.append(ip) - controller_ips.append(node['ip']) - controller_names.append(node['fqdn']) - LOG.info("IP %s NAMES %s" % (controller_ips, controller_names)) - - for node in online_controllers: - online_controllers_ips.append(node['ip']) - online_controller_names.append(node['fqdn']) - LOG.info("Online controllers ips is %s" % online_controllers_ips) - - self.compute.nodes = data - self.compute.public_ips = public_ips - self.compute.controller_nodes = controller_ips - self.compute.controller_names = controller_names - self.compute.online_controllers = online_controllers_ips - self.compute.online_controller_names = online_controller_names - if not cinder_nodes: - self.volume.cinder_node_exist = False - if not cinder_vmware_nodes: - self.volume.cinder_vmware_node_exist = False - - compute_nodes = filter(lambda node: 'compute' in node['roles'], - data) - online_computes = filter( - lambda node: 'compute' in node['roles'] and - node['online'] is True, data) - online_computes_ips = [] - for node in online_computes: - online_computes_ips.append(node['ip']) - LOG.info('Online compute ips is {0}'.format(online_computes_ips)) - self.compute.online_computes = online_computes_ips - compute_ips = [] - for node in compute_nodes: - compute_ips.append(node['ip']) - LOG.info("COMPUTES IPS %s" % compute_ips) - - sriov_physnets = [] - compute_ids = [node['id'] for node in online_computes] - for compute_id in compute_ids: - api_url = '/api/nodes/{}/interfaces'.format(compute_id) - ifaces_resp = self.req_session.get( - self.nailgun_url + api_url).json() - for iface in ifaces_resp: - if 'interface_properties' in iface: - if ('sriov' in iface['interface_properties'] and - iface['interface_properties'][ - 'sriov']['enabled']): - sriov_physnets.append( - iface['interface_properties']['sriov']['physnet']) - else: - if ('sriov' in iface['attributes'] and - iface['attributes']['sriov']['enabled']['value']): - sriov_physnets.append( - iface['attributes']['sriov']['physnet']['value']) - - self.compute.sriov_physnets = sriov_physnets - - # Find first compute with enabled DPDK - for compute in online_computes: - api_url = '/api/nodes/{}/interfaces'.format(compute['id']) - ifaces_resp = self.req_session.get( - self.nailgun_url + api_url).json() - for iface in ifaces_resp: - if 'interface_properties' in iface: - if 'dpdk' in iface['interface_properties']: - if 'enabled' in iface['interface_properties']['dpdk']: - if iface['interface_properties'][ - 'dpdk']['enabled']: - self.compute.dpdk_compute_fqdn = compute[ - 'fqdn'] - break - else: - if 'dpdk' in iface['attributes']: - if 'enabled' in iface['attributes']['dpdk']: - if iface['attributes']['dpdk'][ - 'enabled']['value']: - self.compute.dpdk_compute_fqdn = compute[ - 'fqdn'] - break - - self.compute.compute_nodes = compute_ips - ceph_nodes = filter(lambda node: 'ceph-osd' in node['roles'], - data) - self.compute.ceph_nodes = ceph_nodes - - online_ironic = filter( - lambda node: 'ironic' in node['roles'] and - node['online'] is True, data) - self.ironic.online_conductors = [] - for node in online_ironic: - self.ironic.online_conductors.append(node['ip']) - LOG.info('Online Ironic conductors\' ips are {0}'.format( - self.ironic.online_conductors)) - - def _parse_meta(self): - api_url = '/api/clusters/%s' % self.cluster_id - data = 
self.req_session.get(self.nailgun_url + api_url).json() - self.mode = data['mode'] - self.compute.deployment_mode = self.mode - release_id = data.get('release_id', 'failed to get id') - LOG.info('Release id is {0}'.format(release_id)) - release_data = self.req_session.get( - self.nailgun_url + '/api/releases/{0}'.format(release_id)).json() - self.compute.deployment_os = release_data.get( - 'operating_system', 'failed to get os') - self.compute.release_version = release_data.get( - 'version', 'failed to get release version') - - def _parse_networks_configuration(self): - api_url = '/api/clusters/{0}/network_configuration/{1}'.format( - self.cluster_id, self.network.network_provider) - data = self.req_session.get(self.nailgun_url + api_url).json() - self.network.raw_data = data - net_params = self.network.raw_data.get('networking_parameters') - self.network.private_net = net_params.get( - 'internal_name', 'net04') - LOG.debug('Private network name is {0}'.format( - self.network.private_net)) - - def _parse_cluster_generated_data(self): - api_url = '/api/clusters/%s/generated' % self.cluster_id - data = self.req_session.get(self.nailgun_url + api_url).json() - self.generated_data = data - amqp_data = data['rabbit'] - self.amqp_pwd = amqp_data['password'] - if 'RHEL' in self.compute.deployment_os: - storage = data['storage']['volumes_ceph'] - self.volume.ceph_exist = storage - - def _parse_ostf_api(self): - api_url = '/api/ostf/%s' % self.cluster_id - response = self.req_session.get(self.nailgun_url + api_url) - data = response.json() - self.identity.url = data['horizon_url'] + 'dashboard' - self.identity.uri = data['keystone_url'] + 'v2.0/' - - def _parse_vmware_attributes(self): - if self.volume.cinder_vmware_node_exist: - api_url = '/api/clusters/%s/vmware_attributes' % self.cluster_id - data = self.req_session.get(self.nailgun_url + api_url).json() - az = data['editable']['value']['availability_zones'][0]['az_name'] - self.volume.cinder_vmware_storage_az = "{0}-cinder".format(az) - - def get_keystone_vip(self): - if 'service_endpoint' in self.network.raw_data \ - and not self.fuel.ssl_data: - keystone_vip = self.network.raw_data['service_endpoint'] - elif 'vips' in self.network.raw_data: - vips_data = self.network.raw_data['vips'] - keystone_vip = vips_data['public']['ipaddr'] - else: - keystone_vip = self.network.raw_data.get('public_vip', None) - - return keystone_vip - - def check_proxy_auth(self, proxy_ip, proxy_port, keystone_vip): - if self.fuel.ssl_data: - auth_url = 'https://{0}:{1}/{2}/'.format( - keystone_vip, 5000, 'v2.0') - os.environ['https_proxy'] = 'http://{0}:{1}'.format( - proxy_ip, proxy_port) - else: - auth_url = 'http://{0}:{1}/{2}/'.format( - keystone_vip, 5000, 'v2.0') - os.environ['http_proxy'] = 'http://{0}:{1}'.format( - proxy_ip, proxy_port) - try: - LOG.debug('Trying to authenticate at "{0}" using HTTP proxy "http:' - '//{1}:{2}" ...'.format(auth_url, proxy_ip, proxy_port)) - keystoneclient.v2_0.client.Client( - username=self.identity.admin_username, - password=self.identity.admin_password, - tenant_name=self.identity.admin_tenant_name, - auth_url=auth_url, - debug=True, - insecure=True, - timeout=10) - return True - except keystoneclient.exceptions.Unauthorized: - LOG.warning('Authorization failed at "{0}" using HTTP proxy "http:' - '//{1}:{2}"!'.format(auth_url, proxy_ip, proxy_port)) - return False - - def find_proxy(self, proxy_ips, proxy_port, keystone_vip): - online_proxies = [] - for proxy_ip in proxy_ips: - try: - LOG.info('Try to check proxy on 
{0}'.format(proxy_ip)) - if self.check_proxy_auth(proxy_ip, proxy_port, keystone_vip): - online_proxies.append({'ip': proxy_ip, - 'auth_passed': True}) - else: - online_proxies.append({'ip': proxy_ip, - 'auth_passed': False}) - except Exception: - LOG.exception('Can not connect to Keystone with proxy \ - on {0}'.format(proxy_ip)) - return online_proxies - - def set_proxy(self): - """Sets environment property for http_proxy: - To behave properly - method must be called after all nailgun params - is processed - """ - if not self.compute.online_controllers: - raise exceptions.OfflineControllers() - keystone_vip = self.get_keystone_vip() - proxy_port = 8888 - LOG.debug('Keystone VIP is: {0}'.format(keystone_vip)) - proxies = self.find_proxy(self.compute.online_controllers, - proxy_port, - keystone_vip) - if not proxies: - raise exceptions.SetProxy() - for proxy in proxies: - if proxy['auth_passed']: - os.environ['http_proxy'] = 'http://{0}:{1}'.format(proxy['ip'], - proxy_port) - return - raise exceptions.InvalidCredentials - - def set_endpoints(self): - # NOTE(dshulyak) this is hacky convention to allow granular deployment - # of keystone - keystone_vip = self.get_keystone_vip() - LOG.debug('Keystone vip in set endpoint is: {0}'.format(keystone_vip)) - if self.network.raw_data.get('vips', None): - vips_data = self.network.raw_data.get('vips') - management_vip = vips_data['management']['ipaddr'] - public_vip = vips_data['public']['ipaddr'] - LOG.debug( - 'Found vips in network roles data, management vip is : ' - '{0}, public vip is {1}'.format(management_vip, public_vip)) - else: - public_vip = self.network.raw_data.get('public_vip', None) - # management_vip = self.network.raw_data.get('management_vip', - # None) - - # workaround for api without management_vip for ha mode - if not keystone_vip and 'ha' in self.mode: - self._parse_ostf_api() - else: - endpoint = keystone_vip or self.compute.public_ips[0] - if self.fuel.ssl_data: - self.identity.uri = 'https://{0}:{1}/{2}/'.format( - endpoint, 5000, 'v2.0') - self.horizon_proto = 'https' - else: - self.identity.uri = 'http://{0}:{1}/{2}/'.format( - endpoint, 5000, 'v2.0') - self.horizon_proto = 'http' - - self.horizon_url = '{proto}://{host}/{path}/'.format( - proto=self.horizon_proto, host=public_vip, path='dashboard') - self.horizon_ubuntu_url = '{proto}://{host}/'.format( - proto=self.horizon_proto, host=public_vip) - - -def FuelConfig(): - if 'CUSTOM_FUEL_CONFIG' in os.environ: - return FileConfig() - else: - try: - return NailgunConfig() - except exceptions.SetProxy as e: - raise unittest2.TestCase.failureException(str(e)) diff --git a/fuel_health/etc/heat_autoscaling_neutron.yaml b/fuel_health/etc/heat_autoscaling_neutron.yaml deleted file mode 100644 index 1ccaea99..00000000 --- a/fuel_health/etc/heat_autoscaling_neutron.yaml +++ /dev/null @@ -1,75 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - KeyName: - type: string - InstanceType: - type: string - ImageId: - type: string - SecurityGroup: - type: string - Net: - type: string - -resources: - my_asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - metadata: {"metering.stack": {get_param: "OS::stack_id"}} - key_name: { get_param: KeyName } - image: { get_param: ImageId } - flavor: { get_param: InstanceType } - security_groups: - - get_param: SecurityGroup - networks: - - network: {get_param: Net} - min_size: 1 - max_size: 3 - - scale_up_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: 
change_in_capacity - auto_scaling_group_id: {get_resource: my_asg} - cooldown: 60 - scaling_adjustment: 2 - - scale_down_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: my_asg} - cooldown: 60 - scaling_adjustment: '-1' - - cpu_alarm_high: - type: OS::Ceilometer::Alarm - properties: - description: Scale-up if count of instance <= 1 for 1 minute - meter_name: network.incoming.bytes - statistic: count - period: 60 - evaluation_periods: 1 - threshold: 1 - alarm_actions: - - {get_attr: [scale_up_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: le - - cpu_alarm_low: - type: OS::Ceilometer::Alarm - properties: - description: Scale-down if maximum count of instance > 2 for 1 minutes - meter_name: network.incoming.bytes - statistic: count - period: 60 - evaluation_periods: 1 - threshold: 2 - alarm_actions: - - {get_attr: [scale_down_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: gt diff --git a/fuel_health/etc/heat_autoscaling_nova.yaml b/fuel_health/etc/heat_autoscaling_nova.yaml deleted file mode 100644 index bd55bbce..00000000 --- a/fuel_health/etc/heat_autoscaling_nova.yaml +++ /dev/null @@ -1,71 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - KeyName: - type: string - InstanceType: - type: string - ImageId: - type: string - SecurityGroup: - type: string - -resources: - my_asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - metadata: {"metering.stack": {get_param: "OS::stack_id"}} - key_name: { get_param: KeyName } - image: { get_param: ImageId } - flavor: { get_param: InstanceType } - security_groups: - - get_param: SecurityGroup - min_size: 1 - max_size: 3 - - scale_up_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: my_asg} - cooldown: 60 - scaling_adjustment: 2 - - scale_down_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: my_asg} - cooldown: 60 - scaling_adjustment: '-1' - - cpu_alarm_high: - type: OS::Ceilometer::Alarm - properties: - description: Scale-up if count of instance <= 1 for 1 minute - meter_name: network.incoming.bytes - statistic: count - period: 60 - evaluation_periods: 1 - threshold: 1 - alarm_actions: - - {get_attr: [scale_up_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: le - - cpu_alarm_low: - type: OS::Ceilometer::Alarm - properties: - description: Scale-down if maximum count of instance > 2 for 1 minutes - meter_name: network.incoming.bytes - statistic: count - period: 60 - evaluation_periods: 1 - threshold: 2 - alarm_actions: - - {get_attr: [scale_down_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: gt diff --git a/fuel_health/etc/heat_create_neutron_stack_template.yaml b/fuel_health/etc/heat_create_neutron_stack_template.yaml deleted file mode 100644 index 61f203db..00000000 --- a/fuel_health/etc/heat_create_neutron_stack_template.yaml +++ /dev/null @@ -1,24 +0,0 @@ -heat_template_version: '2013-05-23' -description: | - Template which creates single instance -parameters: - InstanceType: - type: string - ImageId: - type: string - network: - type: 
string -resources: - Server: - type: OS::Nova::Server - properties: - name: ost1-test_heat - image: {get_param: ImageId} - flavor: {get_param: InstanceType} - flavor_update_policy: REPLACE - networks: - - network: {Ref: network} -outputs: - servers: - value: - get_resource: Server diff --git a/fuel_health/etc/heat_create_nova_stack_template.yaml b/fuel_health/etc/heat_create_nova_stack_template.yaml deleted file mode 100644 index 0bd67763..00000000 --- a/fuel_health/etc/heat_create_nova_stack_template.yaml +++ /dev/null @@ -1,20 +0,0 @@ -heat_template_version: '2013-05-23' -description: | - Template which creates single instance -parameters: - InstanceType: - type: string - ImageId: - type: string -resources: - Server: - type: OS::Nova::Server - properties: - name: ost1-test_heat - image: {get_param: ImageId} - flavor: {get_param: InstanceType} - flavor_update_policy: REPLACE -outputs: - servers: - value: - get_resource: Server diff --git a/fuel_health/etc/heat_update_neutron_stack_template.yaml b/fuel_health/etc/heat_update_neutron_stack_template.yaml deleted file mode 100644 index 70622b89..00000000 --- a/fuel_health/etc/heat_update_neutron_stack_template.yaml +++ /dev/null @@ -1,33 +0,0 @@ -heat_template_version: '2013-05-23' -description: | - Template which creates two instances -parameters: - InstanceType: - type: string - ImageId: - type: string - network: - type: string -resources: - Server1: - type: OS::Nova::Server - properties: - image: {get_param: ImageId} - flavor: {get_param: InstanceType} - networks: - - network: {Ref: network} - - Server2: - type: OS::Nova::Server - properties: - image: {get_param: ImageId} - flavor: {get_param: InstanceType} - networks: - - network: {Ref: network} -outputs: - server1: - value: - get_resource: Server1 - server2: - value: - get_resource: Server2 diff --git a/fuel_health/etc/heat_update_nova_stack_template.yaml b/fuel_health/etc/heat_update_nova_stack_template.yaml deleted file mode 100644 index 7ee05a24..00000000 --- a/fuel_health/etc/heat_update_nova_stack_template.yaml +++ /dev/null @@ -1,26 +0,0 @@ -heat_template_version: '2013-05-23' -description: | - Template which creates two instances -parameters: - InstanceType: - type: string - ImageId: - type: string -resources: - Server1: - type: OS::Nova::Server - properties: - image: {get_param: ImageId} - flavor: {get_param: InstanceType} - Server2: - type: OS::Nova::Server - properties: - image: {get_param: ImageId} - flavor: {get_param: InstanceType} -outputs: - server1: - value: - get_resource: Server1 - server2: - value: - get_resource: Server2 diff --git a/fuel_health/etc/heat_wait_condition_neutron.yaml b/fuel_health/etc/heat_wait_condition_neutron.yaml deleted file mode 100644 index 20fbf00d..00000000 --- a/fuel_health/etc/heat_wait_condition_neutron.yaml +++ /dev/null @@ -1,103 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - HOT template to demonstrate usage of the Heat native waitcondition resources - This is expected to work with any image containing curl and something which - runs the raw user-data script, e.g cirros or some image containing cloud-init -parameters: - key_name: - type: string - description: Name of keypair to assign to server - image: - type: string - description: Name of image to use for server - flavor: - type: string - description: Flavor to use for server - default: m1.tiny - timeout: - type: number - description: Timeout for WaitCondition, depends on your image and environment - default: 600 - net: - description: Name of net to use for server - 
type: string - floating_net: - description: Name of the net for floating ip - type: string - -resources: - wait_condition: - type: OS::Heat::WaitCondition - properties: - handle: {get_resource: wait_handle} - # Note, count of 5 vs 6 is due to duplicate signal ID 5 sent below - count: 5 - timeout: {get_param: timeout} - - wait_handle: - type: OS::Heat::WaitConditionHandle - - instance: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - key_name: {get_param: key_name} - networks: - - port: {get_resource: port} - user_data_format: RAW - user_data: - str_replace: - template: | - #!/bin/sh - # Below are some examples of the various ways signals - # can be sent to the Handle resource - # Simple success signal - wc_notify -k --data-binary '{"status": "SUCCESS"}' - # Or you optionally can specify any of the additional fields - wc_notify -k --data-binary '{"status": "SUCCESS", "reason": "signal2"}' - wc_notify -k --data-binary '{"status": "SUCCESS", "reason": "signal3", "data": "data3"}' - wc_notify -k --data-binary '{"status": "SUCCESS", "reason": "signal4", "data": "data4"}' - # If you require control of the ID, you can pass it. - # The ID should be unique, unless you intend for duplicate - # signals to overrite each other. The following two calls - # do the exact same thing, and will be treated as one signal - # (You can prove this by changing count above to 7) - wc_notify -k --data-binary '{"status": "SUCCESS", "id": "5"}' - wc_notify -k --data-binary '{"status": "SUCCESS", "id": "5"}' - # Example of sending a failure signal, optionally - # reason, id, and data can be specified as above - # wc_notify -k --data-binary '{"status": "FAILURE"}' - params: - wc_notify: { get_attr: ['wait_handle', 'curl_cli'] } - - floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: {get_param: floating_net} - port_id: {get_resource: port} - - port: - type: OS::Neutron::Port - properties: - network_id: {get_param: net} - security_groups: [{ get_resource: server_security_group }] - - server_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: security-group - rules: - - remote_ip_prefix: 0.0.0.0/0 - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - remote_ip_prefix: 0.0.0.0/0 - protocol: icmp - -outputs: - curl_cli: - value: { get_attr: ['wait_handle', 'curl_cli'] } - wc_data: - value: { get_attr: ['wait_condition', 'data'] } diff --git a/fuel_health/etc/heat_wait_condition_nova.yaml b/fuel_health/etc/heat_wait_condition_nova.yaml deleted file mode 100644 index 045417dc..00000000 --- a/fuel_health/etc/heat_wait_condition_nova.yaml +++ /dev/null @@ -1,66 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - HOT template to demonstrate usage of the Heat native waitcondition resources - This is expected to work with any image containing curl and something which - runs the raw user-data script, e.g cirros or some image containing cloud-init - -parameters: - key_name: - type: string - description: Name of keypair to assign to server - image: - type: string - description: Name of image to use for server - flavor: - type: string - description: Flavor to use for server - default: m1.tiny - timeout: - type: number - description: Timeout for WaitCondition, depends on your image and environment - default: 600 - -resources: - wait_condition: - type: OS::Heat::WaitCondition - properties: - handle: {get_resource: wait_handle} - # Note, count of 5 vs 6 is due to duplicate signal ID 5 sent below - count: 5 - timeout: 
{get_param: timeout} - - wait_handle: - type: OS::Heat::WaitConditionHandle - - instance: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - key_name: {get_param: key_name} - user_data_format: RAW - user_data: - str_replace: - template: | - #!/bin/sh - # Below are some examples of the various ways signals - # can be sent to the Handle resource - # Simple success signal - wc_notify --data-binary '{"status": "SUCCESS"}' - # Or you optionally can specify any of the additional fields - wc_notify --data-binary '{"status": "SUCCESS", "reason": "signal2"}' - wc_notify --data-binary '{"status": "SUCCESS", "reason": "signal3", "data": "data3"}' - wc_notify --data-binary '{"status": "SUCCESS", "reason": "signal4", "data": "data4"}' - # If you require control of the ID, you can pass it. - # The ID should be unique, unless you intend for duplicate - # signals to overrite each other. The following two calls - # do the exact same thing, and will be treated as one signal - # (You can prove this by changing count above to 7) - wc_notify --data-binary '{"status": "SUCCESS", "id": "5"}' - wc_notify --data-binary '{"status": "SUCCESS", "id": "5"}' - # Example of sending a failure signal, optionally - # reason, id, and data can be specified as above - # wc_notify --data-binary '{"status": "FAILURE"}' - params: - wc_notify: { get_attr: ['wait_handle', 'curl_cli'] } diff --git a/fuel_health/etc/server.txt b/fuel_health/etc/server.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_health/etc/test.conf b/fuel_health/etc/test.conf deleted file mode 100644 index 295d2562..00000000 --- a/fuel_health/etc/test.conf +++ /dev/null @@ -1,171 +0,0 @@ -[identity] -# This section contains configuration options that a variety of -# test clients use when authenticating with different user/tenant -# combinations -url = http://localhost/ -# The type of endpoint for a Identity service. Unless you have a -# custom Keystone service catalog implementation, you probably want to leave -# this value as "identity" -catalog_type = identity -# Ignore SSL certificate validation failures? Use when in testing -# environments that have self-signed SSL certs. -disable_ssl_certificate_validation = False -# URL for where to find the OpenStack Identity API endpoint (Keystone) -uri = http://localhost:5000/v2.0/ -# URL for where to find the OpenStack V3 Identity API endpoint (Keystone) -#uri_v3 = http://127.0.0.1:5000/v3/ -# Should typically be left as keystone unless you have a non-Keystone -# authentication API service -strategy = keystone -# The identity region -region = RegionOne - -# This should be the username of a user WITH administrative privileges -admin_username = nova -# The above administrative user's password -admin_password = nova -# The above administrative user's tenant name -admin_tenant_name = service - -[compute] -# This section contains configuration options used when executing tests -# against the OpenStack Compute API. 
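The wait-condition templates above drive OS::Heat::WaitConditionHandle through its curl_cli attribute. The same signal can be sent from Python; a rough sketch, assuming the handle's 'endpoint' and 'token' attributes have already been resolved from the stack:

    import json
    import requests

    def send_wc_signal(endpoint_url, heat_token, reason='signal1', signal_id=None):
        # Equivalent of: wc_notify -k --data-binary '{"status": "SUCCESS", ...}'
        body = {'status': 'SUCCESS', 'reason': reason}
        if signal_id is not None:
            # Duplicate ids overwrite each other and count as a single signal.
            body['id'] = signal_id
        resp = requests.post(endpoint_url,
                             data=json.dumps(body),
                             headers={'X-Auth-Token': heat_token,
                                      'Content-Type': 'application/json'},
                             verify=False)  # mirrors the -k flag in the template
        resp.raise_for_status()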
- -#One of the controller nodes -controller_nodes = localhost -controller_nodes_name = controller - -#Controller node user who able connect via ssh -controller_node_ssh_user = root - -#Controller node ssh user's password -controller_node_ssh_password = r00tme -controller_node_ssh_key_path = /root/.ssh/id_rsa - -#The list of the services should be enabled -enabled_services=nova-cert, nova-consoleauth, nova-scheduler, nova-conductor, nova-compute, nova-network, nova-compute, nova-network - -# Allows test cases to create/destroy tenants and users. This option -# enables isolated test cases and better parallel execution, -# but also requires that OpenStack Identity API admin credentials -# are known. -allow_tenant_isolation = True - -# Allows test cases to create/destroy tenants and users. This option -# enables isolated test cases and better parallel execution, -# but also requires that OpenStack Identity API admin credentials -# are known. -allow_tenant_reuse = true - -# Reference data for tests. The ref and ref_alt should be -# distinct images/flavors. -image_name = TestVM -flavor_ref = 1 - -# User names used to authenticate to an instance for a given image. -image_ssh_user = cirros -image_alt_ssh_user = cirros - -# Number of seconds to wait while looping to check the status of an -# instance that is building. -build_interval = 3 - -# Number of seconds to time out on waiting for an instance -# to build or reach an expected status -build_timeout = 300 - -# Run additional tests that use SSH for instance validation? -# This requires the instances be routable from the host -# executing the tests -run_ssh = false - -# Number of seconds to wait to authenticate to an instance -ssh_timeout = 300 - -# Number of seconds to wait for output from ssh channel -ssh_channel_timeout = 60 - -# The type of endpoint for a Compute API service. Unless you have a -# custom Keystone service catalog implementation, you probably want to leave -# this value as "compute" -catalog_type = compute - -# Does the Compute API support creation of images? -create_image_enabled = true - -[image] -# This section contains configuration options used when executing tests -# against the OpenStack Images API - -# The type of endpoint for an Image API service. Unless you have a -# custom Keystone service catalog implementation, you probably want to leave -# this value as "image" -catalog_type = image - -# The version of the OpenStack Images API to use -api_version = 1 - -# HTTP image to use for glance http image testing -http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz - -[network] -# This section contains configuration options used when executing tests -# against the OpenStack Network API. - -# Version of the Quantum API -api_version = 2.0 -# Catalog type of the Quantum Service -catalog_type = network - -# A large private cidr block from which to allocate smaller blocks for -# tenant networks. -tenant_network_cidr = 10.13.0.0/16 - -# The mask bits used to partition the tenant block. -tenant_network_mask_bits = 28 - -# If tenant networks are reachable, connectivity checks will be -# performed directly against addresses on those networks. -tenant_networks_reachable = true - -# Whether or not quantum is expected to be available -quantum_available = false - -[volume] -# This section contains the configuration options used when executing tests -# against the OpenStack Block Storage API service - -# The type of endpoint for a Cinder or Block Storage API service. 
-# Unless you have a custom Keystone service catalog implementation, you -# probably want to leave this value as "volume" -catalog_type = volume -# Number of seconds to wait while looping to check the status of a -# volume that is being made available -build_interval = 3 -# Number of seconds to time out on waiting for a volume -# to be available or reach an expected status -build_timeout = 300 -# Runs Cinder multi-backend tests (requires 2 backends declared in cinder.conf) -# They must have different volume_backend_name (backend1_name and backend2_name -# have to be different) -multi_backend_enabled = false -backend1_name = BACKEND_1 -backend2_name = BACKEND_2 - -[object-storage] -# This section contains configuration options used when executing tests -# against the OpenStack Object Storage API. - -# You can configure the credentials in the compute section - -# The type of endpoint for an Object Storage API service. Unless you have a -# custom Keystone service catalog implementation, you probably want to leave -# this value as "object-store" -catalog_type = object-store - -# Number of seconds to time on waiting for a container to container -# synchronization complete -container_sync_timeout = 120 -# Number of seconds to wait while looping to check the status of a -# container to container synchronization -container_sync_interval = 5 \ No newline at end of file diff --git a/fuel_health/exceptions.py b/fuel_health/exceptions.py deleted file mode 100644 index ad0b1b1a..00000000 --- a/fuel_health/exceptions.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest2 - - -class FuelException(Exception): - """Base Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - """ - message = "An unknown exception occurred" - - def __init__(self, *args, **kwargs): - super(FuelException, self).__init__() - try: - self._error_string = self.message % kwargs - except Exception: - # at least get the core message out if something happened - self._error_string = self.message - if len(args) > 0: - # If there is a non-kwarg parameter, assume it's the error - # message or reason description and tack it on to the end - # of the exception message - # Convert all arguments into their string representations... - args = ["%s" % arg for arg in args] - self._error_string = (self._error_string + - "\nDetails: %s" % '\n'.join(args)) - - def __str__(self): - return self._error_string - - -class InvalidConfiguration(FuelException): - message = "Invalid Configuration" - - -class InvalidCredentials(InvalidConfiguration): - message = ( - "Authorization failure. " - "Please provide the valid credentials for your OpenStack environment, " - "and reattempt." - ) - - -class SetProxy(InvalidConfiguration): - message = ("Can not set proxy for Health Check." 
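test.conf above is consumed as a plain INI file; a minimal sketch of reading one of its options, mirroring the ConfigParser one-liner used by get_conf_values() further below (the path and option names are just examples):

    import ConfigParser  # configparser / read_file() on Python 3

    def read_option(conf_path, section, option):
        cfg = ConfigParser.ConfigParser()
        cfg.readfp(open(conf_path))
        return cfg.get(section, option)

    # e.g. read_option('fuel_health/etc/test.conf', 'identity', 'admin_username')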
- "Make sure that network configuration " - "for controllers is correct") - - -class OfflineControllers(InvalidConfiguration): - message = ('Can not check health of cluster.' - ' All controllers are offline') - - -class RestClientException(FuelException, - unittest2.TestCase.failureException): - pass - - -class NotFound(RestClientException): - message = "Object not found" - - -class Unauthorized(RestClientException): - message = 'Unauthorized' - - -class TimeoutException(FuelException): - message = "Request timed out" - - -class BuildErrorException(FuelException): - message = "Server %(server_id)s failed to build and is in ERROR status" - - -class AddImageException(FuelException): - message = "Image %(image_id)s failed to become ACTIVE in the allotted time" - - -class EC2RegisterImageException(FuelException): - message = ("Image %(image_id)s failed to become 'available' " - "in the allotted time") - - -class VolumeBuildErrorException(FuelException): - message = "Volume %(volume_id)s failed to build and is in ERROR status" - - -class SnapshotBuildErrorException(FuelException): - message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status" - - -class StackBuildErrorException(FuelException): - message = ("Stack %(stack_identifier)s is in %(stack_status)s status " - "due to '%(stack_status_reason)s'") - - -class BadRequest(RestClientException): - message = "Bad request" - - -class UnprocessableEntity(RestClientException): - message = "Unprocessable entity" - - -class AuthenticationFailure(RestClientException): - message = ("Authentication with user %(user)s and password " - "%(password)s failed") - - -class EndpointNotFound(FuelException): - message = "Endpoint not found" - - -class RateLimitExceeded(FuelException): - message = ("Rate limit exceeded.\nMessage: %(message)s\n" - "Details: %(details)s") - - -class OverLimit(FuelException): - message = "Quota exceeded" - - -class ComputeFault(FuelException): - message = "Got compute fault" - - -class ImageFault(FuelException): - message = "Image for tests not found" - - -class IdentityError(FuelException): - message = "Got identity error" - - -class Duplicate(RestClientException): - message = "An object with that identifier already exists" - - -class SSHTimeout(FuelException): - message = ("Connection to the %(host)s via SSH timed out.\n" - "User: %(user)s, Password: %(password)s") - - -class SSHExecCommandFailed(FuelException): - """Raised when remotely executed command returns nonzero status.""" - message = ("Command '%(command)s', exit status: %(exit_status)d, " - "Error:\n%(strerror)s") - - -class ServerUnreachable(FuelException): - message = "The server is not reachable via the configured network" - - -class SQLException(FuelException): - message = "SQL error: %(message)s" - - -class TearDownException(FuelException): - message = "%(num)d cleanUp operation failed" - - -class RFCViolation(RestClientException): - message = "RFC Violation" - - -class ResponseWithNonEmptyBody(RFCViolation): - message = ("RFC Violation! Response with %(status)d HTTP Status Code " - "MUST NOT have a body") - - -class ResponseWithEntity(RFCViolation): - message = ("RFC Violation! Response with 205 HTTP Status Code " - "MUST NOT have an entity") diff --git a/fuel_health/glancemanager.py b/fuel_health/glancemanager.py deleted file mode 100644 index 9692b235..00000000 --- a/fuel_health/glancemanager.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. 
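The FuelException hierarchy above formats each subclass's 'message' template with the keyword arguments passed to the constructor and appends any positional arguments as details. A small usage sketch of that contract (hypothetical call sites):

    from fuel_health.exceptions import (BuildErrorException,
                                        SSHExecCommandFailed)

    try:
        raise BuildErrorException(server_id='demo-server')
    except BuildErrorException as exc:
        # "Server demo-server failed to build and is in ERROR status"
        print(str(exc))

    try:
        raise SSHExecCommandFailed('extra detail',
                                   command='uptime',
                                   exit_status=1,
                                   strerror='boom')
    except SSHExecCommandFailed as exc:
        # Formatted message plus "\nDetails: extra detail" appended by the
        # FuelException base class.
        print(str(exc))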
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils -import random -import StringIO - -import fuel_health.common.ssh -from fuel_health.common.utils.data_utils import rand_name -import fuel_health.nmanager -import fuel_health.test - - -LOG = logging.getLogger(__name__) - - -class GlanceTest(fuel_health.nmanager.NovaNetworkScenarioTest): - """Manager that provides access to the Glance python client for - calling Glance API. - """ - - @classmethod - def setUpClass(cls): - super(GlanceTest, cls).setUpClass() - cls.images = [] - if cls.manager.clients_initialized: - if not cls.glance_client_v1: - LOG.warning('Glance client v1 was not initialized') - if not cls.glance_client: - LOG.warning('Glance client v2 was not initialized') - if not cls.glance_client_v1 and not cls.glance_client: - cls.fail('Glance client v1 and v2 was not initialized') - - def tearDown(self): - LOG.debug("Deleting images created by Glance test") - self._clean_images() - super(GlanceTest, self).tearDown() - - def _list_images(self, client): - return client.images.list() - - def image_create(self, client, **kwargs): - container_format = 'bare' - data = StringIO.StringIO( - ''.join([chr(random.randint(0, 255)) for i in range(1024)])) - disk_format = 'raw' - image_name = rand_name('ostf_test-image_glance-') - if client is self.glance_client_v1: - image = client.images.create(name=image_name, - container_format=container_format, - data=data, - disk_format=disk_format, **kwargs) - self.images.append(image.id) - return image - elif client is self.glance_client: - # TODO(vryzhenkin): Rework this function using Glance Tasks v2, - # TODO(vryzhenkin) when Tasks will be supported by OpenStack Glance - image = client.images.create(name=image_name, - container_format=container_format, - disk_format=disk_format, **kwargs) - client.images.upload(image.id, 'dummy_data') - self.images.append(image.id) - return image - - def find_image_by_id(self, client, image_id): - return client.images.get(image_id) - - def delete_image(self, client, object): - client.images.delete(object) - if client is self.glance_client_v1: - return self.images.remove(object.id) - else: - return self.images.remove(object) - - def check_image_status(self, client, image, status='active'): - def image_status_comparison(): - if self.find_image_by_id(client, image.id).status == status: - return True - - if fuel_health.test.call_until_true(image_status_comparison, 180, 5): - return self.find_image_by_id(client, image.id) - else: - self.fail('Image has incorrect status {0}' - .format(self.find_image_by_id(client, image.id))) - - def update_image(self, client, object, group_props, prop, value_prop): - if client is self.glance_client_v1: - properties = object.properties - properties[group_props] = jsonutils.dumps({prop: value_prop}) - return client.images.update(object, properties=properties) - elif client is self.glance_client: - properties = '{0}: 
{1}'.format(prop, value_prop) - return client.images.update(object, group_props=properties) - - def find_props(self, client, object, group_props, prop, value_prop): - msg = 'Can not find created properties in image' - if client is self.glance_client_v1: - for group in object.properties: - if group == group_props: - for i in jsonutils.loads(object.properties[group]): - k = jsonutils.loads(object.properties[group])[prop] - if i == prop and k == unicode(value_prop): - return 'OK' - else: - self.fail(msg) - else: - self.fail(msg) - elif client is self.glance_client: - properties = '{0}: {1}'.format(prop, value_prop) - for key in object: - if object[key] == properties: - return 'OK' - self.fail(msg) diff --git a/fuel_health/ha_base.py b/fuel_health/ha_base.py deleted file mode 100644 index 55dd51d9..00000000 --- a/fuel_health/ha_base.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from distutils import version -import json -import logging -from lxml import etree - -import fuel_health -from fuel_health.common import ssh -from fuel_health.common.utils import data_utils -from fuel_health.test import BaseTestCase - - -LOG = logging.getLogger(__name__) - - -class RabbitSanityClass(BaseTestCase): - """TestClass contains RabbitMQ sanity checks.""" - - @classmethod - def setUpClass(cls): - cls.config = fuel_health.config.FuelConfig() - cls._controllers = cls.config.compute.online_controllers - cls.nodes = cls.config.compute.nodes - cls._usr = cls.config.compute.controller_node_ssh_user - cls._pwd = cls.config.compute.controller_node_ssh_password - cls._key = cls.config.compute.path_to_private_key - cls._ssh_timeout = cls.config.compute.ssh_timeout - cls._password = None - cls._userid = None - cls.messages = [] - cls.queues = [] - cls.release_version = \ - cls.config.compute.release_version.split('-')[1] - - @property - def password(self): - if version.StrictVersion(self.release_version)\ - < version.StrictVersion('7.0'): - self._password = self.get_conf_values().strip() - return self._password - - if self._password is None: - self._password = self.get_hiera_values( - hiera_hash='rabbit', - hash_key='password' - ) - # FIXME(mattmymo): Remove this after merging - # https://review.openstack.org/#/c/276797/ - if not self._password: - self._password = self.get_hiera_values( - hiera_hash='rabbit_hash', - hash_key='password' - ) - - return self._password - - @property - def amqp_hosts_name(self): - amqp_hosts_name = {} - if version.StrictVersion(self.release_version)\ - < version.StrictVersion('7.0'): - for controller_ip in self._controllers: - amqp_hosts_name[controller_ip] = [controller_ip, '5673'] - return amqp_hosts_name - - nodes = self.get_hiera_values(hiera_hash='network_metadata', - hash_key='nodes', - json_parse=True) - for ip, port in self.get_amqp_hosts(): - for node in nodes: - ips = [nodes[node]['network_roles'][role] - for role in nodes[node]['network_roles']] - if ip in ips: - nailgun_nodes = [n for n in self.nodes - if 
nodes[node]['name'] == n['hostname'] - and n['online']] - if len(nailgun_nodes) == 1: - amqp_hosts_name[nodes[node]['name']] = [ip, port] - return amqp_hosts_name - - @property - def userid(self): - if version.StrictVersion(self.release_version)\ - < version.StrictVersion('7.0'): - self._userid = 'nova' - return self._userid - - if self._userid is None: - self._userid = self.get_hiera_values( - hiera_hash='rabbit', - hash_key='user' - ) - # FIXME(mattmymo): Remove this after merging - # https://review.openstack.org/#/c/276797/ - if not self._userid: - self._userid = self.get_hiera_values( - hiera_hash='rabbit_hash', - hash_key='user' - ) - return self._userid - - def get_ssh_connection_to_controller(self, controller): - remote = ssh.Client(host=controller, - username=self._usr, - password=self._pwd, - key_filename=self._key, - timeout=self._ssh_timeout) - return remote - - def list_nodes(self): - if not self.amqp_hosts_name: - self.fail('There are no online rabbit nodes') - remote = \ - self.get_ssh_connection_to_controller( - self.amqp_hosts_name.keys()[0]) - output = remote.exec_command("rabbitmqctl cluster_status") - substring_ind = output.find('{running_nodes') - sub_end_ind = output.find('cluster_name') - result_str = output[substring_ind: sub_end_ind] - num_node = result_str.count("rabbit@") - return num_node - - def pick_rabbit_master(self): - if not self.amqp_hosts_name: - self.fail('There are no online rabbit nodes') - remote = \ - self.get_ssh_connection_to_controller( - self.amqp_hosts_name.keys()[0]) - LOG.info('ssh session to node {0} was open'.format( - self.amqp_hosts_name.keys()[0])) - LOG.info('Try to execute command ') - output = remote.exec_command( - "crm resource status master_p_rabbitmq-server") - LOG.debug('Output is {0}'.format(output)) - substring_ind = output.find( - 'resource master_p_rabbitmq-server is running on:') - sub_end_ind = output.find('Master') - LOG.debug('Start index is {0} end' - ' index is {1}'.format(substring_ind, sub_end_ind)) - result_str = output[substring_ind: sub_end_ind] - LOG.debug('Result string is {0}'.format(result_str)) - return result_str - - def list_channels(self): - if not self.amqp_hosts_name: - self.fail('There are no online rabbit nodes') - remote = \ - self.get_ssh_connection_to_controller( - self.amqp_hosts_name.keys()[0]) - output = remote.exec_command("rabbitmqctl list_channels") - - LOG.debug('Result of executing command rabbitmqctl' - ' list_channels is {0}'.format(output)) - return output - - def get_hiera_values(self, hiera_hash="rabbit", - hash_key=None, - conf_path="/etc/hiera.yaml", - json_parse=False): - - if hash_key is not None: - lookup_cmd = ('value = hiera.lookup("{0}", {{}}, ' - '{{}}, nil, :hash)["{1}"]').format(hiera_hash, - hash_key) - else: - lookup_cmd = ('value = hiera.lookup("{0}", {{}},' - ' {{}}, nil, :hash)').format(hiera_hash) - if json_parse: - print_cmd = 'require "json"; puts JSON.dump(value)' - else: - print_cmd = 'puts value' - - cmd = ('ruby -e \'require "hiera"; ' - 'hiera = Hiera.new(:config => "{0}"); ' - '{1}; {2};\'').format(conf_path, lookup_cmd, print_cmd) - - LOG.debug("Try to execute cmd {0}".format(cmd)) - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - try: - res = remote.exec_command(cmd) - LOG.debug("result is {0}".format(res)) - if json_parse: - return json.loads(res.strip()) - return res.strip() - except Exception: - LOG.exception("Fail to get data from Hiera DB!") - self.fail("Fail to get data from Hiera DB!") - - def get_conf_values(self, 
variable="rabbit_password", - sections="DEFAULT", - conf_path="/etc/nova/nova.conf"): - cmd = ("python -c 'import ConfigParser; " - "cfg=ConfigParser.ConfigParser(); " - "cfg.readfp(open('\"'{0}'\"')); " - "print cfg.get('\"'{1}'\"', '\"'{2}'\"')'") - LOG.debug("Try to execute cmd {0}".format(cmd)) - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - try: - res = remote.exec_command(cmd.format( - conf_path, sections, variable)) - LOG.debug("result is {0}".format(res)) - return res - except Exception: - LOG.exception("Fail to get data from config") - self.fail("Fail to get data from config") - - def get_amqp_hosts(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - cmd = 'hiera amqp_hosts' - LOG.debug("Try to execute cmd '{0}' on controller...".format(cmd)) - result = remote.exec_command(cmd) - LOG.debug("Result: {0}".format(result)) - hosts = result.strip().split(',') - return [host.lstrip().split(':')[0:2] for host in hosts] - - def check_rabbit_connections(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - for key in self.amqp_hosts_name.keys(): - ip, port = self.amqp_hosts_name[key] - cmd = ("python -c 'import kombu;" - " c = kombu.Connection(\"amqp://{1}:{2}@{0}:{3}//\");" - " c.connect()'".format(ip, self.userid, - self.password, port)) - try: - LOG.debug('Checking AMQP host "{0}"...'.format(ip)) - remote.exec_command(cmd) - except Exception: - LOG.exception("Failed to establish AMQP connection") - self.fail("Failed to establish AMQP connection to {1}/tcp " - "port on {0} from controller node!".format(ip, port)) - - def create_queue(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - for key in self.amqp_hosts_name.keys(): - ip, port = self.amqp_hosts_name[key] - test_queue = 'test-rabbit-{0}-{1}'.format( - data_utils.rand_name() + data_utils.generate_uuid(), - ip - ) - cmd = ("python -c 'import kombu;" - " c = kombu.Connection(\"amqp://{1}:{2}@{0}:{3}//\");" - " c.connect(); ch = c.channel(); q = kombu.Qu" - "eue(\"{4}\", channel=ch, durable=False, queue_arguments={{" - "\"x-expires\": 15 * 60 * 1000}}); q.declare()'".format( - ip, self.userid, self.password, port, test_queue)) - try: - LOG.debug("Declaring queue {0} on host {1}".format( - test_queue, ip)) - self.queues.append(test_queue) - remote.exec_command(cmd) - except Exception: - LOG.exception("Failed to declare queue on host") - self.fail("Failed to declare queue on host {0}".format(ip)) - - def publish_message(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - for key in self.amqp_hosts_name.keys(): - ip, port = self.amqp_hosts_name[key] - queues = [q for q in self.queues if ip in q] - if not len(queues) > 0: - self.fail("Can't publish message, queue created on host '{0}' " - "doesn't exist!".format(ip)) - test_queue = queues[0] - id = data_utils.generate_uuid() - cmd = ("python -c 'import kombu;" - " c = kombu.Connection(\"amqp://{1}:{2}@{0}:{3}//\");" - " c.connect(); ch = c.channel(); producer = " - "kombu.Producer(channel=ch, routing_key=\"{4}\"); " - "producer.publish(\"{5}\")'".format( - ip, self.userid, self.password, port, test_queue, id)) - try: - LOG.debug('Try to publish message 
{0}'.format(id)) - remote.exec_command(cmd) - except Exception: - LOG.exception("Failed to publish message!") - self.fail("Failed to publish message!") - self.messages.append({'queue': test_queue, 'id': id}) - - def check_queue_message_replication(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - for key in self.amqp_hosts_name.keys(): - ip, port = self.amqp_hosts_name[key] - for message in self.messages: - if ip in message['queue']: - continue - cmd = ("python -c 'import kombu;" - " c = kombu.Connection(\"amqp://{1}:{2}@{0}:{3}//\");" - " c.connect(); " - "ch = c.channel(); q = kombu.Queue(\"{4}\", channel=ch)" - "; msg = q.get(True); retval = 0 if msg.body in \"{5}\"" - " else 1; exit(retval)'".format( - ip, self.userid, self.password, port, - message['queue'], message['id'])) - try: - LOG.debug('Checking that message with ID "{0}" was ' - 'replicated over the cluster...'.format(id)) - remote.exec_command(cmd) - except Exception: - LOG.exception('Failed to check message replication!') - self.fail('Failed to check message replication!') - self.messages.remove(message) - break - - def delete_queue(self): - if not self._controllers: - self.fail('There are no online controllers') - remote = self.get_ssh_connection_to_controller(self._controllers[0]) - LOG.debug('Try to deleting queues {0}... '.format(self.queues)) - if not self.queues: - return - host_key = self.amqp_hosts_name.keys()[0] - ip, port = self.amqp_hosts_name[host_key] - for test_queue in self.queues: - cmd = ("python -c 'import kombu;" - " c = kombu.Connection(\"amqp://{1}:{2}@{0}:{3}//\");" - " c.connect(); ch = c.channel(); q = kombu.Qu" - "eue(\"{4}\", channel=ch); q.delete();'".format( - ip, self.userid, self.password, port, test_queue)) - try: - LOG.debug("Removing queue {0} on host {1}".format( - test_queue, ip)) - remote.exec_command(cmd) - self.queues.remove(test_queue) - except Exception: - LOG.exception('Failed to delete queue') - self.fail('Failed to delete queue "{0}"!'.format(test_queue)) - - -class TestPacemakerBase(BaseTestCase): - """TestPacemakerStatus class base methods.""" - - @classmethod - def setUpClass(cls): - super(TestPacemakerBase, cls).setUpClass() - cls.config = fuel_health.config.FuelConfig() - cls.controller_names = cls.config.compute.controller_names - cls.online_controller_names = ( - cls.config.compute.online_controller_names) - cls.offline_controller_names = list( - set(cls.controller_names) - set(cls.online_controller_names)) - - cls.online_controller_ips = cls.config.compute.online_controllers - cls.controller_key = cls.config.compute.path_to_private_key - cls.controller_user = cls.config.compute.ssh_user - cls.controllers_pwd = cls.config.compute.controller_node_ssh_password - cls.timeout = cls.config.compute.ssh_timeout - - def setUp(self): - super(TestPacemakerBase, self).setUp() - if 'ha' not in self.config.mode: - self.skipTest('Cluster is not HA mode, skipping tests') - if not self.online_controller_names: - self.skipTest('There are no controller nodes') - - def _run_ssh_cmd(self, host, cmd): - """Open SSH session with host and execute command.""" - try: - sshclient = ssh.Client(host, self.controller_user, - self.controllers_pwd, - key_filename=self.controller_key, - timeout=self.timeout) - return sshclient.exec_longrun_command(cmd) - except Exception: - LOG.exception("Failed on run ssh cmd") - self.fail("%s command failed." 
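The RabbitMQ checks above shell out to a controller and run short kombu one-liners; expanded into ordinary Python, the declare/publish/consume cycle they perform looks roughly like this (host and credentials are placeholders):

    import kombu

    def rabbit_roundtrip(host, user, password, port, queue_name, payload):
        conn = kombu.Connection(
            'amqp://{0}:{1}@{2}:{3}//'.format(user, password, host, port))
        conn.connect()
        channel = conn.channel()
        # Same arguments as create_queue(): non-durable, auto-expiring queue.
        queue = kombu.Queue(queue_name, channel=channel, durable=False,
                            queue_arguments={'x-expires': 15 * 60 * 1000})
        queue.declare()
        producer = kombu.Producer(channel=channel, routing_key=queue_name)
        producer.publish(payload)
        message = queue.get(True)  # mirrors q.get(True) in the one-liner
        return message is not None and message.body == payload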
% cmd) - - def _register_resource(self, res, res_name, resources): - if res_name not in resources: - resources[res_name] = { - 'master': [], - 'nodes': [], - 'started': 0, - 'stopped': 0, - 'active': False, - 'managed': False, - 'failed': False, - } - - if 'true' in res.get('active'): - resources[res_name]['active'] = True - - if 'true' in res.get('managed'): - resources[res_name]['managed'] = True - - if 'true' in res.get('failed'): - resources[res_name]['failed'] = True - - res_role = res.get('role') - num_nodes = int(res.get('nodes_running_on')) - - if num_nodes: - resources[res_name]['started'] += num_nodes - - for rnode in res.iter('node'): - if 'Master' in res_role: - resources[res_name]['master'].append( - rnode.get('name')) - resources[res_name]['nodes'].append( - rnode.get('name')) - else: - resources[res_name]['stopped'] += 1 - - def get_pcs_resources(self, pcs_status): - """Get pacemaker resources status to a python dict: - return: - { - str: { # Resource name - 'started': int, # count of Master/Started - 'stopped': int, # count of Stopped resources - 'nodes': [node_name, ...], # All node names where the - # resource is started - 'master': [node_name, ...], # Node names for 'Master' - # ('master' is also in 'nodes') - 'active': bool, # Is resource active? - 'managed': bool, # Is resource managed? - 'failed': bool, # Has resource failed? - }, - ... - } - """ - root = etree.fromstring(pcs_status) - resources = {} - - for res_group in root.iter('resources'): - for res in res_group: - res_name = res.get('id') - if 'resource' in res.tag: - self._register_resource(res, res_name, resources) - elif 'clone' in res.tag: - for r in res: - self._register_resource(r, res_name, resources) - elif 'group' in res.tag: - for r in res: - res_name_ingroup = r.get('id') - self._register_resource(r, res_name_ingroup, resources) - self._register_resource(r, res_name, resources) - - return resources - - def get_pcs_nodes(self, pcs_status): - root = etree.fromstring(pcs_status) - nodes = {'Online': [], 'Offline': []} - for nodes_group in root.iter('nodes'): - for node in nodes_group: - if 'true' in node.get('online'): - nodes['Online'].append(node.get('name')) - else: - nodes['Offline'].append(node.get('name')) - return nodes - - def get_pcs_constraints(self, constraints_xml): - """Parse pacemaker constraints - - :param constraints_xml: XML string contains pacemaker constraints - :return dict: - {string: # Resource name, - {'attrs': list, # List of dicts for resource - # attributes on each node, - 'enabled': list # List of strings with node names where - # the resource allowed to start, - 'with-rsc': string # Name of an another resource - # from which this resource depends on. - } - } - - """ - - root = etree.fromstring(constraints_xml) - constraints = {} - # 1. Get all attributes from constraints for each resource - for con_group in root.iter('constraints'): - for con in con_group: - if 'rsc_location' in con.tag or 'rsc_colocation' in con.tag: - if 'score' not in con.keys(): - # TODO(ddmitriev): process resource dependences - # for 'rule' section - continue - - rsc = con.get('rsc') - if rsc not in constraints: - constraints[rsc] = {'attrs': [con.attrib]} - else: - constraints[rsc]['attrs'].append(con.attrib) - - # 2. Make list of nodes for each resource where it is allowed to start. 
- # Remove from 'enabled' list all nodes with score '-INFINITY' - for rsc in constraints: - attrs = constraints[rsc]['attrs'] - enabled = [] - disabled = [] - for attr in attrs: - if 'with-rsc' in attr: - constraints[rsc]['with-rsc'] = attr['with-rsc'] - elif 'node' in attr: - if attr['score'] == '-INFINITY': - disabled.append(attr['node']) - else: - enabled.append(attr['node']) - constraints[rsc]['enabled'] = list(set(enabled) - set(disabled)) - - return constraints - - def get_resource_nodes(self, rsc, constraints, cluster_resources, - orig_rsc): - if rsc in orig_rsc: - # Constraints loop detected! - msg = ('There is a dependency loop in constraints configuration: ' - 'resource "{0}" depends on the resource "{1}". Please check' - ' the pacemaker configuration!' - .format(orig_rsc[-1], rsc)) - raise fuel_health.exceptions.InvalidConfiguration(msg) - else: - orig_rsc.append(rsc) - - # Nodes where the resource is allowed to start - allowed = constraints[rsc]['enabled'] - # Nodes where the parent resource is actually started - if rsc in cluster_resources: - started = cluster_resources[rsc]['nodes'] - else: - started = [] - - if 'with-rsc' in constraints[rsc]: - # Recursively get nodes for the parent resource - (parent_allowed, - parent_started, - parent_disallowed) = self.get_resource_nodes( - constraints[rsc]['with-rsc'], - constraints, - cluster_resources, - orig_rsc) - if 'score' in constraints[rsc]: - if constraints[rsc]['score'] == '-INFINITY': - # If resource banned to start on the same nodes where - # parent resource is started, then nodes where parent - # resource is started should be removed from 'allowed' - allowed = (set(allowed) - set(parent_started)) - else: - # Reduce 'allowed' list to only those nodes where - # the parent resource is allowed and running - allowed = list(set(parent_started) & - set(parent_allowed) & - set(allowed)) - # List of nodes, where resource is started, but not allowed to start - disallowed = list(set(started) - set(allowed)) - - return allowed, started, disallowed diff --git a/fuel_health/heatmanager.py b/fuel_health/heatmanager.py deleted file mode 100644 index 0be4c875..00000000 --- a/fuel_health/heatmanager.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# Copyright 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
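get_pcs_resources() and get_pcs_nodes() above consume the XML emitted by commands such as `pcs status xml` (crm_mon as XML). A self-contained sketch of the node parsing on a trimmed-down sample document:

    from lxml import etree

    SAMPLE = """
    <crm_mon>
      <nodes>
        <node name="node-1.domain.tld" online="true"/>
        <node name="node-2.domain.tld" online="false"/>
      </nodes>
    </crm_mon>
    """

    def get_nodes(pcs_status_xml):
        # Same walk as TestPacemakerBase.get_pcs_nodes().
        root = etree.fromstring(pcs_status_xml)
        nodes = {'Online': [], 'Offline': []}
        for group in root.iter('nodes'):
            for node in group:
                key = 'Online' if 'true' in node.get('online') else 'Offline'
                nodes[key].append(node.get('name'))
        return nodes

    # get_nodes(SAMPLE) -> {'Online': ['node-1.domain.tld'],
    #                       'Offline': ['node-2.domain.tld']}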
- -import logging -import os - -import fuel_health.common.ssh -from fuel_health.common.utils.data_utils import rand_name -import fuel_health.nmanager -import fuel_health.test - - -LOG = logging.getLogger(__name__) - - -class HeatBaseTest(fuel_health.nmanager.PlatformServicesBaseClass): - """Base class for Heat sanity and platform tests.""" - - @classmethod - def setUpClass(cls): - super(HeatBaseTest, cls).setUpClass() - - cls.wait_interval = cls.config.compute.build_interval - cls.wait_timeout = cls.config.compute.build_timeout - - def setUp(self): - super(HeatBaseTest, self).setUp() - - self.check_clients_state() - - if self.heat_client is None: - self.fail('Heat is unavailable.') - if not self.find_micro_flavor(): - self.fail('m1.micro flavor was not created.') - - def create_flavor(self, ram=256, vcpus=1, disk=2): - """This method creates a flavor for Heat tests.""" - - LOG.debug('Creation of Heat tests flavor...') - name = rand_name('ost1_test-heat-flavor-') - flavor = self.compute_client.flavors.create(name, ram, vcpus, disk) - self.addCleanup(self.compute_client.flavors.delete, flavor.id) - LOG.debug('Flavor for Heat tests has been created.') - - return flavor - - def get_stack(self, stack_id): - """This method returns desired stack.""" - - LOG.debug("Getting desired stack: {0}.".format(stack_id)) - return self.heat_client.stacks.get(stack_id) - - def create_stack(self, template, disable_rollback=True, parameters={}): - """This method creates stack by given template.""" - - LOG.debug('Creation of desired stack...') - stack_name = rand_name('ost1_test-heat-stack-') - stack_id = self.heat_client.stacks.create( - stack_name=stack_name, - template=template, - parameters=parameters, - disable_rollback=disable_rollback - )['stack']['id'] - - self.addCleanup(self.delete_stack, stack_id) - - # heat client doesn't return stack details after creation - # so need to request them - stack = self.get_stack(stack_id) - LOG.debug('Stack "{0}" creation finished.'.format(stack_name)) - - return stack - - def _is_stack_deleted(self, stack_id): - """This method checks whether or not stack deleted.""" - - stack = self.get_stack(stack_id) - if stack.stack_status in ('DELETE_COMPLETE', 'ROLLBACK_COMPLETE'): - return True - return False - - def delete_stack(self, stack_id): - """This method deletes stack if it exists.""" - - LOG.debug('Deletion of specified stack: {0}'.format(stack_id)) - if self._is_stack_deleted(stack_id): - LOG.debug('Stack "{0}" already deleted.'.format(stack_id)) - return - try: - self.heat_client.stacks.delete(stack_id) - except Exception: - self.fail('Cleanup failed. ' - 'Impossibly to delete stack "{0}".'.format(stack_id)) - self.wait_for_stack_deleted(stack_id) - LOG.debug('Stack "{0}" has been deleted.'.format(stack_id)) - - def wait_for_stack_deleted(self, stack_id): - """This method waits stack deletion.""" - - if not fuel_health.test.call_until_true(self._is_stack_deleted, - self.wait_timeout, - self.wait_interval, - stack_id): - self.fail('Timed out waiting for stack to be deleted.') - - def update_stack(self, stack_id, template, parameters={}): - """This method updates specified stack.""" - - self.heat_client.stacks.update(stack_id=stack_id, - template=template, - parameters=parameters) - return self.get_stack(stack_id) - - def wait_for_stack_status(self, stack_id, expected_status, - timeout=None, interval=None): - """The method is a customization of test.status_timeout(). 
- - It addresses `stack_status` instead of `status` field and - checks for FAILED instead of ERROR status. - The rest is the same. - """ - if timeout is None: - timeout = self.wait_timeout - if interval is None: - interval = self.wait_interval - - def check_status(): - stack = self.get_stack(stack_id) - new_status = stack.stack_status - if 'FAIL' in new_status: - self.fail('Failed to get to expected status. ' - 'Currently in {0} status.'.format(new_status)) - elif new_status == expected_status: - return True - LOG.debug('Waiting for {0} to get to {1} status. ' - 'Currently in {2} status.'.format( - stack, expected_status, new_status)) - - if not fuel_health.test.call_until_true(check_status, - timeout, - interval): - self.fail('Timeout exceeded while waiting for ' - 'stack status becomes {0}'.format(expected_status)) - - def get_instances_by_name_mask(self, mask_name): - """This method retuns list of instances with certain names.""" - - instances = [] - - instance_list = self.compute_client.servers.list() - LOG.debug('Instances list is {0}'.format(instance_list)) - LOG.debug('Expected instance name should inlude {0}'.format(mask_name)) - - for inst in instance_list: - LOG.debug('Instance name is {0}'.format(inst.name)) - if inst.name.startswith(mask_name): - instances.append(inst) - - return instances - - def wait_for_autoscaling(self, exp_count, - timeout, interval, reduced_stack_name): - """This method checks whether autoscaling finished or not. - - It checks number of instances owned by stack, instances - belonging to stack are defined by special name pattern - (reduced_stack_name). It is not possible to get stack instances - using get_stack_objects, because instances are created as part of - autoscaling group resource. - """ - - LOG.debug('Expected number of instances' - ' owned by stack is {0}'.format(exp_count)) - - def count_instances(reduced_stack_name): - instances = self.get_instances_by_name_mask(reduced_stack_name) - return len(instances) == exp_count - - return fuel_health.test.call_until_true( - count_instances, timeout, interval, reduced_stack_name) - - def wait_for_vm_ready_for_load(self, conn_string, timeout, interval): - """Wait for fake file to be created on the instance. - - Creation of fake file tells that vm is ready. 
- """ - cmd = (conn_string + - " 'touch /tmp/ostf-heat.txt; " - "test -f /tmp/ostf-heat.txt && echo -ne YES || echo -ne NO'") - - def check(): - return self._run_ssh_cmd(cmd)[0] == 'YES' - - return fuel_health.test.call_until_true( - check, timeout, interval) - - def save_key_to_file(self, key): - return self._run_ssh_cmd( - "KEY=`mktemp`; echo '{0}' > $KEY; " - "chmod 600 $KEY; echo -ne $KEY;".format(key))[0] - - def delete_key_file(self, filepath): - self._run_ssh_cmd('rm -f {0}'.format(filepath)) - - def load_vm_cpu(self, connection_string): - self._run_ssh_cmd(connection_string + " 'rm -f /tmp/ostf-heat.txt'") - return self._run_ssh_cmd(connection_string + - " 'cat /dev/urandom |" - " gzip -9 > /dev/null &'")[0] - - def release_vm_cpu(self, connection_string): - pid = self._run_ssh_cmd(connection_string + - ' ps -ef | grep \"cat /dev/urandom\" ' - '| grep -v grep | awk \"{print $1}\"')[0] - - return self._run_ssh_cmd(connection_string + - ' kill -9 {0}'.format(pid.strip()))[0] - - @staticmethod - def load_template(file_name): - """Load specified template file from etc directory.""" - - filepath = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'etc', file_name) - with open(filepath) as f: - return f.read() - - def get_stack_objects(self, objects_call, stack_id, **kwargs): - """This method returns list of desired stack objects. - - It gets all defined objects of stack and returns all - of them or just needed based on the specified criteria. - """ - - LOG.debug('Getting stack objects.') - try: - objects = objects_call.list(stack_id) - except Exception: - self.fail('Failed to get list of stack objects.') - - if kwargs.get('key') and kwargs.get('value'): - objects = [ob for ob in objects - if getattr(ob, kwargs['key']) == kwargs['value']] - - LOG.debug('List of fetched objects: {0}'.format(objects)) - - return objects - - def check_required_resources(self, min_required_ram_mb=4096, - hdd=40, vCpu=2): - vms_count = self.get_info_about_available_resources( - min_required_ram_mb, hdd, vCpu) - if vms_count < 1: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: your cloud should allow to create ' - 'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. ' - 'You need to remove some resources or add compute nodes ' - 'to have an ability to run this OSTF test.' - .format(min_required_ram_mb, hdd, vCpu)) - LOG.debug(msg) - self.skipTest(msg) diff --git a/fuel_health/hooks.py b/fuel_health/hooks.py deleted file mode 100644 index 2c905bde..00000000 --- a/fuel_health/hooks.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
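Most of the waiting logic above funnels through fuel_health.test.call_until_true(): poll a predicate with its extra arguments until it returns True or the timeout expires. Its behaviour can be sketched roughly as follows (assumed shape only; the real helper lives in fuel_health/test.py, which is removed elsewhere in this patch):

    import time

    def call_until_true(func, duration, sleep_for, *args):
        # Returns True as soon as func(*args) does; False once the
        # duration has elapsed without a truthy result.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func(*args):
                return True
            time.sleep(sleep_for)
        return False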
- - -def setup_hook(config): - import pbr.packaging - - # this monkey patch is to avoid appending git version to version - pbr.packaging._get_version_from_git = lambda pre_version: pre_version diff --git a/fuel_health/ironicmanager.py b/fuel_health/ironicmanager.py deleted file mode 100644 index cbd1a633..00000000 --- a/fuel_health/ironicmanager.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health import nmanager -import fuel_health.test - -from fuel_health.common import ssh -from ironicclient.common import utils -from ironicclient import exc as ironic_exc - -LOG = logging.getLogger(__name__) - - -class IronicTest(nmanager.SanityChecksTest): - """Provide access to the python-ironicclient for calling Ironic API.""" - - @classmethod - def setUpClass(cls): - """Setup Ironic client and credentials.""" - super(IronicTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.usr = cls.config.compute.controller_node_ssh_user - cls.pwd = cls.config.compute.controller_node_ssh_password - cls.key = cls.config.compute.path_to_private_key - cls.timeout = cls.config.compute.ssh_timeout - if not cls.ironic_client: - LOG.warning('Ironic client was not initialized') - - def node_create(self, **kwargs): - """Create a new node.""" - node = self.ironic_client.node.create(**kwargs) - self.addCleanup(self.node_delete, node) - return node - - def node_delete(self, node): - """Delete particular node.""" - try: - self.ironic_client.node.delete(node.uuid) - except ironic_exc.NotFound: - LOG.exception('Failed on node delete {0}'.format(node.uuid)) - - def node_update(self, node, prop, value_prop, row='properties'): - """Add property with value to node properties.""" - args = ['{0}/{1}={2}'.format(row, prop, value_prop)] - patch = utils.args_array_to_patch('add', args) - return self.ironic_client.node.update(node.uuid, patch) - - def node_show(self, node): - """Show detailed information about a node.""" - if node.instance_uuid: - n = self.ironic_client.node.get_by_instance_uuid( - node.instance_uuid) - else: - n = self.ironic_client.node.get(node.uuid) - return n - - def check_service_availability(self, nodes, cmd, expected, - succeed_nodes=1): - """Check running processes on nodes. - - Check that output from specified command contain expected part. - - :param nodes: List of nodes to check command. - :param cmd: Command that is executed. - :param expected: Expected output. - :param succeed_nodes: Indicates if check should succeed on specified - number of nodes. 
- """ - def check_services(): - succeed_count = 0 - for node in nodes: - remote = ssh.Client(node, self.usr, self.pwd, - key_filename=self.key, - timeout=self.timeout) - try: - output = remote.exec_command(cmd) - LOG.debug(output) - if expected in output: - succeed_count += 1 - except Exception: - pass - if succeed_count == succeed_nodes: - return True - else: - return False - - if not fuel_health.test.call_until_true(check_services, 30, - self.timeout): - self.fail('Failed to discover service {0} ' - 'within specified timeout'.format(expected)) - return True - - def list_nodes(self): - """Get list of nodes.""" - return self.ironic_client.node.list() - - def list_ports(self): - """Get list of ports.""" - return self.ironic_client.port.list() - - def list_drivers(self): - """Get list of drivers.""" - return self.ironic_client.driver.list() - - def list_chassis(self): - """Get list of chassis.""" - return self.ironic_client.chassis.list() - - def get_driver(self, driver): - """Get specified driver.""" - return self.ironic_client.driver.get(driver) diff --git a/fuel_health/manager.py b/fuel_health/manager.py deleted file mode 100644 index fc25a06d..00000000 --- a/fuel_health/manager.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# Copyright 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fuel_health.config - - -class Manager(object): - """Base manager class - - Manager objects are responsible for providing a configuration object - and a client object for a test case to use in performing actions. - """ - - def __init__(self): - self.config = fuel_health.config.FuelConfig() - self.client_attr_names = [] diff --git a/fuel_health/muranomanager.py b/fuel_health/muranomanager.py deleted file mode 100644 index e5f49b26..00000000 --- a/fuel_health/muranomanager.py +++ /dev/null @@ -1,481 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
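A usage sketch for check_service_availability() defined above; the node list, command and expected substring are hypothetical examples, not values taken from the original test suite:

    def ironic_conductor_running(test, controller_ips):
        # 'test' is an IronicTest instance; succeeds if at least one node
        # reports a running ironic-conductor process over SSH.
        return test.check_service_availability(
            nodes=controller_ips,
            cmd='pgrep -la ironic-conductor',
            expected='ironic-conductor',
            succeed_nodes=1)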
- -import contextlib -import logging -import os -import time -import zipfile - -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils - -import muranoclient.common.exceptions as exceptions -import requests - -from fuel_health.common.utils.data_utils import rand_name -import fuel_health.nmanager - -LOG = logging.getLogger(__name__) - - -class MuranoTest(fuel_health.nmanager.PlatformServicesBaseClass): - """Manager that provides access to the Murano python client for - calling Murano API. - """ - @classmethod - def setUpClass(cls): - super(MuranoTest, cls).setUpClass() - cls.packages = [] - cls.environments = [] - - def setUp(self): - super(MuranoTest, self).setUp() - self.check_clients_state() - self.env_name = rand_name("ostf_test-Murano_env") - - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes to run tests') - - self.min_required_ram_mb = 4096 - - self.murano_available = True - self.endpoint = '{0}/v1/'.format( - self.identity_client.service_catalog.url_for( - service_type='application-catalog', - endpoint_type='publicURL')) - - self.headers = { - 'X-Auth-Token': self.murano_client.http_client.auth_token, - 'content-type': 'application/json' - } - - try: - self.list_environments() - except exceptions.CommunicationError: - self.murano_available = False - self.skipTest("Murano service is not available") - - def tearDown(self): - """This method allows to clean up the OpenStack environment - after the Murano OSTF tests. - """ - - if self.murano_available: - if self.environments: - for environment_id in self.environments: - try: - self.delete_environment(environment_id) - except Exception: - LOG.exception('Failure to delete environment \ - {0}'.format(environment_id)) - if self.packages: - for package in self.packages: - try: - self.delete_package(package.id) - except Exception: - LOG.exception('Failure to delete package \ - {0}'.format(package.id)) - - super(MuranoTest, self).tearDown() - - def zip_dir(self, parent_dir, app_dir): - """This method allows to zip directory with application - :param parent_dir: Directory, where application lives - :param app_dir: Directory with application - :return: - """ - abs_path = os.path.join(parent_dir, app_dir) - path_len = len(abs_path) + 1 - zip_file = abs_path + ".zip" - with contextlib.closing(zipfile.ZipFile(zip_file, "w")) as zf: - for dir_name, _, files in os.walk(abs_path): - for filename in files: - fn = os.path.join(dir_name, filename) - zf.write(fn, fn[path_len:]) - return zip_file - - def find_murano_image(self, image_type): - """This method allows to find Windows images with Murano tag. - - Returns the image object or None - - image_type should be in [linux, windows.2012, cirros.demo] - """ - - tag = 'murano_image_info' - - for image in self.compute_client.images.list(): - if tag in image.metadata and image.status.lower() == 'active': - metadata = jsonutils.loads(image.metadata[tag]) - if image_type == metadata['type']: - return image - - def list_environments(self): - """This method allows to get the list of environments. - - Returns the list of environments. - """ - - resp = requests.get(self.endpoint + 'environments', - headers=self.headers, verify=False) - return resp.json() - - def create_environment(self, name): - """This method allows to create environment. - - Input parameters: - name - Name of new environment - - Returns new environment. 
- """ - - environment = self.murano_client.environments.create({'name': name}) - self.environments.append(environment.id) - return environment - - def get_environment(self, environment_id): - """This method allows to get specific environment by ID. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment (optional) - - Returns specific environment. - """ - - return self.murano_client.environments.get(environment_id) - - def update_environment(self, environment_id, new_name): - """This method allows to update specific environment by ID. - - Input parameters: - environment_id - ID of environment - new_name - New name for environment - - Returns new environment. - """ - - return self.murano_client.environments.update(environment_id, new_name) - - def delete_environment(self, environment_id): - """This method allows to delete specific environment by ID. - - Input parameters: - environment_id - ID of environment - - Returns None. - """ - - self.murano_client.environments.delete(environment_id) - return self.environments.remove(environment_id) - - def environment_delete_check(self, environment_id, timeout=120): - resp = requests.get('{0}environments/{1}'.format(self.endpoint, - environment_id), - headers=self.headers, verify=False) - self.delete_environment(environment_id) - point = time.time() - while resp.status_code == 200: - if time.time() - point > timeout: - self.fail("Can't delete environment more than {0} seconds". - format(timeout)) - resp = requests.get('{0}environments/{1}'.format(self.endpoint, - environment_id), - headers=self.headers, verify=False) - try: - env = resp.json() - if env["status"] == "delete failure": - self.fail("Environment status: {0}".format(env["status"])) - except Exception: - LOG.debug("Failed to get environment status " - "or environment no more exists") - time.sleep(1) - - def create_session(self, environment_id): - """This method allows to create session for environment. - - Input parameters: - environment_id - ID of environment - - Returns new session. - """ - - return self.murano_client.sessions.configure(environment_id) - - def get_session(self, environment_id, session_id): - """This method allows to get specific session. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - - Returns specific session. - """ - - return self.murano_client.sessions.get(environment_id, session_id) - - def delete_session(self, environment_id, session_id): - """This method allows to delete session for environment. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - - Returns None. - """ - - return self.murano_client.sessions.delete(environment_id, session_id) - - def deploy_session(self, environment_id, session_id): - """This method allows to deploy session for environment. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - - Returns specific session. - """ - - endpoint = '{0}environments/{1}/sessions/{2}/deploy'.format( - self.endpoint, environment_id, session_id) - return requests.post(endpoint, data=None, headers=self.headers, - verify=False) - - def create_service(self, environment_id, session_id, json_data): - """This method allows to create service. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - json_data - JSON with service description - - Returns specific service. 
- """ - headers = self.headers.copy() - headers.update({'x-configuration-session': session_id}) - endpoint = '{0}environments/{1}/services'.format(self.endpoint, - environment_id) - return requests.post(endpoint, data=jsonutils.dumps(json_data), - headers=headers, verify=False).json() - - def list_services(self, environment_id, session_id=None): - """This method allows to get list of services. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment (optional) - - Returns list of services. - """ - - return self.murano_client.services.get(environment_id, '/', session_id) - - def get_service(self, environment_id, session_id, service_id): - """This method allows to get service by ID. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - service_id - ID of service in this environment - - Returns specific service. - """ - - return self.murano_client.services.get(environment_id, - '/{0}'.format(service_id), - session_id) - - def delete_service(self, environment_id, session_id, service_id): - """This method allows to delete specific service. - - Input parameters: - environment_id - ID of environment - session_id - ID of session for this environment - service_id - ID of service in this environment - - Returns None. - """ - - return self.murano_client.services.delete(environment_id, - '/{0}'.format(service_id), - session_id) - - def deploy_check(self, environment): - """This method allows to wait for deployment of Murano evironments. - - Input parameters: - environment - Murano environment - - Returns environment. - """ - - environment = self.get_environment(environment.id) - while environment.status != 'ready': - time.sleep(5) - environment = self.get_environment(environment.id) - if environment.status == 'deploy failure': - LOG.error( - 'Environment has incorrect status' - ' %s' % environment.status) - self.fail( - 'Environment has incorrect status' - ' %s .' % environment.status) - return environment - - def deployments_status_check(self, environment_id): - """This method allows to check that deployment status is 'success'. - - Input parameters: - environment_id - ID of environment - - Returns 'OK'. - """ - - endpoint = '{0}environments/{1}/deployments'.format(self.endpoint, - environment_id) - deployments = requests.get(endpoint, - headers=self.headers, - verify=False).json()['deployments'] - for deployment in deployments: - # Save the information about all deployments - LOG.debug("Environment state: {0}".format(deployment['state'])) - r = requests.get('{0}/{1}'.format(endpoint, deployment['id']), - headers=self.headers, verify=False).json() - LOG.debug("Reports: {0}".format(r)) - - self.assertEqual('success', deployment['state']) - return 'OK' - - def check_port_access(self, ip, port): - output = '' - start_time = time.time() - while time.time() - start_time < 600: - # Check VM port availability from controller node: - output, err = self._run_ssh_cmd("nc -z {0} {1}; echo $?" - .format(ip, port)) - if '0' in output: - break - time.sleep(5) - self.assertIn('0', output, '%s port is closed on instance' % port) - - def port_status_check(self, environment, configurations): - """Function which gives opportunity to check multiple instances - :param environment: Murano environment - :param configurations: Array of configurations. - - Example: [[instance_name, *ports], [instance_name, *ports]] ... 
- """ - for configuration in configurations: - inst_name = configuration[0] - ports = configuration[1:] - ip = self.get_ip_by_instance_name(environment, inst_name) - if ip and ports: - for port in ports: - self.check_port_access(ip, port) - else: - self.fail('Instance does not have floating IP') - - def get_ip_by_instance_name(self, environment, inst_name): - """Returns ip of instance using instance name - :param environment: Murano environment - :param name: String, which is substring of name of instance or name of - instance - :return: - """ - for service in environment.services: - if inst_name in service['instance']['name']: - return service['instance']['floatingIpAddress'] - - def get_list_packages(self, artifacts=False): - try: - if artifacts: - packages_list = self.murano_art_client.packages.list() - packages = [] - for package in packages_list: - packages.append(package) - else: - packages_list = self.murano_client.packages.list() - packages = list(packages_list) - except exceptions.ClientException: - self.fail("Can not get list of packages") - LOG.debug('Packages List: {0}'.format(packages)) - self.assertIsInstance(packages, list) - return packages - - def generate_fqn_list(self, artifacts=False): - fqn_list = [] - packages = self.get_list_packages(artifacts) - for package in packages: - fqn_list.append(package.to_dict()['fully_qualified_name']) - LOG.debug('FQN List: {0}'.format(fqn_list)) - return fqn_list - - def upload_package(self, package_name, body, app, artifacts=False): - files = {'%s' % package_name: open(app, 'rb')} - if artifacts: - package = self.murano_art_client.packages.create(body, files) - else: - package = self.murano_client.packages.create(body, files) - self.packages.append(package) - return package - - def package_exists(self, artifacts=False, *packages): - fqn_list = self.generate_fqn_list(artifacts) - LOG.debug("Response for packages is {0}".format(fqn_list)) - for package in packages: - if package not in fqn_list: - return False - return True - - def get_package(self, package_id, artifacts=False): - if artifacts: - package = self.murano_art_client.packages.get(package_id) - else: - package = self.murano_client.packages.get(package_id) - return package - - def get_package_by_fqdn(self, package_name, artifacts=False): - package_list = self.get_list_packages(artifacts) - for package in package_list: - if package.to_dict()["fully_qualified_name"] == package_name: - return package - - def delete_package(self, package_id, artifacts=False): - if artifacts: - self.murano_art_client.packages.delete(package_id) - else: - self.murano_client.packages.delete(package_id) - - def get_list_categories(self): - resp = requests.get(self.endpoint + 'catalog/packages/categories', - headers=self.headers, verify=False) - - self.assertEqual(200, resp.status_code) - self.assertIsInstance(resp.json()['categories'], list) - - def check_path(self, env, path, inst_name=None): - environment = env.manager.get(env.id) - if inst_name: - ip = self.get_ip_by_instance_name(environment, inst_name) - else: - ip = environment.services[0]['instance']['floatingIpAddress'] - uri = 'http://{0}/{1}'.format(ip, path) - cmd = "curl --connect-timeout 1 --head {0}".format(uri) - stdout, stderr = self._run_ssh_cmd(cmd) - if '404' in stdout: - self.fail("Service path unavailable") diff --git a/fuel_health/neutronmanager.py b/fuel_health/neutronmanager.py deleted file mode 100644 index 7c24d090..00000000 --- a/fuel_health/neutronmanager.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 Mirantis, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import fuel_health.nmanager - -LOG = logging.getLogger(__name__) - - -class NeutronBaseTest(fuel_health.nmanager.NovaNetworkScenarioTest): - - @classmethod - def setUpClass(cls): - super(NeutronBaseTest, cls).setUpClass() - cls.routers = {} - cls.subnets = [] - cls.networks = [] - cls.floating_ips = [] - cls.security_groups = {} - cls.ports = [] - - def setUp(self): - super(NeutronBaseTest, self).setUp() - self.check_clients_state() - self.tenant_id = self.identity_client.tenant_id - if not self.neutron_client: - self.skipTest('Neutron is unavailable.') - - def create_router(self, name): - external_network = None - for network in self.neutron_client.list_networks()["networks"]: - if network.get("router:external"): - external_network = network - - if not external_network: - self.fail('Cannot find the external network.') - - gw_info = { - "network_id": external_network["id"], - "enable_snat": True - } - - router_info = { - "router": { - "name": name, - "external_gateway_info": gw_info, - "tenant_id": self.tenant_id - } - } - - router = self.neutron_client.create_router(router_info)['router'] - self.routers.setdefault(router['id'], []) - - return router - - def create_network(self, name): - internal_network_info = { - "network": { - "name": name, - "tenant_id": self.tenant_id - } - } - - network = self.neutron_client.create_network( - internal_network_info)['network'] - self.networks.append(network) - - return network - - def create_subnet(self, internal_network): - subnet_info = { - "subnet": { - "network_id": internal_network['id'], - "ip_version": 4, - "cidr": "10.0.7.0/24", - "tenant_id": self.tenant_id - } - } - - subnet = self.neutron_client.create_subnet(subnet_info)['subnet'] - self.subnets.append(subnet) - - return subnet - - def uplink_subnet_to_router(self, router, subnet): - if not self.routers.get(router['id'], None): - self.routers[router['id']].append(subnet['id']) - - return self.neutron_client.add_interface_router( - router["id"], {"subnet_id": subnet["id"]}) - - def _remove_router(self, router, subnets_id=[]): - self.neutron_client.remove_gateway_router(router['id']) - - for subnet_id in subnets_id: - self.neutron_client.remove_interface_router( - router['id'], {"subnet_id": subnet_id}) - - self.neutron_client.delete_router(router['id']) - self.routers.pop(router['id']) - - def _remove_subnet(self, subnet): - self.neutron_client.delete_subnet(subnet['id']) - self.subnets.remove(subnet) - - def _remove_network(self, network): - self.neutron_client.delete_network(network['id']) - self.networks.remove(network) - - @classmethod - def _clean_floating_ips(cls): - if cls.floating_ips: - for ip in cls.floating_ips: - try: - cls.compute_client.floating_ips.delete(ip) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def _clear_networks(cls): - try: - [cls.compute_client.servers.delete(srv) - for srv in cls.compute_client.servers.list() - if 'ost1_' in 
srv.name] - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - for router in cls.routers: - try: - cls.neutron_client.remove_gateway_router( - router) - for subnet in cls.subnets: - cls.neutron_client.remove_interface_router( - router, - {"subnet_id": subnet['id']}) - cls.neutron_client.delete_router(router) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - for subnet in cls.subnets: - try: - cls.neutron_client.delete_subnet(subnet['id']) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - for network in cls.networks: - try: - cls.neutron_client.delete_network(network['id']) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - try: - sec_groups = cls.compute_client.security_groups.list() - [cls.compute_client.security_groups.delete(group) - for group in sec_groups - if 'ost1_test-secgroup-smoke' in group.name] - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def _cleanup_ports(cls): - for port in cls.ports: - try: - cls.neutron_client.delete_port(port['port']['id']) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def tearDownClass(cls): - super(NeutronBaseTest, cls).tearDownClass() - cls._clean_floating_ips() - cls._clear_networks() - cls._cleanup_ports() diff --git a/fuel_health/nmanager.py b/fuel_health/nmanager.py deleted file mode 100644 index 7ba904ff..00000000 --- a/fuel_health/nmanager.py +++ /dev/null @@ -1,1477 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import os -import time - -from keystoneauth1.identity import V2Password -from keystoneauth1.session import Session as KeystoneSession - -import fuel_health.common.utils.data_utils as data_utils - -LOG = logging.getLogger(__name__) - -# Default client libs -try: - import heatclient.v1.client -except Exception: - LOG.exception("") - LOG.warning('Heatclient could not be imported.') -try: - import muranoclient.v1.client -except Exception: - LOG.exception("") - LOG.warning('Muranoclient could not be imported.') -try: - import saharaclient.client -except Exception: - LOG.exception("") - LOG.warning('Sahara client could not be imported.') -try: - import ceilometerclient.v2.client -except Exception: - LOG.exception("") - LOG.warning('Ceilometer client could not be imported.') -try: - import neutronclient.neutron.client -except Exception: - LOG.exception("") - LOG.warning('Neutron client could not be imported.') -try: - import glanceclient -except Exception: - LOG.exception("") - LOG.warning('Glance client could not be imported') -try: - import ironicclient -except Exception: - LOG.exception("") - LOG.warning('Ironic client could not be imported') -try: - import muranoclient.glance.client as art_client -except Exception: - LOG.exception("") - LOG.warning('Artifacts client could not be imported') - -import aodhclient.client -import cinderclient.client -import glanceclient.client -import keystoneclient -import novaclient.client -import novaclient.exceptions as nova_exc - -from fuel_health.common import ssh as f_ssh -from fuel_health.common.utils.data_utils import rand_int_id -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import exceptions -import fuel_health.manager -import fuel_health.test -import keystoneauth1.identity -import keystoneauth1.session - - -class OfficialClientManager(fuel_health.manager.Manager): - """Manager that provides access to the official python clients for - calling various OpenStack APIs. 
- """ - - NOVACLIENT_VERSION = '2' - CINDERCLIENT_VERSION = '2' - - def __init__(self): - super(OfficialClientManager, self).__init__() - self.clients_initialized = False - self.traceback = '' - self.keystone_error_message = None - - self._keystone_session = None - self.compute_client = self._get_compute_client() - try: - self.identity_client = self._get_identity_client() - self.identity_v3_client = self._get_identity_client(version=3) - self.clients_initialized = True - except (keystoneclient.exceptions.AuthorizationFailure, - keystoneclient.exceptions.Unauthorized): - self.keystone_error_message = \ - exceptions.InvalidCredentials.message - except Exception as e: - LOG.error( - "Unexpected error durring intialize keystoneclient: {0}" - .format(e) - ) - LOG.exception("Unexpected error durring intialize keystoneclient") - - if self.clients_initialized: - self.glance_client = self._get_glance_client() - self.volume_client = self._get_volume_client() - self.heat_client = self._get_heat_client() - self.murano_client = self._get_murano_client() - self.sahara_client = self._get_sahara_client() - self.ceilometer_client = self._get_ceilometer_client() - self.neutron_client = self._get_neutron_client() - self.glance_client_v1 = self._get_glance_client(version=1) - self.ironic_client = self._get_ironic_client() - self.aodh_client = self._get_aodh_client() - self.artifacts_client = self._get_artifacts_client() - self.murano_art_client = self._get_murano_client(artifacts=True) - self.client_attr_names = [ - 'compute_client', - 'identity_client', - 'identity_v3_client', - 'glance_client', - 'glance_client_v1', - 'volume_client', - 'heat_client', - 'murano_client', - 'sahara_client', - 'ceilometer_client', - 'neutron_client', - 'ironic_client', - 'aodh_client', - 'artifacts_client', - 'murano_art_client' - ] - - @property - def keystone_session(self): - if not self._keystone_session: - auth = V2Password( - auth_url=self.config.identity.uri, - username=self.config.identity.admin_username, - password=self.config.identity.admin_password, - tenant_name=self.config.identity.admin_tenant_name) - - self._keystone_session = KeystoneSession(auth=auth, verify=False) - return self._keystone_session - - def _get_compute_client(self): - service_type = self.config.compute.catalog_type - return novaclient.client.Client(self.NOVACLIENT_VERSION, - session=self.keystone_session, - service_type=service_type, - no_cache=True, - insecure=True, - endpoint_type='publicURL') - - def _get_glance_client(self, version=2, username=None, password=None, - tenant_name=None): - if not username: - username = self.config.identity.admin_username - if not password: - password = self.config.identity.admin_password - if not tenant_name: - tenant_name = self.config.identity.admin_tenant_name - - keystone = self._get_identity_client(username, password, tenant_name) - try: - endpoint = keystone.service_catalog.url_for( - service_type='image', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize glance client') - return None - return glanceclient.client.Client(version, endpoint=endpoint, - token=keystone.auth_token, - insecure=True) - - def _get_volume_client(self, username=None, password=None, - tenant_name=None): - if not username: - username = self.config.identity.admin_username - if not password: - password = self.config.identity.admin_password - if not tenant_name: - tenant_name = self.config.identity.admin_tenant_name - - auth_url = self.config.identity.uri - return 
cinderclient.client.Client(self.CINDERCLIENT_VERSION, - username, - password, - tenant_name, - auth_url, - insecure=True, - endpoint_type='publicURL') - - def _get_identity_client(self, username=None, password=None, - tenant_name=None, version=None): - if not username: - username = self.config.identity.admin_username - if not password: - password = self.config.identity.admin_password - if not tenant_name: - tenant_name = self.config.identity.admin_tenant_name - - if None in (username, password, tenant_name): - msg = ("Missing required credentials for identity client. " - "username: {username}, password: {password}, " - "tenant_name: {tenant_name}").format( - username=username, - password=password, - tenant_name=tenant_name, ) - raise exceptions.InvalidConfiguration(msg) - - auth_url = self.config.identity.uri - - if not version or version == 2: - return keystoneclient.v2_0.client.Client(username=username, - password=password, - tenant_name=tenant_name, - auth_url=auth_url, - insecure=True) - elif version == 3: - helper_list = auth_url.rstrip("/").split("/") - helper_list[-1] = "v3/" - auth_url = "/".join(helper_list) - - return keystoneclient.v3.client.Client(username=username, - password=password, - project_name=tenant_name, - auth_url=auth_url, - insecure=True) - else: - LOG.warning("Version:{0} for keystoneclient is not " - "supported with OSTF".format(version)) - - def _get_heat_client(self, username=None, password=None, - tenant_name=None): - if not username: - username = self.config.identity.admin_username - if not password: - password = self.config.identity.admin_password - if not tenant_name: - tenant_name = self.config.identity.admin_tenant_name - keystone = self._get_identity_client(username, password, tenant_name) - token = keystone.auth_token - try: - endpoint = keystone.service_catalog.url_for( - service_type='orchestration', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize heat client, endpoint not found') - return None - else: - return heatclient.v1.client.Client(endpoint=endpoint, - token=token, - insecure=True) - - def _get_murano_client(self, artifacts=False): - """This method returns Murano API client - """ - keystone = self._get_identity_client( - self.config.identity.admin_username, - self.config.identity.admin_password, - self.config.identity.admin_tenant_name) - # Get xAuth token from Keystone - self.token_id = keystone.auth_token - - try: - endpoint = keystone.service_catalog.url_for( - service_type='application-catalog', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Endpoint for Murano service ' - 'not found. Murano client cannot be initialized.') - return - - if artifacts: - return muranoclient.v1.client.Client( - endpoint, - token=self.token_id, - insecure=True, artifacts_client=self.artifacts_client) - else: - return muranoclient.v1.client.Client( - endpoint, - token=self.token_id, - insecure=True) - - def _get_sahara_client(self): - sahara_api_version = self.config.sahara.api_version - - keystone = self._get_identity_client() - try: - sahara_url = keystone.service_catalog.url_for( - service_type='data-processing', endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Endpoint for Sahara service ' - 'not found. 
Sahara client cannot be initialized.') - return None - auth_token = keystone.auth_token - - return saharaclient.client.Client(sahara_api_version, - sahara_url=sahara_url, - input_auth_token=auth_token, - insecure=True) - - def _get_ceilometer_client(self): - keystone = self._get_identity_client() - try: - endpoint = keystone.service_catalog.url_for( - service_type='metering', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize ceilometer client') - return None - - return ceilometerclient.v2.Client(endpoint=endpoint, insecure=True, - verify=False, - token=lambda: keystone.auth_token) - - def _get_neutron_client(self, version='2.0'): - keystone = self._get_identity_client() - - try: - endpoint = keystone.service_catalog.url_for( - service_type='network', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize neutron client') - return None - - return neutronclient.neutron.client.Client(version, - token=keystone.auth_token, - endpoint_url=endpoint, - insecure=True) - - def _get_ironic_client(self, version='1'): - keystone = self._get_identity_client() - try: - endpoint = keystone.service_catalog.url_for( - service_type='baremetal', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize ironic client') - return None - - return ironicclient.client.get_client( - version, - os_auth_token=keystone.auth_token, - ironic_url=endpoint, insecure=True) - - def _get_artifacts_client(self, version='1'): - keystone = self._get_identity_client() - try: - endpoint = keystone.service_catalog.url_for( - service_type='artifact', - endpoint_type='publicURL') - except keystoneclient.exceptions.EndpointNotFound: - LOG.warning('Can not initialize artifacts client') - return None - return art_client.Client(endpoint=endpoint, - type_name='murano', - type_version=version, - token=keystone.auth_token, - insecure=True) - - def _get_aodh_client(self, version='2'): - username = self.config.identity.admin_username - password = self.config.identity.admin_password - tenant = self.config.identity.admin_tenant_name - auth_url = self.config.identity.uri - auth = keystoneauth1.identity.v2.Password( - auth_url=auth_url, username=username, - password=password, tenant_name=tenant) - sess = keystoneauth1.session.Session(auth=auth, verify=False) - return aodhclient.client.Client(version, sess) - - -class OfficialClientTest(fuel_health.test.TestCase): - manager_class = OfficialClientManager - - @classmethod - def find_micro_flavor(cls): - return [flavor for flavor in cls.compute_client.flavors.list() - if flavor.name == 'm1.micro'] - - def _create_volume(self, client, expected_state=None, **kwargs): - kwargs.setdefault('name', rand_name('ostf-test-volume')) - kwargs.setdefault('size', 1) - volume = client.volumes.create(**kwargs) - self.set_resource(kwargs['name'], volume) - if expected_state: - def await_state(): - if client.volumes.get(volume.id).status == expected_state: - return True - - fuel_health.test.call_until_true(await_state, 50, 1) - - return volume - - def _create_snapshot(self, client, volume_id, expected_state=None, - **kwargs): - kwargs.setdefault('name', rand_name('ostf-test-volume')) - snapshot = client.volume_snapshots.create(volume_id, **kwargs) - self.set_resource(kwargs['name'], snapshot) - if expected_state: - def await_state(): - if client.volume_snapshots.get( - snapshot.id).status == expected_state: - return True - 
fuel_health.test.call_until_true(await_state, 50, 1) - - return snapshot - - def get_image_from_name(self, img_name=None): - if img_name: - image_name = img_name - else: - image_name = self.manager.config.compute.image_name - images = [i for i in self.compute_client.images.list() - if i.status.lower() == 'active'] - image_id = '' - LOG.debug(images) - if images: - for im in images: - LOG.debug(im.name) - if (im.name and - im.name.strip().lower() == - image_name.strip().lower()): - image_id = im.id - if not image_id: - raise exceptions.ImageFault - return image_id - - def _delete_server(self, server): - LOG.debug("Deleting server.") - self.compute_client.servers.delete(server) - - def is_deletion_complete(): - try: - server.get() - except Exception as exc: - if exc.__class__.__name__ == 'NotFound': - return True - LOG.exception(exc) - return False - - fuel_health.test.call_until_true( - is_deletion_complete, 20, 10) - - def retry_command(self, retries, timeout, method, *args, **kwargs): - for i in range(retries): - try: - result = method(*args, **kwargs) - LOG.debug("Command execution successful. " - "Result {0}".format(result)) - if 'False' in result: - raise exceptions.SSHExecCommandFailed( - 'Command {0} finishes with False'.format( - kwargs.get('command'))) - else: - return result - except Exception as exc: - LOG.debug("%s. Another effort needed." % exc) - time.sleep(timeout) - if 'ping' not in kwargs.get('command'): - self.fail('Execution command on Instance fails ' - 'with unexpected result. ') - self.fail("Instance is not reachable by IP.") - - def get_availability_zone(self, image_id=None): - disk = self.glance_client_v1.images.get(image_id).disk_format - if disk == 'vmdk': - az_name = 'vcenter' - else: - az_name = 'nova' - return az_name - - def check_clients_state(self): - if not self.manager.clients_initialized: - LOG.debug("Unable to initialize Keystone client: {trace}".format( - trace=self.manager.traceback)) - if self.manager.keystone_error_message: - self.fail(self.manager.keystone_error_message) - else: - self.fail("Keystone client is not available. Please, refer" - " to OpenStack logs to fix this problem") - - def check_image_exists(self): - try: - self.get_image_from_name() - except exceptions.ImageFault as exc: - LOG.debug(exc) - self.fail("{image} image not found. Please, download " - "http://download.cirros-cloud.net/0.3.1/" - "cirros-0.3.1-x86_64-disk.img image and " - "register it in Glance with name '{image}' as " - "'admin' tenant." - .format(image=self.manager.config.compute.image_name) - ) - except nova_exc.ClientException: - LOG.exception("") - self.fail("Image can not be retrieved. " - "Please refer to OpenStack logs for more details") - - @classmethod - def _clean_flavors(cls): - if cls.flavors: - for flavor in cls.flavors: - try: - cls.compute_client.flavors.delete(flavor) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def _clean_images(cls): - if cls.images: - for image_id in cls.images: - try: - cls.glance_client.images.delete(image_id) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def tearDownClass(cls): - cls.error_msg = [] - while cls.os_resources: - thing = cls.os_resources.pop() - LOG.debug("Deleting %r from shared resources of %s" % - (thing, cls.__name__)) - - try: - # OpenStack resources are assumed to have a delete() - # method which destroys the resource... 
- thing.delete() - except Exception as exc: - # If the resource is already missing, mission accomplished. - if exc.__class__.__name__ == 'NotFound': - continue - cls.error_msg.append(exc) - LOG.exception(exc) - - def is_deletion_complete(): - # Deletion testing is only required for objects whose - # existence cannot be checked via retrieval. - if isinstance(thing, dict): - return True - try: - thing.get() - except Exception as exc: - # Clients are expected to return an exception - # called 'NotFound' if retrieval fails. - if exc.__class__.__name__ == 'NotFound': - return True - cls.error_msg.append(exc) - LOG.exception(exc) - return False - - # Block until resource deletion has completed or timed-out - fuel_health.test.call_until_true(is_deletion_complete, 20, 10) - - -class NovaNetworkScenarioTest(OfficialClientTest): - """Base class for nova network scenario tests.""" - - @classmethod - def setUpClass(cls): - super(NovaNetworkScenarioTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.host = cls.config.compute.online_controllers - cls.usr = cls.config.compute.controller_node_ssh_user - cls.pwd = cls.config.compute.controller_node_ssh_password - cls.key = cls.config.compute.path_to_private_key - cls.timeout = cls.config.compute.ssh_timeout - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.network = [] - cls.floating_ips = [] - cls.error_msg = [] - cls.flavors = [] - cls.images = [] - cls.ports = [] - cls.private_net = cls.config.network.private_net - - def setUp(self): - super(NovaNetworkScenarioTest, self).setUp() - self.check_clients_state() - - def _run_ssh_cmd(self, cmd): - """Open SSH session with Controller and execute command.""" - if not self.host: - self.fail('Wrong test configuration: ' - '"online_controllers" parameter is empty.') - - try: - sshclient = f_ssh.Client(self.host[0], self.usr, self.pwd, - key_filename=self.key, - timeout=self.timeout) - return sshclient.exec_longrun_command(cmd) - except Exception: - LOG.exception("") - self.fail("%s command failed." % cmd) - - def _create_keypair(self, client, namestart='ost1_test-keypair-smoke-'): - kp_name = rand_name(namestart) - keypair = client.keypairs.create(kp_name) - self.set_resource(kp_name, keypair) - self.verify_response_body_content(keypair.id, - kp_name, - 'Keypair creation failed') - return keypair - - def _create_security_group( - self, client, namestart='ost1_test-secgroup-smoke-netw'): - # Create security group - sg_name = rand_name(namestart) - sg_desc = sg_name + " description" - secgroup = client.security_groups.create(sg_name, sg_desc) - self.verify_response_body_content(secgroup.name, - sg_name, - "Security group creation failed") - self.verify_response_body_content(secgroup.description, - sg_desc, - "Security group creation failed") - - # Add rules to the security group - - # These rules are intended to permit inbound ssh and icmp - # traffic from all sources, so no group_id is provided. - # Setting a group_id would only permit traffic from ports - # belonging to the same security group. 
- rulesets = [ - { - # ssh - 'ip_protocol': 'tcp', - 'from_port': 22, - 'to_port': 22, - 'cidr': '0.0.0.0/0', - }, - { - # ping - 'ip_protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'cidr': '0.0.0.0/0', - } - ] - for ruleset in rulesets: - try: - client.security_group_rules.create(secgroup.id, **ruleset) - except Exception: - LOG.exception("") - self.fail("Failed to create rule in security group.") - - return secgroup - - def _create_network(self, label='ost1_test-network-smoke-'): - n_label = rand_name(label) - cidr = self.config.network.tenant_network_cidr - networks = self.compute_client.networks.create( - label=n_label, cidr=cidr) - self.set_resource(n_label, networks) - self.network.append(networks) - self.verify_response_body_content(networks.label, - n_label, - "Network creation failed") - return networks - - def _create_port(self, net_id, vnic_type, label='ost1_test-port-'): - n_label = rand_name(label) - port_data = { - 'name': n_label, - 'binding:vnic_type': vnic_type, - 'network_id': net_id, - } - port = self.neutron_client.create_port({'port': port_data}) - self.set_resource(n_label, port) - self.ports.append(port) - LOG.debug(port) - self.verify_response_body_content(port['port']['name'], - n_label, - "Port creation failed") - return port - - @classmethod - def _clear_networks(cls): - try: - for net in cls.network: - cls.compute_client.networks.delete(net) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - @classmethod - def _clear_security_groups(cls): - try: - sec_groups = cls.compute_client.security_groups.list() - [cls.compute_client.security_groups.delete(group) - for group in sec_groups - if 'ost1_test-' in group.name] - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - def _list_networks(self): - nets = self.compute_client.networks.list() - return nets - - def _create_server(self, client, name, security_groups=None, - flavor_id=None, net_id=None, img_name=None, - data_file=None, az_name=None, port=None): - create_kwargs = {} - - if img_name: - base_image_id = self.get_image_from_name(img_name=img_name) - else: - base_image_id = self.get_image_from_name() - - if not az_name: - az_name = self.get_availability_zone(image_id=base_image_id) - - if not flavor_id: - if not self.find_micro_flavor(): - self.fail("Flavor for tests was not created. Seems that " - "something is wrong with nova services.") - else: - flavor = self.find_micro_flavor()[0] - - flavor_id = flavor.id - if not security_groups: - security_groups = [self._create_security_group( - self.compute_client).name] - if 'neutron' in self.config.network.network_provider: - create_kwargs['nics'] = [] - if net_id: - network = [net_id] - else: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - - if port: - create_kwargs['nics'].append({'port-id': port['port']['id']}) - else: - if network: - create_kwargs['nics'].append({'net-id': network[0]}) - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". 
- format(self.private_net)) - - create_kwargs['security_groups'] = security_groups - - server = client.servers.create(name, base_image_id, - flavor_id, files=data_file, - availability_zone=az_name, - **create_kwargs) - self.verify_response_body_content(server.name, - name, - "Instance creation failed") - self.set_resource(name, server) - self.status_timeout(client.servers, server.id, 'ACTIVE') - # The instance retrieved on creation is missing network - # details, necessitating retrieval after it becomes active to - # ensure correct details. - server = client.servers.get(server.id) - self.set_resource(name, server) - return server - - def _load_file(self, file_name): - path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "etc", file_name) - with open(path) as f: - return f.read() - - def _create_floating_ip(self): - floating_ips_pool = self.compute_client.floating_ip_pools.list() - - if floating_ips_pool: - floating_ip = self.compute_client.floating_ips.create( - pool=floating_ips_pool[0].name) - return floating_ip - else: - self.fail('No available floating IP found') - - def _assign_floating_ip_to_instance(self, client, server, floating_ip): - try: - client.servers.add_floating_ip(server, floating_ip) - except Exception: - LOG.exception("") - self.fail('Can not assign floating ip to instance') - - @classmethod - def _clean_floating_ips(cls): - if cls.floating_ips: - for ip in cls.floating_ips: - LOG.info('Floating_ip_for_deletion{0}'.format( - cls.floating_ips)) - try: - cls.compute_client.floating_ips.delete(ip) - except Exception as exc: - cls.error_msg.append(exc) - LOG.exception(exc) - - def _ping_ip_address(self, ip_address, timeout, retries): - def ping(): - cmd = "ping -q -c1 -w10 %s" % ip_address - - if self.host: - try: - ssh = f_ssh.Client(self.host[0], - self.usr, self.pwd, - key_filename=self.key, - timeout=timeout) - except Exception: - LOG.exception("") - - return self.retry_command(retries=retries[0], - timeout=retries[1], - method=ssh.exec_command, - command=cmd) - - else: - self.fail('Wrong tests configurations, one from the next ' - 'parameters are empty controller_node_name or ' - 'controller_node_ip ') - - # TODO(???) Allow configuration of execution and sleep duration. - return fuel_health.test.call_until_true(ping, 40, 1) - - def _ping_ip_address_from_instance(self, ip_address, timeout, - retries, viaHost=None): - def ping(): - if not (self.host or viaHost): - self.fail('Wrong tests configurations, one from the next ' - 'parameters are empty controller_node_name or ' - 'controller_node_ip ') - try: - host = viaHost or self.host[0] - LOG.debug('Get ssh to instance') - ssh = f_ssh.Client(host, - self.usr, self.pwd, - key_filename=self.key, - timeout=timeout) - - except Exception: - LOG.exception("") - - command = "ping -q -c1 -w10 8.8.8.8" - - return self.retry_command(retries[0], retries[1], - ssh.exec_command_on_vm, - command=command, - user='cirros', - password='cubswin:)', - vm=ip_address) - - # TODO(???) Allow configuration of execution and sleep duration. 
- return fuel_health.test.call_until_true(ping, 40, 1) - - def _run_command_on_instance(self, ip_address, timeout, retries, cmd, - viaHost=None): - def run_cmd(): - if not (self.host or viaHost): - self.fail('Wrong tests configurations, one from the next ' - 'parameters are empty controller_node_name or ' - 'controller_node_ip ') - try: - host = viaHost or self.host[0] - LOG.debug('Get ssh to instance') - ssh = f_ssh.Client(host, - self.usr, self.pwd, - key_filename=self.key, - timeout=timeout) - LOG.debug('Host is {0}'.format(host)) - - except Exception: - LOG.exception("") - - return self.retry_command(retries[0], retries[1], - ssh.exec_command_on_vm, - command=cmd, - user='cirros', - password='cubswin:)', - vm=ip_address) - - # TODO(???) Allow configuration of execution and sleep duration. - - return fuel_health.test.call_until_true(run_cmd, 40, 1) - - def _check_vm_connectivity(self, ip_address, timeout, retries): - self.assertTrue(self._ping_ip_address(ip_address, timeout, retries), - "Timed out waiting for %s to become " - "reachable. Please, check Network " - "configuration" % ip_address) - - def _check_connectivity_from_vm(self, ip_address, - timeout, retries, - viaHost=None): - self.assertTrue(self._ping_ip_address_from_instance(ip_address, - timeout, retries, - viaHost=viaHost), - "Timed out waiting for %s to become " - "reachable. Please, check Network " - "configuration" % ip_address) - - def _run_command_from_vm(self, ip_address, - timeout, retries, cmd, viaHost=None): - self.assertTrue( - self._run_command_on_instance( - ip_address, timeout, retries, cmd, viaHost=viaHost), - "Timed out waiting for %s to become reachable. " - "Please, check Network configuration" % ip_address) - - def get_compute_hostname(self): - return self.compute_client.hypervisors.list() - - def get_instance_details(self, instance): - return self.compute_client.servers.get(instance) - - def get_instance_host(self, instance): - return getattr(self.get_instance_details(instance), - "OS-EXT-SRV-ATTR:host") - - def get_free_host(self, instance): - current_host = self.get_instance_host(instance) - LOG.debug('Current host is {0}'.format(current_host)) - available_hosts = self.get_compute_hostname() - for host in available_hosts: - hostname = host.service.get('host') - if host.hypervisor_type != 'VMware vCenter Server' and \ - hostname != current_host: - return hostname - - def migrate_instance(self, instance, host_to): - instance.live_migrate(host_to) - self.status_timeout(self.compute_client.servers, - instance.id, 'ACTIVE') - return instance - - @classmethod - def tearDownClass(cls): - super(NovaNetworkScenarioTest, cls).tearDownClass() - if cls.manager.clients_initialized: - cls._clean_floating_ips() - cls._clear_security_groups() - cls._clear_networks() - - -class PlatformServicesBaseClass(NovaNetworkScenarioTest): - - def get_max_free_compute_node_ram(self, min_required_ram_mb): - max_free_ram_mb = 0 - for hypervisor in self.compute_client.hypervisors.list(): - if hypervisor.free_ram_mb >= min_required_ram_mb: - return hypervisor.free_ram_mb - else: - if hypervisor.free_ram_mb > max_free_ram_mb: - max_free_ram_mb = hypervisor.free_ram_mb - - return max_free_ram_mb - - # Methods for creating network resources. - def create_network_resources(self): - """This method creates network resources. - - It creates a network, an internal subnet on the network, a router and - links the network to the router. All resources created by this method - will be automatically deleted. 
- """ - - private_net_id = None - floating_ip_pool = None - - if self.config.network.network_provider == 'neutron': - ext_net = self.find_external_network() - net_name = data_utils.rand_name('ostf-platform-service-net-') - net = self._create_net(net_name) - subnet = self._create_internal_subnet(net) - router_name = data_utils.rand_name('ostf-platform-service-router-') - router = self._create_router(router_name, ext_net) - self.neutron_client.add_interface_router( - router['id'], {'subnet_id': subnet['id']}) - self.addCleanup(self.neutron_client.remove_interface_router, - router['id'], {'subnet_id': subnet['id']}) - self.addCleanup( - self.neutron_client.remove_gateway_router, router['id']) - - private_net_id = net['id'] - floating_ip_pool = ext_net['id'] - else: - if not self.config.compute.auto_assign_floating_ip: - fl_ip_pools = self.compute_client.floating_ip_pools.list() - floating_ip_pool = next(fl_ip_pool.name - for fl_ip_pool in fl_ip_pools - if fl_ip_pool.is_loaded()) - - return private_net_id, floating_ip_pool - - def find_external_network(self): - """This method finds the external network.""" - - LOG.debug('Finding external network...') - for net in self.neutron_client.list_networks()['networks']: - if net['router:external']: - LOG.debug('External network found. Ext net: {0}'.format(net)) - return net - - self.fail('Cannot find the external network.') - - def _create_net(self, name): - """This method creates a network. - - All resources created by this method will be automatically deleted. - """ - - LOG.debug('Creating network with name "{0}"...'.format(name)) - net_body = { - 'network': { - 'name': name, - 'tenant_id': self.tenant_id - } - } - net = self.neutron_client.create_network(net_body)['network'] - self.addCleanup(self.neutron_client.delete_network, net['id']) - LOG.debug('Network "{0}" has been created. Net: {1}'.format(name, net)) - - return net - - def _create_internal_subnet(self, net): - """This method creates an internal subnet on the network. - - All resources created by this method will be automatically deleted. - """ - - LOG.debug('Creating subnet...') - subnet_body = { - 'subnet': { - 'network_id': net['id'], - 'ip_version': 4, - 'cidr': '10.1.7.0/24', - 'tenant_id': self.tenant_id - } - } - subnet = self.neutron_client.create_subnet(subnet_body)['subnet'] - self.addCleanup(self.neutron_client.delete_subnet, subnet['id']) - LOG.debug('Subnet has been created. Subnet: {0}'.format(subnet)) - - return subnet - - def _create_router(self, name, ext_net): - """This method creates a router. - - All resources created by this method will be automatically deleted. - """ - - LOG.debug('Creating router with name "{0}"...'.format(name)) - router_body = { - 'router': { - 'name': name, - 'external_gateway_info': { - 'network_id': ext_net['id'] - }, - 'tenant_id': self.tenant_id - } - } - router = self.neutron_client.create_router(router_body)['router'] - self.addCleanup(self.neutron_client.delete_router, router['id']) - LOG.debug('Router "{0}" has been created. ' - 'Router: {1}'.format(name, router)) - - return router - - def get_info_about_available_resources(self, min_ram, min_hdd, min_vcpus): - """This function allows to get the information about resources. - - We need to collect the information about available RAM, HDD and vCPUs - on all compute nodes for cases when we will create more than 1 VM. - - This function returns the count of VMs with required parameters which - we can successfully run on existing cloud. 
- """ - vms_count = 0 - for hypervisor in self.compute_client.hypervisors.list(): - if hypervisor.free_ram_mb >= min_ram: - if hypervisor.free_disk_gb >= min_hdd: - if hypervisor.vcpus - hypervisor.vcpus_used >= min_vcpus: - # We need to determine how many VMs we can run - # on this hypervisor - free_cpu = hypervisor.vcpus - hypervisor.vcpus_used - k1 = int(hypervisor.free_ram_mb / min_ram) - k2 = int(hypervisor.free_disk_gb / min_hdd) - k3 = int(free_cpu / min_vcpus) - vms_count += min(k1, k2, k3) - return vms_count - - # Methods for finding and checking Sahara images. - def find_and_check_image(self, tag_plugin, tag_version): - """This method finds a correctly registered Sahara image. - - It finds a Sahara image by specific tags and checks whether the image - is correctly registered or not. - """ - - LOG.debug('Finding and checking image for Sahara...') - image = self._find_image_by_tags(tag_plugin, tag_version) - if image is not None: - self.ssh_username = image.metadata.get('_sahara_username', None) - msg = 'Image "{0}" is registered for Sahara with username "{1}".' - - if self.ssh_username is not None: - LOG.debug(msg.format(image.name, self.ssh_username)) - return image.id - - LOG.debug('Image is not correctly registered or it is not ' - 'registered at all. Correct image for Sahara not found.') - - def _find_image_by_tags(self, tag_plugin, tag_version): - """This method finds a Sahara image by specific tags.""" - - tag_plug = '_sahara_tag_' + tag_plugin - tag_ver = '_sahara_tag_' + tag_version - msg = 'Image with tags "{0}" and "{1}" found. Image name is "{2}".' - - for image in self.compute_client.images.list(): - if image.status.lower() == 'active': - if tag_plug in image.metadata and tag_ver in image.metadata: - LOG.debug(msg.format(tag_plugin, tag_version, image.name)) - return image - LOG.debug('Image with tags "{0}" and "{1}" ' - 'not found.'.format(tag_plugin, tag_version)) - - # Method for checking whether or not resource is deleted. - def is_resource_deleted(self, get_method): - """This method checks whether or not the resource is deleted. - - The API request is wrapped in the try/except block to correctly handle - the "404 Not Found" exception. If the resource doesn't exist, this - method will return True. Otherwise it will return False. - """ - - try: - get_method() - except Exception as exc: - exc_msg = exc.message.lower() - if ('not found' in exc_msg) or ('could not be found' in exc_msg): - return True - self.fail(exc.message) - - return False - - # Methods for deleting resources. - def delete_resource(self, delete_method, get_method=None, timeout=300, - sleep=5): - """This method deletes the resource by its ID and checks whether - the resource is really deleted or not. - """ - - try: - delete_method() - except Exception as exc: - LOG.warning(exc.message) - return - if get_method: - self._wait_for_deletion(get_method, timeout, sleep) - - def _wait_for_deletion(self, get_method, timeout, sleep): - """This method waits for the resource deletion.""" - - start = time.time() - while time.time() - start < timeout: - if self.is_resource_deleted(get_method): - return - time.sleep(sleep) - - self.fail('Request timed out. 
' - 'Timed out while waiting for one of the test resources ' - 'to delete within {0} seconds.'.format(timeout)) - - -class SanityChecksTest(OfficialClientTest): - """Base class for openstack sanity tests.""" - - _enabled = True - - @classmethod - def check_preconditions(cls): - cls._enabled = True - if cls.config.network.neutron_available: - cls._enabled = False - else: - cls._enabled = True - # ensure the config says true - try: - cls.compute_client.networks.list() - except exceptions.EndpointNotFound: - cls._enabled = False - - def setUp(self): - super(SanityChecksTest, self).setUp() - self.check_clients_state() - if not self._enabled: - self.skipTest('Nova Networking is not available') - - @classmethod - def setUpClass(cls): - super(SanityChecksTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.network = [] - cls.floating_ips = [] - - @classmethod - def tearDownClass(cls): - pass - - def _list_instances(self, client): - instances = client.servers.list() - return instances - - def _list_images(self, client): - images = client.images.list() - return images - - def _list_volumes(self, client): - volumes = client.volumes.list(detailed=False) - return volumes - - def _list_snapshots(self, client): - snapshots = client.volume_snapshots.list(detailed=False) - return snapshots - - def _list_flavors(self, client): - flavors = client.flavors.list() - return flavors - - def _list_limits(self, client): - limits = client.limits.get() - return limits - - def _list_services(self, client, host=None, binary=None): - services = client.services.list(host=host, binary=binary) - return services - - def _list_users(self, client): - users = client.users.list() - return users - - def _list_networks(self, client): - if hasattr(client, 'list_networks'): - return client.list_networks() - else: - return client.networks.list() - - def _list_stacks(self, client): - return client.stacks.list() - - -class SmokeChecksTest(OfficialClientTest): - """Base class for openstack smoke tests.""" - - @classmethod - def setUpClass(cls): - super(SmokeChecksTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.build_interval = cls.config.volume.build_interval - cls.build_timeout = cls.config.volume.build_timeout - cls.created_flavors = [] - cls.error_msg = [] - cls.private_net = cls.config.network.private_net - else: - cls.proceed = False - - def setUp(self): - super(SmokeChecksTest, self).setUp() - self.check_clients_state() - - def _create_flavors(self, client, ram, disk, vcpus=1, use_huge_page=False): - name = rand_name('ost1_test-flavor-') - flavorid = rand_int_id() - exist_ids = [flavor.id for flavor - in self.compute_client.flavors.list()] - - if flavorid in exist_ids: - flavorid = name + rand_int_id() - flavor = client.flavors.create(name=name, ram=ram, disk=disk, - vcpus=vcpus, flavorid=flavorid) - self.created_flavors.append(flavor) - - if use_huge_page: - # change flavor settings use hugepage - flavor_metadata = flavor.get_keys() - logging.debug(flavor_metadata) - flavor_metadata['hw:mem_page_size'] = '2048' - flavor.set_keys(flavor_metadata) - - return flavor - - def _delete_flavors(self, client, flavor): - 
self.created_flavors.remove(flavor) - client.flavors.delete(flavor) - - def _create_tenant(self, client): - name = rand_name('ost1_test-tenant-') - tenant = client.tenants.create(name) - self.set_resource(name, tenant) - return tenant - - def _create_user(self, client, tenant_id): - password = "123456" - email = "test@test.com" - name = rand_name('ost1_test-user-') - user = client.users.create(name, password, email, tenant_id) - self.set_resource(name, user) - return user - - def _create_role(self, client): - name = rand_name('ost1_test-role-') - role = client.roles.create(name) - self.set_resource(name, role) - return role - - def _create_boot_volume(self, client, img_name=None, **kwargs): - name = rand_name('ost1_test-bootable-volume') - - imageRef = self.get_image_from_name(img_name=img_name) - - LOG.debug( - 'Image ref is {0} for volume {1}'.format(imageRef, name)) - return self._create_volume( - client, name=name, imageRef=imageRef, **kwargs) - - def create_instance_from_volume(self, client, volume): - if not self.find_micro_flavor(): - self.fail("m1.micro flavor was not created.") - - name = rand_name('ost1_test-boot-volume-instance') - base_image_id = self.get_image_from_name() - bd_map = {'vda': volume.id + ':::0'} - az_name = self.get_availability_zone(image_id=base_image_id) - if 'neutron' in self.config.network.network_provider: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - if network: - create_kwargs = {'block_device_mapping': bd_map, - 'nics': [{'net-id': network[0]}]} - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". - format(self.private_net)) - server = client.servers.create( - name, base_image_id, self.find_micro_flavor()[0].id, - availability_zone=az_name, - **create_kwargs) - else: - create_kwargs = {'block_device_mapping': bd_map} - server = client.servers.create(name, base_image_id, - self.find_micro_flavor()[0].id, - availability_zone=az_name, - **create_kwargs) - - self.verify_response_body_content(server.name, - name, - "Instance creation failed") - # The instance retrieved on creation is missing network - # details, necessitating retrieval after it becomes active to - # ensure correct details. - server = self._wait_server_param(client, server, 'addresses', 5, 1) - self.set_resource(name, server) - return server - - def _create_server(self, client, img_name=None): - if not self.find_micro_flavor(): - self.fail("m1.micro flavor was not created.") - - name = rand_name('ost1_test-volume-instance') - - base_image_id = self.get_image_from_name(img_name=img_name) - az_name = self.get_availability_zone(image_id=base_image_id) - - if 'neutron' in self.config.network.network_provider: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - if network: - create_kwargs = {'nics': [{'net-id': network[0]}]} - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". 
- format(self.private_net)) - server = client.servers.create( - name, base_image_id, self.find_micro_flavor()[0].id, - availability_zone=az_name, - **create_kwargs) - else: - server = client.servers.create(name, base_image_id, - self.micro_flavors[0].id, - availability_zone=az_name) - - self.verify_response_body_content(server.name, - name, - "Instance creation failed") - # The instance retrieved on creation is missing network - # details, necessitating retrieval after it becomes active to - # ensure correct details. - server = self._wait_server_param(client, server, 'addresses', 5, 1) - self.set_resource(name, server) - return server - - def _wait_server_param(self, client, server, param_name, - tries=1, timeout=1, expected_value=None): - while tries: - val = getattr(server, param_name, None) - if val: - if (not expected_value) or (expected_value == val): - return server - time.sleep(timeout) - server = client.servers.get(server.id) - tries -= 1 - return server - - def _attach_volume_to_instance(self, volume, instance): - device = '/dev/vdb' - attached_volume = self.compute_client.volumes.create_server_volume( - volume_id=volume.id, server_id=instance, device=device) - return attached_volume - - def _detach_volume(self, server, volume): - volume = self.compute_client.volumes.delete_server_volume( - server_id=server, attachment_id=volume) - return volume - - def verify_volume_deletion(self, volume): - - def is_volume_deleted(): - try: - self.compute_client.volumes.get(volume.id) - except Exception as e: - if e.__class__.__name__ == 'NotFound': - return True - return False - - fuel_health.test.call_until_true(is_volume_deleted, 20, 10) - - @classmethod - def tearDownClass(cls): - super(SmokeChecksTest, cls).tearDownClass() - if cls.manager.clients_initialized: - if cls.created_flavors: - try: - cls.compute_client.flavors.delete(cls.created_flavors) - except Exception: - LOG.exception("OSTF test flavor cannot be deleted.") diff --git a/fuel_health/saharamanager.py b/fuel_health/saharamanager.py deleted file mode 100644 index 0c5217b3..00000000 --- a/fuel_health/saharamanager.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import time - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class SaharaTestsManager(nmanager.PlatformServicesBaseClass): - - def setUp(self): - super(SaharaTestsManager, self).setUp() - - self.check_clients_state() - - # Timeout (in seconds) to wait for cluster deployment. - self.cluster_timeout = 3000 - # Timeout (in seconds) to wait for cluster deletion. - self.delete_timeout = 300 - # Timeout (in seconds) between status checks. - self.request_timeout = 5 - # Timeout (in seconds) to wait for starting a Hadoop process - # on a cluster node. - self.process_timeout = 300 - # The minimum amount of available RAM for one of the compute nodes - # to run Sahara platform tests. 
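
Editor's note: the timeout attributes configured above all feed the same poll-until-ready loop that the Sahara manager uses later (for example in poll_cluster_status and _wait_for_deletion). The sketch below is illustrative only and is not part of the deleted module; the helper name wait_until and the commented-out predicate are hypothetical.

import time


def wait_until(predicate, timeout=3000, sleep=5):
    """Poll predicate() every `sleep` seconds until it returns True.

    Illustrative sketch only, not the original code. Returns True on
    success, False if `timeout` seconds elapse without success.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(sleep)
    return False


# Hypothetical usage mirroring the cluster checks further below:
# wait_until(lambda: sahara_client.clusters.get(cluster_id).status == 'Active',
#            timeout=cluster_timeout, sleep=request_timeout)
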
- self.min_required_ram_mb = 4096 - # The path to the file where a SSH private key for Sahara tests - # will be located. - self.path_to_private_key = '/tmp/sahara-ostf.pem' - - def create_flavor(self, ram=1024, vcpus=1, disk=20): - """This method creates a flavor for Sahara tests. - - All resources created by this method will be automatically deleted. - """ - - LOG.debug('Creating flavor for Sahara tests...') - name = rand_name('sahara-flavor-') - flavor = self.compute_client.flavors.create(name, ram, vcpus, disk) - self.addCleanup(self.compute_client.flavors.delete, flavor.id) - LOG.debug('Flavor for Sahara tests has been created.') - - return flavor.id - - def _create_key_pair(self): - """This method creates a key pair for Sahara platform tests. - - All resources created by this method will be automatically deleted. - """ - - LOG.debug('Creating key pair for Sahara tests...') - name = rand_name('sahara-key-pair-') - key_pair = self.compute_client.keypairs.create(name) - self.addCleanup(key_pair.delete) - self._run_ssh_cmd('echo "{0}" > {1}'.format(key_pair.private_key, - self.path_to_private_key)) - LOG.debug('Key pair for Sahara tests has been created.') - - return name - - # Methods for creating Sahara resources. - def create_cluster_template(self, name, plugin, - hadoop_version, node_groups, **kwargs): - """This method creates a cluster template. - - It supports passing additional params using **kwargs and returns ID - of created resource. All resources created by this method will be - automatically deleted. - """ - - LOG.debug('Creating cluster template with name "{0}"...'.format(name)) - # TODO(ylobankov): remove this loop after fixing bug #1314578 - for node_group in node_groups: - if 'floating_ip_pool' in node_group: - if node_group['floating_ip_pool'] is None: - del node_group['floating_ip_pool'] - cl_template = self.sahara_client.cluster_templates.create( - name, plugin, hadoop_version, node_groups=node_groups, **kwargs) - self.addCleanup( - self.delete_resource, - delete_method=lambda: self.sahara_client.cluster_templates.delete( - cl_template.id), - get_method=lambda: self.sahara_client.cluster_templates.get( - cl_template.id), - timeout=self.delete_timeout, sleep=self.request_timeout) - LOG.debug('Cluster template "{0}" has been created.'.format(name)) - - return cl_template.id - - def create_cluster(self, name, plugin, hadoop_version, - default_image_id, node_groups=None, **kwargs): - """This method creates a cluster. - - It supports passing additional params using **kwargs and returns ID - of created resource. All resources created by this method will be - automatically deleted. - """ - - key_pair_name = self._create_key_pair() - LOG.debug('Creating cluster with name "{0}"...'.format(name)) - cluster = self.sahara_client.clusters.create( - name, plugin, hadoop_version, default_image_id=default_image_id, - user_keypair_id=key_pair_name, node_groups=node_groups, **kwargs) - self.addCleanup( - self.delete_resource, - delete_method=lambda: self.sahara_client.clusters.delete( - cluster.id), - get_method=lambda: self.sahara_client.clusters.get(cluster.id), - timeout=self.delete_timeout, sleep=self.request_timeout) - LOG.debug('Cluster "{0}" has been created.'.format(name)) - - return cluster.id - - # Methods for checking cluster deployment. - def poll_cluster_status(self, cluster_id): - """This method polls cluster status. - - It polls cluster every seconds for some timeout and - waits for when cluster gets to "Active" status. 
- """ - - LOG.debug('Waiting for cluster to build and get to "Active" status...') - previous_cluster_status = 'An unknown cluster status' - start = time.time() - while time.time() - start < self.cluster_timeout: - cluster = self.sahara_client.clusters.get(cluster_id) - if cluster.status != previous_cluster_status: - LOG.debug('Currently cluster is ' - 'in "{0}" status.'.format(cluster.status)) - previous_cluster_status = cluster.status - if cluster.status == 'Active': - return - if cluster.status == 'Error': - self.fail('Cluster failed to build and is in "Error" status.') - time.sleep(self.request_timeout) - - self.fail('Cluster failed to get to "Active" ' - 'status within {0} seconds.'.format(self.cluster_timeout)) - - def check_hadoop_services(self, cluster_id, processes_map): - """This method checks deployment of Hadoop services on cluster. - - It checks whether all Hadoop processes are running on cluster nodes - or not. - """ - - LOG.debug('Checking deployment of Hadoop services on cluster...') - node_ips_and_processes = self._get_node_ips_and_processes(cluster_id) - for node_ip, processes in node_ips_and_processes.items(): - LOG.debug('Checking Hadoop processes ' - 'on node {0}...'.format(node_ip)) - for process in processes: - if process in processes_map: - LOG.debug('Checking process "{0}"...'.format(process)) - for port in processes_map[process]: - self._check_port(node_ip, port) - LOG.debug('Process "{0}" is running and listening ' - 'to port {1}.'.format(process, port)) - LOG.debug('All Hadoop processes are ' - 'running on node {0}.'.format(node_ip)) - LOG.debug( - 'All Hadoop services have been successfully deployed on cluster.') - - def _check_port(self, node_ip, port): - """This method checks accessibility of specific port on cluster node. - - It tries to establish connection to the process on specific port every - second for some timeout. - """ - - start = time.time() - while time.time() - start < self.process_timeout: - cmd = ("timeout {0} bash -c 'telnet {1} {2}'".format( - self.request_timeout, node_ip, port)) - output, output_err = self._run_ssh_cmd(cmd) - if 'Connected to {0}'.format(node_ip) in output: - return - time.sleep(self.request_timeout) - - self.fail('Port {0} on node {1} is unreachable for ' - '{2} seconds.'.format(port, node_ip, self.process_timeout)) - - def _get_node_ips_and_processes(self, cluster_id): - """This method makes dictionary with information of cluster nodes. - - Each key of dictionary is IP of cluster node, value is list of Hadoop - processes that must be started on node. 
- """ - - data = self.sahara_client.clusters.get(cluster_id) - node_ips_and_processes = {} - for node_group in data.node_groups: - for instance in node_group['instances']: - node_ip = instance['management_ip'] - node_ips_and_processes[node_ip] = node_group['node_processes'] - - return node_ips_and_processes - - def check_node_access_via_ssh(self, cluster_id): - """This method checks ability to log into cluster nodes via SSH.""" - - LOG.debug('Checking ability ' - 'to log into cluster nodes via SSH...') - cmd = ('ssh -i {0} ' - '-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null {1}@' - .format(self.path_to_private_key, self.ssh_username)) - for node_ip in self._get_node_ips_and_processes(cluster_id): - LOG.debug('Trying to log into node {0} via SSH...'.format(node_ip)) - self._run_ssh_cmd(cmd + node_ip + ' ls -a') - LOG.debug('Node {0} is accessible via SSH.'.format(node_ip)) - LOG.debug('All cluster nodes are accessible via SSH.') diff --git a/fuel_health/test.py b/fuel_health/test.py deleted file mode 100644 index 594d6c22..00000000 --- a/fuel_health/test.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2012 OpenStack, LLC -# Copyright 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -import testresources -import unittest2 - -from fuel_health.common import log as logging -from fuel_health.common import ssh -from fuel_health.common import test_mixins -from fuel_health import config - - -LOG = logging.getLogger(__name__) - - -class BaseTestCase(unittest2.TestCase, - testresources.ResourcedTestCase, - test_mixins.FuelTestAssertMixin): - - def __init__(self, *args, **kwargs): - super(BaseTestCase, self).__init__(*args, **kwargs) - - @classmethod - def setUpClass(cls): - if hasattr(super(BaseTestCase, cls), 'setUpClass'): - super(BaseTestCase, cls).setUpClass() - cls.config = config.FuelConfig() - - -def call_until_true(func, duration, sleep_for, *args): - """Call the given function until it returns True (and return True) or - until the specified duration (in seconds) elapses (and return - False). - - :param func: A zero argument callable that returns True on success. - :param duration: The number of seconds for which to attempt a - successful call of the function. - :param sleep_for: The number of seconds to sleep after an unsuccessful - invocation of the function. - """ - now = time.time() - timeout = now + duration - while now < timeout: - if args: - if func(*args): - return True - elif func(): - return True - LOG.debug("Sleeping for %d seconds", sleep_for) - time.sleep(sleep_for) - now = time.time() - return False - - -class TestCase(BaseTestCase): - """Base test case class for all tests - - Contains basic setup and convenience methods - """ - - manager_class = None - - @classmethod - def setUpClass(cls): - super(TestCase, cls).setUpClass() - cls.manager = cls.manager_class() - for attr_name in cls.manager.client_attr_names: - # Ensure that pre-existing class attributes won't be - # accidentally overridden. 
- assert not hasattr(cls, attr_name) - client = getattr(cls.manager, attr_name) - setattr(cls, attr_name, client) - cls.resource_keys = {} - cls.os_resources = [] - - def set_resource(self, key, thing): - LOG.debug("Adding %r to shared resources of %s" % - (thing, self.__class__.__name__)) - self.resource_keys[key] = thing - self.os_resources.append(thing) - - def get_resource(self, key): - return self.resource_keys[key] - - def remove_resource(self, key): - thing = self.resource_keys[key] - self.os_resources.remove(thing) - del self.resource_keys[key] - - def status_timeout(self, things, thing_id, expected_status): - """Given a thing and an expected status, do a loop, sleeping - for a configurable amount of time, checking for the - expected status to show. At any time, if the returned - status of the thing is ERROR, fail out. - """ - def check_status(): - # python-novaclient has resources available to its client - # that all implement a get() method taking an identifier - # for the singular resource to retrieve. - thing = things.get(thing_id) - new_status = thing.status.lower() - if new_status == 'error': - self.fail("Failed to get to expected status. " - "In error state.") - elif new_status == expected_status.lower(): - return True # All good. - LOG.debug("Waiting for %s to get to %s status. " - "Currently in %s status", - thing, expected_status, new_status) - conf = config.FuelConfig() - if not call_until_true(check_status, - conf.compute.build_timeout, - conf.compute.build_interval): - self.fail("Timed out waiting to become %s" - % expected_status) - - def run_ssh_cmd_with_exit_code(self, host, cmd): - """Open SSH session with host and execute command. - - Fail if exit code != 0 - """ - try: - sshclient = ssh.Client(host, self.usr, self.pwd, - key_filename=self.key, timeout=self.timeout) - return sshclient.exec_command(cmd) - except Exception: - LOG.exception("Failed while opening ssh session with host") - self.fail("{0} command failed.".format(cmd)) diff --git a/fuel_health/tests/__init__.py b/fuel_health/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_health/tests/cloudvalidation/__init__.py b/fuel_health/tests/cloudvalidation/__init__.py deleted file mode 100644 index ffab24bb..00000000 --- a/fuel_health/tests/cloudvalidation/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -Main purpose of following attribute is -to supply general information about test set. -This information will be stored in ostf database -in test_sets table. -''' -__profile__ = { - "test_runs_ordering_priority": 5, - "id": "cloudvalidation", - "driver": "nose", - "test_path": "fuel_health/tests/cloudvalidation", - "cleanup_path": "fuel_health.cleanup", - "description": "Cloud validation tests. 
Duration 30 sec - 2 min", - "exclusive_testsets": [], - "available_since_release": "2014.2-6.1", -} diff --git a/fuel_health/tests/cloudvalidation/test_disk_space_db.py b/fuel_health/tests/cloudvalidation/test_disk_space_db.py deleted file mode 100644 index 8750086f..00000000 --- a/fuel_health/tests/cloudvalidation/test_disk_space_db.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import cloudvalidation - - -class DBSpaceTest(cloudvalidation.CloudValidationTest): - """Cloud Validation Test class for free space for DB.""" - - def _check_db_disk_expectation_warning(self, host): - """Checks whether DB expects less free space than actually - is presented on the controller node - """ - scheduler_log = 'nova-scheduler.log' - - if self.config.compute.deployment_os.lower() == 'centos': - scheduler_log = 'scheduler.log' - - err_msg = "Cannot check {scheduler_log} at {host}".format( - host=host, scheduler_log=scheduler_log) - - warning_msg = "Host has more disk space than database expected" - cmd = "fgrep '{msg}' -q /var/log/nova/{scheduler_log}".format( - msg=warning_msg, scheduler_log=scheduler_log) - - out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg, - 'check nova-scheduler.log', host, cmd) - - self.verify_response_true(not err, err_msg, 1) - - return out - - def test_db_expectation_free_space(self): - """Check disk space allocation for databases on controller nodes - Target component: Nova - - Scenario: - 1. Check disk space allocation for databases on controller nodes - - Duration: 20 s. - - Deployment tags: disabled - - Available since release: 2014.2-6.1 - """ - - hosts = filter(self._check_db_disk_expectation_warning, - self.controllers) - - self.verify_response_true(not hosts, - ("Free disk space cannot be used " - "by database on node(s): {hosts}" - ).format(hosts=hosts), - 1) diff --git a/fuel_health/tests/cloudvalidation/test_disk_space_outage.py b/fuel_health/tests/cloudvalidation/test_disk_space_outage.py deleted file mode 100644 index c7d0a1b8..00000000 --- a/fuel_health/tests/cloudvalidation/test_disk_space_outage.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from fuel_health import cloudvalidation - -USED_SPACE_LIMIT_PERCENTS = 90 - - -class DiskSpaceTest(cloudvalidation.CloudValidationTest): - """Cloud Validation Test class for disk space checks.""" - - def _check_host_used_space(self, host): - """Returns used disk space in percentage on host.""" - - cmd = 'df --output=pcent | grep "[0-9]"' - err_msg = "Cannot check free space on host {host}".format(host=host) - - out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg, - 'check free space on host', host, cmd) - - partitions = [float(percent[:-1]) for percent in out.split()] - partitions = filter(lambda perc: perc >= USED_SPACE_LIMIT_PERCENTS, - partitions) - return partitions - - def test_disk_space_outage(self): - """Check disk space outage on controller and compute nodes - Target component: Nova - - Scenario: - 1. Check outage on controller and compute nodes - - Duration: 20 s. - - Available since release: 2014.2-6.1 - """ - usages = filter(self._check_host_used_space, - self.computes + self.controllers) - - err_msg = "Nearly disk outage state detected on host(s): %s" % usages - - self.verify_response_true(not usages, err_msg, 1) diff --git a/fuel_health/tests/cloudvalidation/test_keystone.py b/fuel_health/tests/cloudvalidation/test_keystone.py deleted file mode 100644 index b9c4e1e2..00000000 --- a/fuel_health/tests/cloudvalidation/test_keystone.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import cloudvalidation - - -class KeystoneTest(cloudvalidation.CloudValidationTest): - """Cloud Validation Test class for Keystone service.""" - - LOGFILE = '/var/log/keystone/keystone-all.log' - PATTERN_SSL = ('Signing error: Error opening signer certificate ' - '(.+)signing_cert.pem') - - def setUp(self): - super(KeystoneTest, self).setUp() - if self.config.identity.disable_ssl_certificate_validation: - self.skipTest('SSL certificate validation is disabled') - - def _check_ssl_issue(self, host): - """Check SSL issue on controller node.""" - - cmd = 'grep -E "{pattern}" "{logfile}"'.format( - pattern=self.PATTERN_SSL, - logfile=self.LOGFILE) - - err_msg = "Cannot check Keystone logs on host {host}".format(host=host) - - out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg, - 'check ssl certificate on host', host, cmd) - - return bool(out) - - def test_keystone_ssl_certificate(self): - """Check Keystone SSL certificate - Target component: Keystone - - Scenario: - 1. Check Keystone SSL certificate - - Duration: 20 s. 
- Deployment tags: disabled - - Available since release: 2015.1.0-8.0 - """ - - hosts = filter(self._check_ssl_issue, - self.controllers) - - err_msg = "Keystone SSL issue found on host(s): %s" % hosts - - self.verify_response_true(not hosts, err_msg, 1) diff --git a/fuel_health/tests/cloudvalidation/test_logrotate.py b/fuel_health/tests/cloudvalidation/test_logrotate.py deleted file mode 100644 index 9361ce53..00000000 --- a/fuel_health/tests/cloudvalidation/test_logrotate.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health import cloudvalidation - -LOG = logging.getLogger(__name__) - - -class LogRotationTest(cloudvalidation.CloudValidationTest): - """TestClass contains log rotation test.""" - - def test_logrotate(self): - """Check log rotation configuration on all nodes - Target component: Logging - - Scenario: - 1. Check logrotate cron job on all controller and compute nodes - Duration: 20 s. - - Available since release: 2014.2-6.1 - """ - cmd = ( - "find /etc/crontab /etc/cron.daily /etc/cron.hourly " - " /var/spool/cron/ -type f" - " | xargs grep -qP '^[^#]*logrotate'" - ) - - fail_msg = 'Logrotate is not configured on node(s) %s' - failed = set() - for host in self.controllers + self.computes: - try: - self.verify( - 5, self.run_ssh_cmd_with_exit_code, - 1, fail_msg % host, - 'checking logrotate', host, cmd) - except AssertionError: - failed.add(host) - - failed_hosts = ', '.join(failed) - self.verify_response_true(len(failed) == 0, fail_msg % failed_hosts, 1) diff --git a/fuel_health/tests/cloudvalidation/test_vm_auto_start.py b/fuel_health/tests/cloudvalidation/test_vm_auto_start.py deleted file mode 100644 index f157743c..00000000 --- a/fuel_health/tests/cloudvalidation/test_vm_auto_start.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import cloudvalidation - - -class VMBootTest(cloudvalidation.CloudValidationTest): - """Cloud Validation Test class for VMs.""" - - def _check_host_boot(self, host): - """Test resume_guest_state_on_host_boot option on compute node. - By default, this option is set to False. - """ - - err_msg = ('The option "resume_guest_state_on_host_boot" ' - 'is set to True at compute node {host}, so it can be ' - 'broken down by the paused VMs after host boot.' 
- ).format(host=host) - - cmd = ('grep ^[^#]*\s*resume_guests_state_on_host_boot\s*=\s*True ' - '/etc/nova/nova.conf') - - cmd_timeout = 5 - step = 1 - action = 'check host boot option' - - out, err = self.verify(cmd_timeout, self._run_ssh_cmd, step, err_msg, - action, host, cmd) - - auto_host_boot_disabled = not out and not err - self.verify_response_true(auto_host_boot_disabled, err_msg, 1) - - def test_guests_state_on_host_boot(self): - """Check host boot configuration on compute nodes - Target component: Nova - - Scenario: - 1. Check host boot configuration on compute nodes - - Duration: 20 s. - - Deployment tags: disabled - - Available since release: 2015.1.0-8.0 - """ - - for host in self.computes: - self._check_host_boot(host) diff --git a/fuel_health/tests/configuration/__init__.py b/fuel_health/tests/configuration/__init__.py deleted file mode 100644 index 16f7d4b2..00000000 --- a/fuel_health/tests/configuration/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -Main purpose of following attribute is -to supply general information about test set. -This information will be stored in ostf database -in test_sets table. -''' -__profile__ = { - "test_runs_ordering_priority": 6, - "id": "configuration", - "driver": "nose", - "test_path": "fuel_health/tests/configuration", - "cleanup_path": "fuel_health.cleanup", - "description": "Configuration tests. Duration 30 sec - 2 min", - "exclusive_testsets": [], - "available_since_release": "2014.2-6.1", -} diff --git a/fuel_health/tests/configuration/test_configuration.py b/fuel_health/tests/configuration/test_configuration.py deleted file mode 100644 index 0ba1c060..00000000 --- a/fuel_health/tests/configuration/test_configuration.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import paramiko.ssh_exception as exc - -from fuel_health.common import ssh -from fuel_health import exceptions -from fuel_health import nmanager -from keystoneclient import exceptions as k_exceptions -from keystoneclient.v2_0 import Client as keystoneclient - -LOG = logging.getLogger(__name__) - - -class SanityConfigurationTest(nmanager.SanityChecksTest): - """TestClass contains tests for default creadentials usage. - Special requirements: - 1. A controller's IP address should be specified. - 2. A compute's IP address should be specified. - 3. 
SSH user credentials for the controller and the compute - should be specified in the controller_node_ssh_user parameter - """ - - @classmethod - def setUpClass(cls): - super(SanityConfigurationTest, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - pass - - def test_001_check_default_master_node_credential_usage(self): - """Check usage of default credentials on master node - Target component: Configuration - - Scenario: - 1. Check user can not ssh on master node with default credentials. - Duration: 20 s. - Available since release: 2014.2-6.1 - """ - ip = self.config.nailgun_host - - ssh_client = ssh.Client(ip, - self.config.master.master_node_ssh_user, - self.config.master.master_node_ssh_password, - timeout=self.config.master.ssh_timeout) - cmd = "date" - output = [] - try: - output = ssh_client.exec_command(cmd) - LOG.debug(output) - except exceptions.SSHExecCommandFailed: - self.verify_response_true(len(output) == 0, - 'Step 1 failed: Default credentials for ' - 'ssh on master node were not changed') - except exceptions.TimeoutException: - self.verify_response_true(len(output) == 0, - 'Step 1 failed: Default credentials for ' - 'ssh on master node were not changed') - except exc.SSHException: - self.verify_response_true(len(output) == 0, - 'Step 1 failed: Default credentials for ' - 'ssh on master node were not changed') - - self.verify_response_true(len(output) == 0, - 'Step 1 failed: Default credentials for ' - 'ssh on master node were not changed') - - def test_002_check_default_openstack_credential_usage(self): - """Check if default credentials for OpenStack cluster have changed - Target component: Configuration - - Scenario: - 1. Check if default credentials for OpenStack cluster have changed. - Duration: 20 s. - Available since release: 2014.2-6.1 - """ - cluster_data = { - 'password': self.config.identity.admin_password, - 'username': self.config.identity.admin_username} - - default_data = { - 'password': 'admin', - 'username': 'admin'} - - self.verify_response_body_not_equal( - exp_content=default_data, - act_content=cluster_data, - msg='Default credentials values are used. ' - 'We kindly recommend that you changed all defaults.', - failed_step='1') - - def test_003_check_default_keystone_credential_usage(self): - """Check usage of default credentials for keystone on master node - Target component: Configuration - - Scenario: - 1. Check default credentials for keystone on master node are - changed. - Duration: 20 s. - Available since release: 2015.1.0-7.0 - """ - - usr = self.config.master.keystone_user - pwd = self.config.master.keystone_password - url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host) - - try: - keystone = keystoneclient(username=usr, - password=pwd, - auth_url=url) - keystone.authenticate() - except k_exceptions.Unauthorized: - pass - else: - self.fail('Step 1 failed: Default credentials ' - 'for keystone on master node were not changed') diff --git a/fuel_health/tests/ha/__init__.py b/fuel_health/tests/ha/__init__.py deleted file mode 100644 index 020ee42a..00000000 --- a/fuel_health/tests/ha/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Main purpose of following attribute is -to supply general information about test set. -This information will be stored in ostf database -in test_sets table. -""" -__profile__ = { - "test_runs_ordering_priority": 3, - "id": "ha", - "driver": "nose", - "test_path": "fuel_health/tests/ha", - "description": "HA tests. Duration 30 sec - 8 min", - "cleanup_path": "fuel_health.cleanup", - "deployment_tags": ["ha"], - "exclusive_testsets": [] -} diff --git a/fuel_health/tests/ha/test_haproxy.py b/fuel_health/tests/ha/test_haproxy.py deleted file mode 100644 index b09a2ec1..00000000 --- a/fuel_health/tests/ha/test_haproxy.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common import ssh -from fuel_health import test - - -LOG = logging.getLogger(__name__) - - -class HAProxyCheck(test.BaseTestCase): - """TestClass contains HAProxy checks.""" - @classmethod - def setUpClass(cls): - super(HAProxyCheck, cls).setUpClass() - cls.controllers = cls.config.compute.online_controllers - cls.controller_key = cls.config.compute.path_to_private_key - cls.controller_user = cls.config.compute.ssh_user - - def setUp(self): - super(HAProxyCheck, self).setUp() - if not self.controllers: - self.skipTest('There are no controller nodes') - - def _check_haproxy_backend(self, remote, - services=None, nodes=None, - ignore_services=None, ignore_nodes=None): - """Check DOWN state of HAProxy backends. Define names of service or - nodes if need check some specific service or node. Use ignore_services - for ignore service status on all nodes. Use ignore_nodes for ignore all - services on all nodes. Ignoring has a bigger priority. - :param remote: SSHClient - :param service: List - :param nodes: List - :param ignore_services: List - :param ignore_nodes: List - :return dict - """ - cmd = 'haproxy-status.sh | egrep -v "BACKEND|FRONTEND"' - - pos_filter = (services, nodes) - neg_filter = (ignore_services, ignore_nodes) - grep = ['|egrep "{0}"'.format('|'.join(n)) for n in pos_filter if n] - grep.extend( - ['|egrep -v "{0}"'.format('|'.join(n)) for n in neg_filter if n]) - - return remote.exec_command("{0}{1}".format(cmd, ''.join(grep))) - - def test_001_check_state_of_backends(self): - """Check state of haproxy backends on controllers - Target Service: HA haproxy - - Scenario: - 1. Ssh on each controller and get state of HAProxy backends - 2. Check backend state for availability - Duration: 10 s. 
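
Editor's note: the way _check_haproxy_backend above assembles its shell pipeline from optional include and exclude filters is easy to miss in the flattened diff. The sketch below reproduces just that composition step for illustration; it is not part of the original test, and the example service name is hypothetical.

def build_backend_cmd(services=None, nodes=None,
                      ignore_services=None, ignore_nodes=None):
    # Mirrors the filter composition in _check_haproxy_backend: start from
    # the haproxy-status.sh output minus BACKEND/FRONTEND headers, then
    # append egrep include filters and egrep -v exclude filters.
    cmd = 'haproxy-status.sh | egrep -v "BACKEND|FRONTEND"'
    grep = ['|egrep "{0}"'.format('|'.join(n))
            for n in (services, nodes) if n]
    grep.extend(['|egrep -v "{0}"'.format('|'.join(n))
                 for n in (ignore_services, ignore_nodes) if n])
    return cmd + ''.join(grep)


print(build_backend_cmd(ignore_services=['nova-metadata-api']))
# haproxy-status.sh | egrep -v "BACKEND|FRONTEND"|egrep -v "nova-metadata-api"
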
- Available since release: 2015.1.0-8.0 - """ - LOG.info("Controllers nodes are %s" % self.controllers) - for controller in self.controllers: - remote = ssh.Client(controller, self.controller_user, - key_filename=self.controller_key, - timeout=100) - ignore_services = [] - if 'neutron' not in self.config.network.network_provider: - ignore_services.append('nova-metadata-api') - haproxy_status = self.verify( - 10, self._check_haproxy_backend, 1, - "Can't get state of backends.", - "Getting state of backends", - remote, - ignore_services=ignore_services) - - dead_backends = filter(lambda x: 'DOWN' in x, - haproxy_status.splitlines()) - backends_message = "Dead backends {0}"\ - .format(dead_backends) - LOG.debug(backends_message) - error_message = "Step 2 failed: " + backends_message - self.verify_response_true( - len(dead_backends) == 0, error_message) diff --git a/fuel_health/tests/ha/test_mysql_replication.py b/fuel_health/tests/ha/test_mysql_replication.py deleted file mode 100644 index 0df0cddd..00000000 --- a/fuel_health/tests/ha/test_mysql_replication.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common import ssh -from fuel_health.common.utils import data_utils -from fuel_health.tests.ha import test_mysql_status - -LOG = logging.getLogger(__name__) - - -class TestMysqlReplication(test_mysql_status.BaseMysqlTest): - @classmethod - def setUpClass(cls): - super(TestMysqlReplication, cls).setUpClass() - cls.database = 'ost1' + str(data_utils.rand_int_id(100, 999)) - cls.master_ip = None - - def setUp(self): - super(TestMysqlReplication, self).setUp() - if 'ha' not in self.config.compute.deployment_mode: - self.skipTest('Cluster is not HA mode, skipping tests') - - @classmethod - def tearDownClass(cls): - if cls.master_ip: - try: - cmd = "mysql -h localhost -e 'DROP DATABASE %s'" % cls.database - ssh.Client(cls.master_ip, cls.node_user, - key_filename=cls.node_key).exec_command(cmd) - except Exception: - LOG.exception("Failed to connect to mysql cmd:{0}".format(cmd)) - - def test_mysql_replication(self): - """Check data replication over mysql - Target Service: HA mysql - - Scenario: - 1. Check that mysql is running on all controller or database nodes. - 2. Create database on one node. - 3. Create table in created database - 4. Insert data to the created table - 5. Get replicated data from each database node. - 6. Verify that replicated data in the same from each database - 7. Drop created database - Duration: 10 s. - """ - LOG.info("'Test MySQL replication' started") - databases = self.verify(20, self.get_database_nodes, - 1, "Can not get database hostnames. 
Check that" - " at least one controller is operable", - "get database nodes", - self.controller_ip, - self.node_user, - key=self.node_key) - self.verify_response_body_not_equal(0, len(databases), - self.no_db_msg, 1) - if len(databases) == 1: - self.skipTest(self.one_db_msg) - - LOG.info("Database nodes are " + ", ".join(databases)) - self.master_ip = databases[0] - - # check that mysql is running on all hosts - cmd = 'mysql -h localhost -e "" ' - for db_node in databases: - ssh_client = ssh.Client( - db_node, self.node_user, - key_filename=self.node_key, timeout=100) - self.verify( - 20, ssh_client.exec_command, 1, - 'Can not connect to mysql. ' - 'Please check that mysql is running and there ' - 'is connectivity by management network', - 'detect mysql node', cmd) - - database_name = self.database - table_name = 'ost' + str(data_utils.rand_int_id(100, 999)) - record_data = str(data_utils.rand_int_id(1000000000, 9999999999)) - - create_database = ( - 'mysql -h localhost -e "CREATE DATABASE IF NOT EXISTS ' - '{database}" '.format(database=database_name) - ) - - create_table = ( - 'mysql -h localhost -e' - ' "CREATE TABLE IF NOT EXISTS {database}.{table}' - ' (data VARCHAR(100))" '.format(database=database_name, - table=table_name) - ) - - create_record = ( - 'mysql -h localhost -e "INSERT INTO {database}.{table} (data) ' - 'VALUES({data})" '.format(database=database_name, - table=table_name, - data=record_data) - ) - - get_record = ( - 'mysql -h localhost -e "SELECT * FROM {database}.{table} ' - 'WHERE data = \"{data}\"" '.format(database=database_name, - table=table_name, - data=record_data) - ) - - drop_db = "mysql -h localhost -e 'DROP DATABASE {database}'".format( - database=database_name - ) - - # create db, table, insert data on one node - LOG.info('target node ip/hostname: "{0}" '.format(self.master_ip)) - master_ssh_client = ssh.Client(self.master_ip, self.node_user, - key_filename=self.node_key, - timeout=100) - - self.verify(20, master_ssh_client.exec_command, 2, - 'Database creation failed', 'create database', - create_database) - LOG.info('create database') - self.verify(20, master_ssh_client.exec_command, 3, - 'Table creation failed', 'create table', create_table) - LOG.info('create table') - self.verify(20, master_ssh_client.exec_command, 4, - 'Can not insert data in created table', 'data insertion', - create_record) - LOG.info('create data') - - # Verify that data is replicated on other databases - for db_node in databases: - if db_node != self.master_ip: - client = ssh.Client(db_node, - self.node_user, - key_filename=self.node_key) - - output = self.verify( - 20, client.exec_command, 5, - 'Can not get data from database node %s' % db_node, - 'get_record', get_record) - - self.verify_response_body(output, record_data, - msg='Expected data missing', - failed_step='6') - - # Drop created db - ssh_client = ssh.Client(self.master_ip, self.node_user, - key_filename=self.node_key) - self.verify(20, ssh_client.exec_command, 7, - 'Can not delete created database', - 'database deletion', drop_db) - self.master_ip = None diff --git a/fuel_health/tests/ha/test_mysql_status.py b/fuel_health/tests/ha/test_mysql_status.py deleted file mode 100644 index 6c4c19dc..00000000 --- a/fuel_health/tests/ha/test_mysql_status.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from distutils import version -import logging - -from fuel_health.common import ssh -from fuel_health import test - -LOG = logging.getLogger(__name__) - - -class BaseMysqlTest(test.BaseTestCase): - """Base methods for MySQL DB tests - """ - @classmethod - def setUpClass(cls): - super(BaseMysqlTest, cls).setUpClass() - cls.nodes = cls.config.compute.nodes - cls.controllers = cls.config.compute.online_controllers - if cls.controllers: - cls.controller_ip = cls.controllers[0] - cls.node_key = cls.config.compute.path_to_private_key - cls.node_user = cls.config.compute.ssh_user - cls.mysql_user = 'root' - cls.master_ip = [] - cls.release_version = \ - cls.config.compute.release_version.split('-')[1] - cls.one_db_msg = "There is only one database online. Nothing to check" - cls.no_db_msg = ("Can not find any online database. " - "Check that at least one database is operable") - - def setUp(self): - super(BaseMysqlTest, self).setUp() - if 'ha' not in self.config.compute.deployment_mode: - self.skipTest('Cluster is not HA mode, skipping tests') - if not self.controllers: - self.skipTest('All cluster controllers are offline') - - @classmethod - def get_database_nodes(cls, controller_ip, username, key): - if version.StrictVersion(cls.release_version)\ - < version.StrictVersion('7.0'): - return cls.config.compute.online_controllers - # retrieve data from controller - ssh_client = ssh.Client(controller_ip, - username, - key_filename=key, - timeout=100) - - hiera_cmd = ('ruby -e \'require "hiera"; ' - 'db_h = Hiera.new().lookup("database_nodes", {}, {}); ' - 'db = db_h.keys.map{|k| db_h[k]["name"]}; ' - 'if db != [] then puts db else puts "None" end\'') - - database_nodes = ssh_client.exec_command(hiera_cmd) - # get online nodes - database_nodes = database_nodes.splitlines() - databases = [] - for node in cls.config.compute.nodes: - hostname = node['hostname'] - if hostname in database_nodes and node['online']: - databases.append(hostname) - return databases - - -class TestMysqlStatus(BaseMysqlTest): - @classmethod - def setUpClass(cls): - super(TestMysqlStatus, cls).setUpClass() - - def setUp(self): - super(TestMysqlStatus, self).setUp() - if 'ha' not in self.config.compute.deployment_mode: - self.skipTest('Cluster is not HA mode, skipping tests') - - def test_os_databases(self): - """Check if amount of tables in databases is the same on each node - Target Service: HA mysql - - Scenario: - 1. Detect there are online database nodes. - 2. Request list of tables for os databases on each node. - 3. Check if amount of tables in databases is the same on each node - Duration: 10 s. - """ - LOG.info("'Test OS Databases' started") - dbs = ['nova', 'glance', 'keystone'] - cmd = "mysql -h localhost -e 'SHOW TABLES FROM %(database)s'" - - databases = self.verify(20, self.get_database_nodes, - 1, "Can not get database hostnames. 
Check that" - " at least one controller is operable", - "get database nodes", - self.controller_ip, - self.node_user, - key=self.node_key) - self.verify_response_body_not_equal(0, len(databases), - self.no_db_msg, 1) - if len(databases) == 1: - self.skipTest(self.one_db_msg) - - for database in dbs: - LOG.info('Current database name is %s' % database) - temp_set = set() - for node in databases: - LOG.info('Current database node is %s' % node) - cmd1 = cmd % {'database': database} - LOG.info('Try to execute command %s' % cmd1) - tables = ssh.Client( - node, self.node_user, - key_filename=self.node_key, - timeout=self.config.compute.ssh_timeout) - output = self.verify(40, tables.exec_command, 2, - 'Can list tables', - 'get amount of tables for each database', - cmd1) - tables = set(output.splitlines()) - if len(temp_set) == 0: - temp_set = tables - self.verify_response_true( - len(tables.symmetric_difference(temp_set)) == 0, - "Step 3 failed: Tables in %s database are " - "different" % database) - - del temp_set - - @staticmethod - def get_variables_from_output(output, variables): - """Return dict with variables and their values extracted from mysql - Assume that output is "| Var_name | Value |" - """ - result = {} - LOG.debug('Expected variables: "{0}"'.format(str(variables))) - for line in output: - try: - var, value = line.strip("| ").split("|")[:2] - except ValueError: - continue - var = var.strip() - if var in variables: - result[var] = value.strip() - LOG.debug('Extracted values: "{0}"'.format(str(result))) - return result - - def test_state_of_galera_cluster(self): - """Check galera environment state - Target Service: HA mysql - - Scenario: - 1. Detect there are online database nodes. - 2. Ssh on each node containing database and request state of galera - node - 3. For each node check cluster size - 4. For each node check status is ready - 5. For each node check that node is connected to cluster - Duration: 10 s. - """ - databases = self.verify(20, self.get_database_nodes, - 1, "Can not get database hostnames. 
Check that" - " at least one controller is operable", - "get database nodes", - self.controller_ip, - self.node_user, - key=self.node_key) - self.verify_response_body_not_equal(0, len(databases), - self.no_db_msg, 1) - if len(databases) == 1: - self.skipTest(self.one_db_msg) - - for db_node in databases: - command = "mysql -h localhost -e \"SHOW STATUS LIKE 'wsrep_%'\"" - ssh_client = ssh.Client(db_node, self.node_user, - key_filename=self.node_key, - timeout=100) - output = self.verify( - 20, ssh_client.exec_command, 2, - "Verification of galera cluster node status failed", - 'get status from galera node', - command).splitlines() - - LOG.debug('mysql output from node "{0}" is \n"{1}"'.format( - db_node, output) - ) - - mysql_vars = [ - 'wsrep_cluster_size', - 'wsrep_ready', - 'wsrep_connected' - ] - result = self.get_variables_from_output(output, mysql_vars) - - self.verify_response_body_content( - result.get('wsrep_cluster_size', 0), - str(len(databases)), - msg='Cluster size on %s less ' - 'than databases count' % db_node, - failed_step='3') - - self.verify_response_body_content( - result.get('wsrep_ready', 'OFF'), 'ON', - msg='wsrep_ready on %s is not ON' % db_node, - failed_step='4') - - self.verify_response_body_content( - result.get('wsrep_connected', 'OFF'), 'ON', - msg='wsrep_connected on %s is not ON' % db_node, - failed_step='5') diff --git a/fuel_health/tests/ha/test_pacemaker_status.py b/fuel_health/tests/ha/test_pacemaker_status.py deleted file mode 100644 index c7702b4b..00000000 --- a/fuel_health/tests/ha/test_pacemaker_status.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health import ha_base - - -LOG = logging.getLogger(__name__) - - -class TestPacemakerStatus(ha_base.TestPacemakerBase): - """TestClass contains test for pacemaker status on cluster controllers.""" - - def test_check_pacemaker_resources(self): - """Check pacemaker status - - Scenario: - 1. Get pacemaker status for each online controller - 2. Check status of online/offline controllers in pacemaker - 3. Check status of nodes where resources are started - 4. Check that an active resource is started and not failed - 5. Check that list of resources is the same on all online controllers - 6. Check that list of nodes where a resource is started is the same - on all controllers - 7. Check controllers that pcs resources are started on the same nodes - Duration: 10 s. - Available since release: 2015.1.0-7.0 - """ - # 1. Get pacemaker status - cluster_resources = {} - nodes = {} - cmd = 'pcs status xml' - for i, ip in enumerate(self.online_controller_ips): - fqdn = self.online_controller_names[i] - err_msg = ('Cannot get pacemaker status. Execution of the "{0}" ' - 'failed on the controller {0}.'.format(cmd, fqdn)) - - pcs_status = self.verify(20, self._run_ssh_cmd, 1, err_msg, - 'get pacemaker status', ip, cmd)[0] - self.verify_response_true( - pcs_status, 'Step 1 failed: Cannot get pacemaker status. 
Check' - ' the pacemaker service on the controller {0}.'.format(fqdn)) - - cluster_resources[fqdn] = self.get_pcs_resources(pcs_status) - nodes[fqdn] = self.get_pcs_nodes(pcs_status) - LOG.debug("Pacemaker resources status on the controller {0}: {1}." - .format(fqdn, cluster_resources[fqdn])) - LOG.debug("Pacemaker nodes status on the controller {0}: {1}." - .format(fqdn, nodes[fqdn])) - - # 2. Compare online / offline nodes list in Nailgun and pacemaker - nailgun_online = set(self.online_controller_names) - nailgun_offline = set(self.offline_controller_names) - for i, ip in enumerate(self.online_controller_ips): - fqdn = self.online_controller_names[i] - self.verify_response_true( - set(nodes[fqdn]['Online']) == nailgun_online and - set(nodes[fqdn]['Offline']) == nailgun_offline, - 'Step 2 failed: Online/Offline nodes on the controller {0} ' - 'differs from the actual controllers status.'.format(fqdn)) - - # For each fqdn, perform steps 3 and 4 (checks that pacemaker - # is properly working with online controllers): - for fqdn in cluster_resources: - for res_name in cluster_resources[fqdn]: - resource = cluster_resources[fqdn][res_name] - # 3. Ensure that every resource uses only online controllers - not_updated = (set(self.offline_controller_names) & - set(resource['nodes'])) - self.verify_response_true( - not not_updated, - 'Step 3 failed: On the controller {0}, resource {1} is ' - 'started on the controller(s) {2} that marked as offline ' - 'in Nailgun.'.format(fqdn, res_name, not_updated)) - - # 4. Active resource should be started on controller(s) - if resource['active']: - self.verify_response_true( - resource['started'], - 'Step 4 failed: On the controller {0}, resource {1} is' - ' active but is not started on any controller.' - .format(fqdn, res_name)) - - self.verify_response_true( - not resource['failed'], - 'Step 4 failed: On the controller {0}, resource {1} is' - ' active but failed to start ({2}managed).' - .format(fqdn, - res_name, - "un" if not resource['managed'] else "")) - - # Make pairs from fqdn names of controllers - fqdns = list(cluster_resources.keys()) - fqdn_pairs = [ - (x, y) for i, x in enumerate(fqdns[:-1]) for y in fqdns[i+1:]] - - # For each pair, perform steps 5 and 6 (checks for split brain): - for x, y in fqdn_pairs: - res_x = cluster_resources[x] - res_y = cluster_resources[y] - - # 5. Compare resource lists. - set_x = set(res_x.keys()) - set_y = set(res_y.keys()) - self.verify_response_true( - set_x == set_y, - 'Step 5 failed: Resources list is different. Missed resources ' - 'on the controller {0}: {1} ; on the controller {2}: {3}.' - .format(x, set_y - set_x, y, set_x - set_y)) - - # 6. Check that nodes list of every resource is syncronized - for res in res_x: - self.verify_response_true( - set(res_x[res]['nodes']) == set(res_y[res]['nodes']), - 'Step 6 failed: On the controllers {0} and {1}, resource ' - '{2} has different list of nodes where it is started.' - .format(x, y, res)) - - # 7. Check that each resource started only on nodes that - # allowed to start this resource, and not started on other nodes. - - # Get pacemaker constraints - cmd = 'cibadmin --query --scope constraints' - err_msg = ('Cannot get pacemaker constraints. Execution of the "{0}" ' - 'failed on the controller {0}.' 
- .format(cmd, self.online_controller_names[0])) - constraints_xml = self.verify( - 20, self._run_ssh_cmd, 7, err_msg, 'get pacemaker constraints', - self.online_controller_ips[0], cmd)[0] - constraints = self.get_pcs_constraints(constraints_xml) - - for rsc in constraints: - (allowed, started, disallowed) = self.get_resource_nodes( - rsc, constraints, cluster_resources[fqdns[0]], orig_rsc=[]) - # In 'started' list should be only the nodes where the resource - # is 'allowed' to start - self.verify_response_true( - set(allowed) >= set(started), - 'Step 7 failed: Resource {0} started on the nodes {1}, but it ' - 'is allowed to start only on the nodes {2}' - .format(rsc, started, allowed)) - - # 'disallowed' list, where the resource started but - # not allowed to start, should be empty. - self.verify_response_true( - not disallowed, - 'Step 7 failed: Resource {0} disallowed to start on the nodes ' - '{1}, but actually started on the nodes {2}' - .format(rsc, disallowed, started)) - - # If 'allowed' is not empty and contains: - # - more than one node where resource is allowed, or - # - at least one working controller node, - # then 'started' should contain at least one node where - # the resource is actually running. - if (len(allowed) > 1) or (set(allowed) - nailgun_offline): - self.verify_response_true( - set(started) - nailgun_offline, - 'Step 7 failed: Resource {0} allowed to start on the nodes' - ' {1}, but it is not started on any node' - .format(rsc, allowed, started)) diff --git a/fuel_health/tests/ha/test_rabbit.py b/fuel_health/tests/ha/test_rabbit.py deleted file mode 100644 index 1cf87030..00000000 --- a/fuel_health/tests/ha/test_rabbit.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health import ha_base - - -LOG = logging.getLogger(__name__) - - -class RabbitSanityTest(ha_base.RabbitSanityClass): - """TestClass contains RabbitMQ test checks.""" - - def setUp(self): - super(RabbitSanityTest, self).setUp() - if 'ha' not in self.config.mode: - self.skipTest("It is not HA configuration") - if not self._controllers: - self.skipTest('There are no controller nodes') - if len(self.amqp_hosts_name) == 1: - self.skipTest('There is only one RabbitMQ node online. ' - 'Nothing to check') - - def test_001_rabbitmqctl_status(self): - """Check RabbitMQ is available - - Scenario: - 1. Retrieve cluster status for each controller. - 2. Check that numbers of rabbit nodes is the same - in Hiera DB and in actual cluster. - 3. Check crm status for rabbit - 4. List channels - Duration: 100 s. 
- Deployment tags: CENTOS - """ - self.verify(20, self.list_nodes, 1, - 'Cannot retrieve cluster nodes') - - if len(self.amqp_hosts_name) != self.list_nodes(): - self.fail('Step 2 failed: Number of RabbitMQ nodes ' - 'is not equal to number of cluster nodes.') - - res = self.verify(20, self.pick_rabbit_master, 3, - 'Cannot retrieve crm status') - - LOG.debug("Current res is {0}".format(res)) - - if not res: - LOG.debug("Current res is {0}".format(res)) - self.fail('Step 3 failed: Rabbit Master node is not running.') - - fail_msg_4 = 'Can not get rabbit channel list in 40 seconds.' - - self.verify(40, self.list_channels, 4, fail_msg_4, - 'Can not retrieve channels list') - - def test_002_rabbitmqctl_status_ubuntu(self): - """RabbitMQ availability - Scenario: - 1. Retrieve cluster status for each controller. - 2. Check that numbers of rabbit nodes is the same - in Hiera DB and in actual cluster. - 3. Check crm status for rabbit - 4. List channels - Duration: 100 s. - Deployment tags: Ubuntu - """ - self.verify(20, self.list_nodes, 1, 'Cannot retrieve cluster nodes') - - if len(self.amqp_hosts_name) != self.list_nodes(): - self.fail('Step 2 failed: Number of RabbitMQ nodes ' - 'is not equal to number of cluster nodes.') - - res = self.verify(20, self.pick_rabbit_master, 3, - 'Cannot retrieve crm status') - - LOG.debug("Current res is {0}".format(res)) - - if not res: - LOG.debug("Current res is {0}".format(res)) - self.fail('Step 3 failed: Rabbit Master node is not running.') - - fail_msg_4 = 'Can not get rabbit channel list in 40 seconds.' - - self.verify(40, self.list_channels, 4, fail_msg_4, - 'Can not retrieve channels list') - - def test_003_rabbitmqctl_replication(self): - """RabbitMQ replication - Scenario: - 1. Check rabbitmq connections. - 2. Create queue. - 3. Publish test message in created queue - 4. Request created queue and message - 5. Delete queue - Duration: 100 s. - Available since release: 2014.2-6.1 - """ - self.verify(40, self.check_rabbit_connections, 1, - 'Cannot retrieve cluster nodes') - - self.verify(60, self.create_queue, 2, - 'Failed to create queue') - - self.verify(40, self.publish_message, 3, - 'Failed to publish message') - - self.verify(40, self.check_queue_message_replication, 4, - 'Consume of message failed') - - self.verify(40, self.delete_queue, 5, - 'Failed to delete queue') diff --git a/fuel_health/tests/sanity/__init__.py b/fuel_health/tests/sanity/__init__.py deleted file mode 100644 index d5b4b192..00000000 --- a/fuel_health/tests/sanity/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Main purpose of following attribute is -to supply general information about test set. -This information will be stored in ostf database -in test_sets table. -""" -__profile__ = { - "test_runs_ordering_priority": 1, - "id": "sanity", - "driver": "nose", - "test_path": "fuel_health/tests/sanity", - "cleanup_path": "fuel_health.cleanup", - "description": "Sanity tests. 
Duration 30 sec - 2 min", - "exclusive_testsets": [] -} diff --git a/fuel_health/tests/sanity/test_sanity_ceilometer.py b/fuel_health/tests/sanity/test_sanity_ceilometer.py deleted file mode 100644 index 826d7329..00000000 --- a/fuel_health/tests/sanity/test_sanity_ceilometer.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from fuel_health import ceilometermanager - - -class CeilometerApiTests(ceilometermanager.CeilometerBaseTest): - """TestClass contains tests that check basic Ceilometer functionality.""" - - def test_list_meters(self): - """Ceilometer test to list meters, alarms, resources and events - Target component: Ceilometer - - Scenario: - 1. Request the list of meters with query: disk_format=qcow2. - 2. Request the list of alarms. - 3. Request the list of resources created for the last hour. - 4. Request the list of events created for the last hour. - - Duration: 180 s. - Deployment tags: Ceilometer - """ - - fail_msg = 'Failed to get list of meters.' - q = [{'field': 'metadata.disk_format', 'op': 'eq', 'value': 'qcow2'}] - self.verify(60, self.ceilometer_client.meters.list, - 1, fail_msg, 'getting list of meters', q) - - fail_msg = 'Failed to get list of alarms.' - self.verify(60, self.ceilometer_client.alarms.list, - 2, fail_msg, 'getting list of alarms') - - fail_msg = 'Failed to get list of resources.' - an_hour_ago = (datetime.datetime.now() - - datetime.timedelta(hours=1)).isoformat() - q = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}] - self.verify(60, self.ceilometer_client.resources.list, - 3, fail_msg, 'getting list of resources', q) - - fail_msg = 'Failed to get list of events.' - self.verify(60, self.ceilometer_client.events.list, - 4, fail_msg, 'getting list of events', q) diff --git a/fuel_health/tests/sanity/test_sanity_compute.py b/fuel_health/tests/sanity/test_sanity_compute.py deleted file mode 100644 index 578b7dc6..00000000 --- a/fuel_health/tests/sanity/test_sanity_compute.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import nmanager - - -class SanityComputeTest(nmanager.SanityChecksTest): - """TestClass contains tests that check basic Compute functionality.""" - - def test_list_instances(self): - """Request instance list - Target component: Nova - - Scenario: - 1. Request the list of instances. - Duration: 20 s. - """ - fail_msg = 'Instance list is unavailable. 
' - self.verify(20, self._list_instances, - 1, fail_msg, "instance listing", - self.compute_client) - - def test_list_images(self): - """Request image list using Nova - Target component: Nova - - Scenario: - 1. Request the list of images. - Duration: 20 s. - """ - fail_msg = 'Images list is unavailable. ' - self.verify(20, self._list_images, - 1, fail_msg, "images listing", - self.compute_client) - - def test_list_volumes(self): - """Request volume list - Target component: Cinder - - Scenario: - 1. Request the list of volumes. - Duration: 20 s. - """ - fail_msg = 'Volume list is unavailable. ' - self.verify(20, self._list_volumes, - 1, fail_msg, "volume listing", - self.volume_client) - - def test_list_snapshots(self): - """Request snapshot list - Target component: Cinder - - Scenario: - 1. Request the list of snapshots. - Duration: 20 s. - """ - fail_msg = 'Snapshots list is unavailable. ' - self.verify(20, self._list_snapshots, - 1, fail_msg, "snapshots listing", - self.volume_client) - - def test_list_flavors(self): - """Request flavor list - Target component: Nova - - Scenario: - 1. Request the list of flavors. - 2. Confirm that a response is received. - Duration: 20 s. - """ - fail_msg = 'Flavors list is unavailable. ' - list_flavors_resp = self.verify(30, self._list_flavors, - 1, fail_msg, "flavor listing", - self.compute_client) - - self.verify_response_true(list_flavors_resp, - "Step 2 failed: {msg}".format(msg=fail_msg)) - - def test_list_rate_limits(self): - """Request absolute limits list - Target component: Nova - - Scenario: - 1. Request the list of limits. - 2. Confirm that a response is received. - Duration: 20 s. - """ - fail_msg = 'Limits list is unavailable. ' - - list_limits_resp = self.verify(20, self._list_limits, - 1, fail_msg, "limits listing", - self.compute_client) - - self.verify_response_true( - list_limits_resp, "Step 2 failed: {msg}".format(msg=fail_msg)) diff --git a/fuel_health/tests/sanity/test_sanity_glance.py b/fuel_health/tests/sanity/test_sanity_glance.py deleted file mode 100644 index dea77e19..00000000 --- a/fuel_health/tests/sanity/test_sanity_glance.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import glancemanager - - -class GlanceSanityTests(glancemanager.GlanceTest): - """GlanceSanityTests contains verifications of basic Glance functionality. - """ - - def test_glance_image_list(self): - """Request image list using Glance v1 - Target component: Glance - - Scenario - 1. Get image list using Glance - 2. Confirm that a response is received - - Duration: 10 s. - Available since release: 2014.2-6.1 - """ - - fail_msg = "Can't get list of images. Glance API isn't available. " - image_list_resp = self.verify(10, self._list_images, - 1, fail_msg, "image listing", - self.glance_client_v1) - - fail_msg = "Image list is unavailable. 
" - self.verify_response_true(image_list_resp, fail_msg, 2) - - def test_glance_image_list_v2(self): - """Request image list using Glance v2 - Target component: Glance - - Scenario - 1. Get image list using Glance - 2. Confirm that a response is received - - Duration: 10 s. - Available since release: 2014.2-6.1 - """ - - fail_msg = "Can't get list of images. Glance API isn't available. " - image_list_resp = self.verify(10, self._list_images, - 1, fail_msg, "image listing", - self.glance_client) - - fail_msg = "Image list is unavailable. " - self.verify_response_true(image_list_resp, fail_msg, 2) diff --git a/fuel_health/tests/sanity/test_sanity_heat.py b/fuel_health/tests/sanity/test_sanity_heat.py deleted file mode 100644 index 06210523..00000000 --- a/fuel_health/tests/sanity/test_sanity_heat.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import nmanager - - -class SanityHeatTest(nmanager.SanityChecksTest): - """Class contains tests that check basic Heat functionality. - Special requirements: - 1. Heat component should be installed. - """ - - def test_list_stacks(self): - """Request stack list - Target component: Heat - - Scenario: - 1. Request the list of stacks. - - Duration: 20 s. - """ - self.verify(20, self._list_stacks, 1, - 'Stack list is unavailable. ', - "stack listing", - self.heat_client) diff --git a/fuel_health/tests/sanity/test_sanity_identity.py b/fuel_health/tests/sanity/test_sanity_identity.py deleted file mode 100644 index b1dd92a4..00000000 --- a/fuel_health/tests/sanity/test_sanity_identity.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import nmanager - - -class SanityIdentityTest(nmanager.SanityChecksTest): - """TestClass contains tests that check basic authentication functionality. - Special requirements: OS admin user permissions are needed - """ - - def test_list_services(self): - """Request active services list - Target component: Nova - - Scenario: - 1. Request the list of services. - 2. Confirm that a response is received. - Duration: 20 s. - """ - fail_msg = 'Services list is unavailable. ' - services = self.verify(20, self._list_services, - 1, fail_msg, "services listing", - self.compute_client) - - self.verify_response_true(services, - "Step 2 failed: {msg}".format(msg=fail_msg)) - - def test_list_users(self): - """Request user list - Target component: Keystone - - Scenario: - 1. 
Request the list of users. - 2. Confirm that a response is received. - Duration: 20 s. - """ - fail_msg = 'User list is unavailable. ' - users = self.verify(20, self._list_users, - 1, fail_msg, "user listing", - self.identity_client) - - self.verify_response_true(users, - "Step 2 failed: {msg}".format(msg=fail_msg)) diff --git a/fuel_health/tests/sanity/test_sanity_infrastructure.py b/fuel_health/tests/sanity/test_sanity_infrastructure.py deleted file mode 100644 index e9f0fa7e..00000000 --- a/fuel_health/tests/sanity/test_sanity_infrastructure.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import time - -from fuel_health.common import ssh -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class SanityInfrastructureTest(nmanager.SanityChecksTest): - """TestClass contains tests that check the whole OpenStack availability. - Special requirements: - 1. A controller's IP address should be specified. - 2. A compute's IP address should be specified. - 3. SSH user credentials for the controller and the compute - should be specified in the controller_node_ssh_user parameter - """ - - @classmethod - def setUpClass(cls): - super(SanityInfrastructureTest, cls).setUpClass() - cls.controllers = cls.config.compute.online_controllers - cls.controller_names = cls.config.compute.online_controller_names - cls.computes = cls.config.compute.online_computes - cls.usr = cls.config.compute.controller_node_ssh_user - cls.pwd = cls.config.compute.controller_node_ssh_password - cls.key = cls.config.compute.path_to_private_key - cls.timeout = cls.config.compute.ssh_timeout - cls.fuel_dns = cls.config.fuel.dns - - @classmethod - def tearDownClass(cls): - pass - - def test_001_services_state(self): - """Check that required services are running - Target component: OpenStack - - Scenario: - 1. Execute nova service-list command on a controller node. - 2. Check there are no failed services (with down state). - Duration: 180 s. - """ - downstate = u'down' - - def get_controllers_down_states(): - states = {} - for controller in self.controller_names: - svc = self._list_services(self.compute_client, host=controller) - down = [True for service in svc if service.state == downstate] - if any(down): - states[controller] = True - return states - - if not self.controllers: - self.skipTest('Step 1 failed: there are no controller nodes.') - - output = self.verify( - 50, get_controllers_down_states, 1, - "'nova service-list' command execution failed. 
", - "'nova service-list' command execution", - ) - - LOG.debug(output) - try: - self.verify_response_true( - len(output) == 0, - 'Step 2 failed: Some nova services have not been started.') - except Exception: - LOG.info("Will sleep for 120 seconds and try again") - LOG.exception("") - time.sleep(120) - # Re-collect data silently - output = get_controllers_down_states() - LOG.debug(output) - self.verify_response_true( - len(output) == 0, - 'Step 2 failed: Some nova services have not been started.') - - def test_002_internet_connectivity_from_compute(self): - """Check internet connectivity from a compute - Target component: OpenStack - - Scenario: - 1. Execute ping 8.8.8.8 command from a compute node. - Duration: 100 s. - - Deployment tags: qemu | kvm, public_on_all_nodes | nova_network - """ - if not self.computes: - self.skipTest('There are no compute nodes') - - cmd = "ping -q -c1 -w10 8.8.8.8" - - ssh_client = ssh.Client(self.computes[0], - self.usr, - self.pwd, - key_filename=self.key, - timeout=self.timeout) - self.verify(100, self.retry_command, 1, - "'ping' command failed. Looks like there is no " - "Internet connection on the compute node.", - "'ping' command", - 2, 30, ssh_client.exec_command, cmd) - - def test_003_dns_resolution(self): - """Check DNS resolution on compute node - Target component: OpenStack - - Scenario: - 1. Execute host 8.8.8.8 command from a compute node. - 2. Check 8.8.8.8 host was successfully resolved - 3. Check host google.com command from the compute node. - 4. Check google.com host was successfully resolved. - Duration: 120 s. - - Deployment tags: qemu | kvm, public_on_all_nodes | nova_network - """ - if not self.computes: - self.skipTest('There are no computes nodes') - - dns = self.fuel_dns.spit(',') if self.fuel_dns else ['8.8.8.8'] - - ssh_client = ssh.Client(self.computes[0], - self.usr, - self.pwd, - key_filename=self.key, - timeout=self.timeout) - expected_output = "{0}.in-addr.arpa domain name pointer".format(dns[0]) - - cmd = "host {0}".format(dns[0]) - output = self.verify(100, self.retry_command, 1, - "'host' command failed. Looks like there is no " - "Internet connection on the computes node.", - "'ping' command", 10, 5, - ssh_client.exec_command, cmd) - LOG.debug(output) - self.verify_response_true(expected_output in output, - 'Step 2 failed: ' - 'DNS name for {0} host ' - 'cannot be resolved.'.format(dns[0])) - - domain_name = output.split()[-1] - cmd = "host {0}".format(domain_name) - output = self.verify(100, self.retry_command, 3, - "'host' command failed. " - "DNS name cannot be resolved.", - "'host' command", 10, 5, - ssh_client.exec_command, cmd) - LOG.debug(output) - self.verify_response_true('has address {0}'.format(dns[0]) in output, - 'Step 4 failed: ' - 'DNS name cannot be resolved.') diff --git a/fuel_health/tests/sanity/test_sanity_ironic.py b/fuel_health/tests/sanity/test_sanity_ironic.py deleted file mode 100644 index 3131cde9..00000000 --- a/fuel_health/tests/sanity/test_sanity_ironic.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import ironicmanager - -LOG = logging.getLogger(__name__) - - -class IronicSanityTests(ironicmanager.IronicTest): - """TestClass contains tests to check that Ironic nodes are operable - - Special requirements: - 1. A controller's IP address should be specified. - 2. An ironic-conductor's IP address should be specified. - 3. SSH user credentials for the controller and the ironic-conductor - should be specified in the controller_node_ssh_user parameter - """ - - @classmethod - def setUpClass(cls): - super(IronicSanityTests, cls).setUpClass() - cls.controllers = cls.config.compute.online_controllers - cls.conductors = cls.config.ironic.online_conductors - if not cls.controllers: - cls.skipTest('There are no Controller nodes.') - if not cls.conductors: - cls.skipTest('There are no Ironic Conductor nodes.') - - def test_001_ironic_services(self): - """Check that Ironic services are running - Target component: Ironic - - Scenario: - 1. Check that ironic-api is running on all controllers. - 2. Check that ironic-conductor is running on all Ironic nodes. - 3. Check that nova-compute is running on single controller node. - Duration: 60 s. - Deployment tags: Ironic - Available since release: liberty-9.0 - """ - - # Step 1 - expected = u'/usr/bin/ironic-api' - cmd = 'pgrep -la ironic-api' - fail_msg = 'Ironic-api service is not running.' - action = 'checking ironic-api service' - self.verify(60, self.check_service_availability, 1, fail_msg, action, - self.controllers, cmd, expected, len(self.controllers)) - # Step 2 - expected = u'/usr/bin/ironic-conductor' - cmd = 'pgrep -la ironic' - fail_msg = 'Ironic-conductor service is not running.' - action = 'checking ironic-conductor service' - self.verify(60, self.check_service_availability, 2, fail_msg, action, - self.conductors, cmd, expected, len(self.conductors)) - # Step 3 - expected = u'/usr/bin/nova-compute' - cmd = 'pgrep -la nova-compute' - fail_msg = 'Nova-compute service is not running.' - action = 'checking nova-compute service' - self.verify(60, self.check_service_availability, 3, fail_msg, action, - self.controllers, cmd, expected) - - def test_002_ironic_node_actions(self): - """Check that Ironic can operate nodes - Target component: Ironic - - Scenario: - 1. Create Ironic node with fake driver. - 2. Update Ironic node properties. - 3. Show and check updated node properties. - 4. Delete Ironic node. - Duration: 60 s. - Deployment tags: Ironic - Available since release: liberty-9.0 - """ - # Step 1 - fail_msg = "Error creating node." - self.node = self.verify(20, self.node_create, 1, fail_msg, - 'Node creation', driver='fake', - extra={'NodeTest': ''}) - LOG.debug(self.node) - # Step 2 - prop = rand_name("ostf-prop") - value_prop = rand_name("prop-value") - fail_msg = "Can't update node with properties." - self.node = self.verify(20, self.node_update, 2, fail_msg, - 'Updating node', self.node, prop, value_prop) - LOG.debug(self.node) - # Step 3 - fail_msg = "Can't show node properties." 
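# Every check in these tests goes through the same step-numbered wrapper:
# self.verify(timeout, callable, step, fail_msg, action, *args, **kwargs)
# returns the callable's result or fails the numbered step. The sketch below
# only illustrates that calling convention; it is an assumption for
# illustration, not the actual fuel_health implementation, and the exception
# type is made up here.
import time


class StepFailure(AssertionError):
    """Illustrative failure type for a numbered test step."""


def verify(secs, func, step, fail_msg, action, *args, **kwargs):
    started = time.time()
    try:
        result = func(*args, **kwargs)
    except Exception as exc:
        raise StepFailure('Step %d failed: %s Error while %s: %s'
                          % (step, fail_msg, action, exc))
    if time.time() - started > secs:
        raise StepFailure('Step %d failed: %s Took longer than %d s while %s.'
                          % (step, fail_msg, secs, action))
    return result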
- self.node = self.verify(20, self.node_show, 3, fail_msg, - 'Showing node', self.node) - LOG.debug(self.node) - for p, v in self.node.properties.items(): - self.verify(5, self.assertTrue, 3, "Can't check node property.", - 'Checking node property', prop in p) - self.verify(5, self.assertTrue, 3, "Can't check property value.", - 'Checking property value', value_prop in v) - # Step 4 - fail_msg = "Can't delete node." - self.verify(20, self.node_delete, 4, fail_msg, 'Deleting node', - self.node) - - def test_003_ironic_list_entities(self): - """List Ironic entities - Target component: Ironic - - Scenario: - 1. List chassis. - 2. List drivers. - 3. List nodes. - 4. List ports. - Duration: 80 s. - Deployment tags: Ironic - Available since release: liberty-9.0 - """ - fail_msg = "Can't list chassis." - self.verify(20, self.list_chassis, 1, fail_msg, 'Chassis list') - - fail_msg = "Can't list drivers." - self.drivers = self.verify(20, self.list_drivers, 2, - fail_msg, 'Drivers list') - LOG.debug(self.drivers) - wanted_drivers = {u'fake', u'fuel_ipmitool'} - for driver in wanted_drivers: - self.verify(20, self.get_driver, 2, "Can't find driver.", - 'Checking drivers in list', driver) - - fail_msg = "Can't list nodes." - self.verify(20, self.list_nodes, 3, fail_msg, 'Nodes list') - - fail_msg = "Can't list ports." - self.verify(20, self.list_ports, 4, fail_msg, 'Ports list') diff --git a/fuel_health/tests/sanity/test_sanity_murano.py b/fuel_health/tests/sanity/test_sanity_murano.py deleted file mode 100644 index b958bec3..00000000 --- a/fuel_health/tests/sanity/test_sanity_murano.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import muranomanager - - -class MuranoSanityTests(muranomanager.MuranoTest): - """TestClass contains verifications of basic Murano functionality. - Special requirements: - 1. Murano API service should be installed. - """ - - def test_create_and_delete_service(self): - """Create and delete Murano environment - Target component: Murano - - Scenario: - 1. Send request to create environment. - 2. Send request to delete environment. - - Duration: 10 s. - - Deployment tags: Murano | murano_plugin - """ - - fail_msg = "Can't create environment. Murano API isn't available. " - self.environment = self.verify(15, self.create_environment, - 1, fail_msg, "creating environment", - "ost1_test-Murano_env01") - - fail_msg = ("Can't delete environment. Murano API isn't available " - "or RabbitMQ connectivity broken. ") - self.verify(5, self.delete_environment, 2, fail_msg, - "deleting environment", self.environment.id) - - def test_get_list_categories(self): - """Get list of Murano applications categories - Target component: Murano - - Scenario: - 1. Send request to get list of categories - - Duration: 10 s. - - Deployment tags: Murano | murano_plugin - """ - fail_msg = "Can't get list of categories. Murano API isn't available. 
" - self.verify(10, self.get_list_categories, 1, fail_msg, - "getting list of categories") - - def test_get_list_packages(self): - """Get list of Murano applications packages - Target component: Murano - - Scenario: - 1. Send request to get list of packages - - Duration: 10 s. - - Deployment tags: Murano | murano_plugin, murano_without_glare - """ - fail_msg = "Can't get list of packages. Murano API isn't available. " - self.verify(10, self.get_list_packages, 1, fail_msg, - "getting list of packages") - - def test_get_list_artifacts_packages(self): - """Get list of Murano Artifact applications packages - Target component: Murano - - Scenario: - 1. Send request to get list of artifact packages - - Duration: 10 s. - - Deployment tags: Murano | murano_plugin, murano_use_glare - """ - fail_msg = "Can't get list of packages. Murano API isn't available. " - self.verify(10, self.get_list_packages, 1, fail_msg, - "getting list of packages", artifacts=True) diff --git a/fuel_health/tests/sanity/test_sanity_networking.py b/fuel_health/tests/sanity/test_sanity_networking.py deleted file mode 100644 index d2356c35..00000000 --- a/fuel_health/tests/sanity/test_sanity_networking.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import nmanager - - -class NetworksTest(nmanager.SanityChecksTest): - """TestClass contains tests check base networking functionality.""" - - def test_list_networks_nova_network(self): - """Request list of networks - Target component: Nova Networking. - - Scenario: - 1. Request the list of networks. - 2. Confirm that a response is received. - Duration: 20 s. - - Deployment tags: nova_network - """ - fail_msg = "Networks list is unavailable. " - networks = self.verify(20, self._list_networks, 1, - fail_msg, - "listing networks", - self.compute_client) - - self.verify_response_true(networks, - "Step 2 failed: {msg}".format(msg=fail_msg)) - - def test_list_networks_neutron(self): - """Request list of networks - Target component: Neutron. - - Scenario: - 1. Request the list of networks. - 2. Confirm that a response is received. - Duration: 20 s. - - Available since release: 2014.2-6.0 - Deployment tags: neutron - """ - fail_msg = "Networks list is unavailable. " - networks = self.verify(20, self._list_networks, 1, - fail_msg, - "listing networks", - self.neutron_client) - - self.verify_response_true(networks, - "Step 2 failed: {msg}".format(msg=fail_msg)) diff --git a/fuel_health/tests/sanity/test_sanity_sahara.py b/fuel_health/tests/sanity/test_sanity_sahara.py deleted file mode 100644 index 97441c18..00000000 --- a/fuel_health/tests/sanity/test_sanity_sahara.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import saharamanager - - -class SaharaTemplatesTest(saharamanager.SaharaTestsManager): - _plugin_name = 'An unknown plugin name' - _hadoop_version = 'An unknown Hadoop version' - _node_processes = 'An unknown list of processes' - - def setUp(self): - super(SaharaTemplatesTest, self).setUp() - - flavor_id = self.create_flavor() - self.ng_template = { - 'name': rand_name('sahara-ng-template-'), - 'plugin_name': self._plugin_name, - 'hadoop_version': self._hadoop_version, - 'flavor_id': flavor_id, - 'node_processes': self._node_processes, - 'description': 'Test node group template' - } - self.cl_template = { - 'name': rand_name('sahara-cl-template-'), - 'plugin_name': self._plugin_name, - 'hadoop_version': self._hadoop_version, - 'node_groups': [ - { - 'name': 'all-in-one', - 'flavor_id': flavor_id, - 'node_processes': self._node_processes, - 'count': 1 - } - ], - 'description': 'Test cluster template' - } - self.client = self.sahara_client - - -class VanillaTwoTemplatesTest(SaharaTemplatesTest): - def setUp(self): - mapping_versions_of_plugin = { - "6.1": "2.4.1", - "7.0": "2.6.0", - "8.0": "2.7.1", - "9.0": "2.7.1", - "9.1": "2.7.1" - } - self._plugin_name = 'vanilla' - self._hadoop_version = mapping_versions_of_plugin.get( - self.config.fuel.fuel_version, "2.7.1") - self._node_processes = ['resourcemanager', 'namenode', - 'secondarynamenode', 'oozie', 'historyserver', - 'nodemanager', 'datanode'] - super(VanillaTwoTemplatesTest, self).setUp() - - def test_vanilla_two_templates(self): - """Sahara test for checking CRUD operations on Vanilla2 templates - Target component: Sahara - - Scenario: - 1. Create a simple node group template - 2. Get the node group template - 3. List node group templates - 4. Delete the node group template - 5. Create a simple cluster template - 6. Get the cluster template - 7. List cluster templates - 8. Delete the cluster template - - Duration: 80 s. - Available since release: 2014.2-6.1 - Deployment tags: Sahara - """ - - fail_msg = 'Failed to create node group template.' - ng_template = self.verify(10, self.client.node_group_templates.create, - 1, fail_msg, 'creating node group template', - **self.ng_template) - - fail_msg = 'Failed to get node group template.' - self.verify(10, self.client.node_group_templates.get, 2, - fail_msg, 'getting node group template', ng_template.id) - - fail_msg = 'Failed to list node group templates.' - self.verify(10, self.client.node_group_templates.list, 3, - fail_msg, 'listing node group templates') - - fail_msg = 'Failed to delete node group template.' - self.verify(10, self.client.node_group_templates.delete, 4, - fail_msg, 'deleting node group template', ng_template.id) - - fail_msg = 'Failed to create cluster template.' - cl_template = self.verify(10, self.client.cluster_templates.create, 5, - fail_msg, 'creating cluster template', - **self.cl_template) - - fail_msg = 'Failed to get cluster template.' 
- self.verify(10, self.sahara_client.cluster_templates.get, 6, - fail_msg, 'getting cluster template', cl_template.id) - - fail_msg = 'Failed to list cluster templates.' - self.verify(10, self.sahara_client.cluster_templates.list, 7, - fail_msg, 'listing cluster templates') - - fail_msg = 'Failed to delete cluster template.' - self.verify(10, self.sahara_client.cluster_templates.delete, 8, - fail_msg, 'deleting cluster template', cl_template.id) - - -class HDPTwoTemplatesTest(SaharaTemplatesTest): - _plugin_name = 'ambari' - _hadoop_version = '2.3' - _node_processes = ["Ambari", "YARN Timeline Server", "DataNode", - "MapReduce History Server", "NameNode", "NodeManager", - "Oozie", "ResourceManager", "SecondaryNameNode", - "ZooKeeper"] - - def test_hdp_two_templates(self): - """Sahara test for checking CRUD operations on HDP2 templates - Target component: Sahara - - Scenario: - 1. Create a simple node group template - 2. Get the node group template - 3. List node group templates - 4. Delete the node group template - 5. Create a simple cluster template - 6. Get the cluster template - 7. List cluster templates - 8. Delete the cluster template - - Duration: 80 s. - Available since release: 2015.1.0-8.0 - Deployment tags: Sahara - """ - - fail_msg = 'Failed to create node group template.' - ng_template = self.verify(10, self.client.node_group_templates.create, - 1, fail_msg, 'creating node group template', - **self.ng_template) - - fail_msg = 'Failed to get node group template.' - self.verify(10, self.client.node_group_templates.get, 2, - fail_msg, 'getting node group template', ng_template.id) - - fail_msg = 'Failed to list node group templates.' - self.verify(10, self.client.node_group_templates.list, 3, - fail_msg, 'listing node group templates') - - fail_msg = 'Failed to delete node group template.' - self.verify(10, self.client.node_group_templates.delete, 4, - fail_msg, 'deleting node group template', ng_template.id) - - fail_msg = 'Failed to create cluster template.' - cl_template = self.verify(10, self.client.cluster_templates.create, 5, - fail_msg, 'creating cluster template', - **self.cl_template) - - fail_msg = 'Failed to get cluster template.' - self.verify(10, self.sahara_client.cluster_templates.get, 6, - fail_msg, 'getting cluster template', cl_template.id) - - fail_msg = 'Failed to list cluster templates.' - self.verify(10, self.sahara_client.cluster_templates.list, 7, - fail_msg, 'listing cluster templates') - - fail_msg = 'Failed to delete cluster template.' - self.verify(10, self.sahara_client.cluster_templates.delete, 8, - fail_msg, 'deleting cluster template', cl_template.id) diff --git a/fuel_health/tests/smoke/__init__.py b/fuel_health/tests/smoke/__init__.py deleted file mode 100644 index 2c383fe6..00000000 --- a/fuel_health/tests/smoke/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Main purpose of following attribute is -to supply general information about test set. 
-This information will be stored in ostf database -in test_sets table. -""" -__profile__ = { - "test_runs_ordering_priority": 2, - "id": "smoke", - "driver": "nose", - "test_path": "fuel_health/tests/smoke", - "cleanup_path": "fuel_health.cleanup", - "description": "Functional tests. Duration 3 min - 14 min", - "exclusive_testsets": ['smoke_platform'] -} diff --git a/fuel_health/tests/smoke/test_create_flavor.py b/fuel_health/tests/smoke/test_create_flavor.py deleted file mode 100644 index 1c082b24..00000000 --- a/fuel_health/tests/smoke/test_create_flavor.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import nmanager - - -class FlavorsAdminTest(nmanager.SmokeChecksTest): - """Tests for flavor creation that require admin privileges.""" - - def test_create_flavor(self): - """Create instance flavor - Target component: Nova - - Scenario: - 1. Create small-size flavor. - 2. Check that created flavor has the expected name. - 3. Check that the flavor disk has the expected size. - 4. Delete created flavor. - Duration: 30 s. - """ - fail_msg = "Flavor was not created properly." - flavor = self.verify(30, self._create_flavors, 1, - fail_msg, - "flavor creation", - self.compute_client, 255, 1) - - msg_s2 = "Flavor name is not the same as requested." - self.verify_response_true( - flavor.name.startswith('ost1_test-flavor'), - 'Step 2 failed: {msg}'.format(msg=msg_s2)) - - msg_s3 = "Disk size is not the same as requested." - self.verify_response_body_value( - body_structure=flavor.disk, - value=1, msg=msg_s3, failed_step=3) - - msg_s4 = "Flavor failed to be deleted." - self.verify(30, self._delete_flavors, 4, msg_s4, - "flavor deletion", self.compute_client, flavor) diff --git a/fuel_health/tests/smoke/test_create_images.py b/fuel_health/tests/smoke/test_create_images.py deleted file mode 100644 index c13ddf9d..00000000 --- a/fuel_health/tests/smoke/test_create_images.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
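The small-flavor check above (create a 255 MB RAM / 1 GB disk flavor, verify its name and disk size, delete it) reduces to a few python-novaclient calls. A minimal sketch, assuming an authenticated novaclient v2 compute_client; the function and flavor names are illustrative:

def exercise_small_flavor(compute_client, name='ost1_test-flavor-demo'):
    # Create a small flavor, check the attributes the test asserts on,
    # and clean it up even if an assertion fails.
    flavor = compute_client.flavors.create(name=name, ram=255, vcpus=1, disk=1)
    try:
        assert flavor.name.startswith('ost1_test-flavor'), 'unexpected flavor name'
        assert flavor.disk == 1, 'unexpected disk size'
    finally:
        compute_client.flavors.delete(flavor)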
- -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import glancemanager - -LOG = logging.getLogger(__name__) - - -class GlanceSmokeTests(glancemanager.GlanceTest): - """Test suite verifies: - - image creation - - image update - - image deletion - """ - - def test_create_and_delete_image(self): - """Check create, update and delete image actions using Glance v1 - Target component: Glance - - Scenario: - 1.Create image - 2.Checking image status - 3.Check that image was created successfully - 4.Update image with properties - 5.Check that properties was updated successfully - 6.Delete image - - Duration: 130 s. - Deployment tags: disabled - Available since release: 2014.2-6.1 - """ - # TODO(tleontovich) enable when mos LP1527224 fixed - fail_msg = ("Error creating image. Please refer to Openstack logs " - "for more information.") - self.image = self.verify(100, self.image_create, 1, fail_msg, - 'Image creation', self.glance_client_v1) - - fail_msg = ("Image status is incorrect. Please refer to " - "Openstack logs for more information.") - self.verify(200, self.check_image_status, 2, fail_msg, - 'Checking image status', self.glance_client_v1, self.image) - - fail_msg = ("Image doesn't appear at list. Please refer to " - "Openstack logs for more information.") - self.verify(100, self.find_image_by_id, 3, fail_msg, 'Finding image', - self.glance_client_v1, self.image.id) - - group_props = rand_name("ostf_test") - prop = rand_name("ostf-prop") - value_prop = rand_name("prop-value") - - fail_msg = ("Can't update image with properties. Please refer to " - "Openstack logs for more information.") - self.image = self.verify(100, self.update_image, 4, fail_msg, - 'Updating image', self.glance_client_v1, - self.image, group_props, prop, value_prop) - - fail_msg = ("Can't find appended properties. Please refer to " - "OSTF logs for more information.") - self.verify(100, self.find_props, 5, fail_msg, 'Finding properties', - self.glance_client_v1, self.image, group_props, prop, - value_prop) - - fail_msg = ("Can't delete image. Please refer to Openstack logs " - "for more information.") - self.verify(100, self.delete_image, 6, fail_msg, 'Deleting image', - self.glance_client_v1, self.image) - - def test_create_and_delete_image_v2(self): - """Check create, update and delete image actions using Glance v2 - Target component: Glance - - Scenario: - 1.Send request to create image - 2.Checking image status - 3.Check that image was created successfully - 4.Update image with properties - 5.Check that properties was updated successfully - 6.Delete image - - Duration: 70 s. - Available since release: 2014.2-6.1 - """ - fail_msg = ("Error creating image. Please refer to Openstack logs " - "for more information.") - self.image = self.verify(100, self.image_create, 1, fail_msg, - 'Image creation', self.glance_client) - - fail_msg = ("Image status is incorrect. Please refer to " - "Openstack logs for more information.") - self.verify(100, self.check_image_status, 2, fail_msg, - 'Checking image status', self.glance_client, self.image) - - fail_msg = ("Image doesn't appear at list. Please refer to " - "Openstack logs for more information.") - self.verify(100, self.find_image_by_id, 3, fail_msg, 'Finding image', - self.glance_client, self.image.id) - - group_props = rand_name("ostf_test") - prop = rand_name("ostf-prop") - value_prop = rand_name("prop-value") - - fail_msg = ("Can't update image with properties. 
Please refer to " - "Openstack logs for more information.") - self.image = self.verify(100, self.update_image, 4, fail_msg, - 'Updating image', self.glance_client, - self.image.id, group_props, prop, value_prop) - - fail_msg = ("Can't find appended properties. Please refer to " - "OSTF logs for more information.") - self.verify(100, self.find_props, 5, fail_msg, 'Finding properties', - self.glance_client, self.image, group_props, prop, - value_prop) - - fail_msg = ("Can't delete image. Please refer to Openstack logs " - "for more information.") - self.verify(200, self.delete_image, 6, fail_msg, 'Deleting image', - self.glance_client, self.image.id) diff --git a/fuel_health/tests/smoke/test_create_volume.py b/fuel_health/tests/smoke/test_create_volume.py deleted file mode 100644 index b7437eda..00000000 --- a/fuel_health/tests/smoke/test_create_volume.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health import nmanager - - -LOG = logging.getLogger(__name__) - - -class VolumesTest(nmanager.SmokeChecksTest): - - @classmethod - def setUpClass(cls): - super(VolumesTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.micro_flavors = cls.find_micro_flavor() - - def setUp(self): - super(VolumesTest, self).setUp() - self.check_clients_state() - if (not self.config.volume.cinder_node_exist - and not self.config.volume.ceph_exist): - self.skipTest('There are no cinder nodes or ' - 'ceph storage for volume') - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes') - self.check_image_exists() - - @classmethod - def tearDownClass(cls): - super(VolumesTest, cls).tearDownClass() - - def _wait_for_volume_status(self, volume, status): - self.status_timeout(self.volume_client.volumes, volume.id, status) - - def _wait_for_instance_status(self, server, status): - self.status_timeout(self.compute_client.servers, server.id, status) - - def test_volume_create(self): - """Create volume and attach it to instance - Target component: Compute - - Scenario: - 1. Create a new small-size volume. - 2. Wait for volume status to become "available". - 3. Check volume has correct name. - 4. Create new instance. - 5. Wait for "Active" status - 6. Attach volume to an instance. - 7. Check volume status is "in use". - 8. Get information on the created volume by its id. - 9. Detach volume from the instance. - 10. Check volume has "available" status. - 11. Delete volume. - 12. Verify that volume deleted - 13. Delete server. - Duration: 350 s. - """ - - msg_s1 = 'Volume was not created.' 
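# _wait_for_volume_status() above delegates to status_timeout(); the polling
# it implies looks roughly like the sketch below (an assumption for
# illustration, not the fuel_health helper itself), using python-cinderclient's
# volumes.get() call, which this test also uses later to fetch volume details.
import time


def wait_for_volume_status(volume_client, volume_id, status,
                           timeout=200, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        volume = volume_client.volumes.get(volume_id)
        if volume.status == status:
            return volume
        if volume.status == 'error':
            raise AssertionError('Volume %s went into the error state'
                                 % volume_id)
        time.sleep(interval)
    raise AssertionError('Volume %s did not become %s within %d s'
                         % (volume_id, status, timeout))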
- az = 'nova' - - # Create volume - volume = self.verify(120, self._create_volume, 1, - msg_s1, - "volume creation", - self.volume_client, availability_zone=az) - - self.verify(200, self._wait_for_volume_status, 2, - msg_s1, - "volume becoming 'available'", - volume, 'available') - - self.verify_response_true( - volume.name.startswith('ostf-test-volume'), - 'Step 3 failed: {msg}'.format(msg=msg_s1)) - - # create instance - instance = self.verify(200, self._create_server, 4, - "Instance creation failed. ", - "server creation", - self.compute_client) - - self.verify(200, self._wait_for_instance_status, 5, - 'Instance status did not become "ACTIVE".', - "instance becoming 'ACTIVE'", - instance, 'ACTIVE') - - # Attach volume - self.verify(120, self._attach_volume_to_instance, 6, - 'Volume couldn`t be attached.', - 'volume attachment', - volume, instance.id) - - self.verify(180, self._wait_for_volume_status, 7, - 'Attached volume status did not become "in-use".', - "volume becoming 'in-use'", - volume, 'in-use') - - # get volume details - self.verify(20, self.volume_client.volumes.get, 8, - "Can not retrieve volume details. ", - "retrieving volume details", volume.id) - - # detach volume - self.verify(50, self._detach_volume, 9, - 'Can not detach volume. ', - "volume detachment", - instance.id, volume.id) - - self.verify(120, self._wait_for_volume_status, 10, - 'Volume status did not become "available".', - "volume becoming 'available'", - volume, 'available') - - self.verify(50, self.volume_client.volumes.delete, 11, - 'Can not delete volume. ', - "volume deletion", - volume) - - self.verify(50, self.verify_volume_deletion, 12, - 'Can not delete volume. ', - "volume deletion", - volume) - - self.verify(60, self._delete_server, 13, - "Can not delete server. ", - "server deletion", - instance) - - def test_create_boot_volume(self): - """Create volume and boot instance from it - Target component: Compute - - Scenario: - 1. Create a new small-size volume from image. - 2. Wait for volume status to become "available". - 3. Launch instance from created volume. - 4. Wait for "Active" status. - 5. Delete instance. - 6. Wait for volume status to become available - 7. Delete volume. - 8. Verify that volume deleted - Duration: 350 s. - """ - fail_msg_step_1 = 'Volume was not created' - az = 'nova' - # Create volume - volume = self.verify(120, self._create_boot_volume, 1, - fail_msg_step_1, - "volume creation", - self.volume_client, availability_zone=az) - - self.verify(200, self._wait_for_volume_status, 2, - fail_msg_step_1, - "volume becoming 'available'", - volume, 'available') - - # create instance - instance = self.verify(200, self.create_instance_from_volume, 3, - "Instance creation failed. ", - "server creation", - self.compute_client, volume) - - self.verify(200, self._wait_for_instance_status, 4, - 'Instance status did not become "ACTIVE".', - "instance becoming 'ACTIVE'", - instance, 'ACTIVE') - - self.verify(60, self._delete_server, 5, - "Can not delete server. ", - "server deletion", - instance) - - self.verify(200, self._wait_for_volume_status, 6, - 'Volume status did not become "available".', - "volume becoming 'available'", - volume, 'available') - - self.verify(50, self.volume_client.volumes.delete, 7, - 'Can not delete volume. ', - "volume deletion", - volume) - - self.verify(50, self.verify_volume_deletion, 8, - 'Can not delete volume. 
', - "volume deletion", - volume) diff --git a/fuel_health/tests/smoke/test_dpdk.py b/fuel_health/tests/smoke/test_dpdk.py deleted file mode 100644 index b474f8bd..00000000 --- a/fuel_health/tests/smoke/test_dpdk.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import neutronmanager -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class TestDPDK(neutronmanager.NeutronBaseTest, nmanager.SmokeChecksTest): - """Test suite verifies: - - blah-blah - """ - - def test_check_dpdk_instance_connectivity(self): - """Check network connectivity from instance with DPDK via floating IP - Target component: Neutron - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create router - 3. Create network - 4. Create subnet - 5. Uplink subnet to router. - 6. Create new flavor with huge pages - 7. Create an instance using the new flavor, security group - in created subnet. Boot it on compute with enabled DPDK. - 8. Create a new floating IP - 9. Assign the new floating IP to the instance. - 10. Check connectivity to the floating IP using ping command. - 11. Check that public IP 8.8.8.8 can be pinged from instance. - 12. Disassociate server floating ip. - 13. Delete floating ip - 14. Delete server. - 15. Delete flavor - 16. Remove router. - 17. Remove subnet - 18. Remove network - Duration: 300 s. - - Deployment tags: computes_with_dpdk - """ - if not self.config.compute.dpdk_compute_fqdn: - self.skipTest('There are no compute nodes with DPDK') - - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-dpdk-') - security_groups = [self.security_groups[self.tenant_id].name] - - router = self.verify(30, self.create_router, 2, - 'Router can not be created', 'Router creation', - name) - - network = self.verify(20, self.create_network, 3, - 'Network can not be created', - 'Network creation', name) - - subnet = self.verify(20, self.create_subnet, 4, - 'Subnet can not be created', - 'Subnet creation', network) - - self.verify(20, self.uplink_subnet_to_router, 5, - 'Can not uplink subnet to router', - 'Uplink subnet to router', router, subnet) - - fail_msg = "Flavor was not created properly." 
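# Step 6 creates the flavor with use_huge_page=True. In plain python-novaclient
# terms that boils down to the sketch below (an illustrative assumption, not
# the _create_flavors helper itself): the hw:mem_page_size extra spec makes
# Nova back the instance with huge pages, which the DPDK/vhost-user datapath
# requires.
def create_hugepage_flavor(compute_client, name='ost1_test-dpdk-flavor'):
    flavor = compute_client.flavors.create(name=name, ram=256, vcpus=1, disk=1)
    flavor.set_keys({'hw:mem_page_size': 'large'})
    return flavor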
- flavor = self.verify(30, self._create_flavors, 6, - fail_msg, - "flavor creation", - self.compute_client, 256, 1, use_huge_page=True) - - server = self.verify(200, self._create_server, 7, - "Server can not be created.", - "server creation", - self.compute_client, name, security_groups, - net_id=network['id'], flavor_id=flavor, - az_name='nova:{}'.format( - self.config.compute.dpdk_compute_fqdn)) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 8, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 9, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('is address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 10, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (9, 60)) - - self.verify(600, self._check_connectivity_from_vm, - 11, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', ip_address, - 30, (9, 60)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 12, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 13, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(40, self._delete_server, 14, - "Server can not be deleted. ", - "server deletion", server) - - self.verify(30, self._delete_flavors, 15, - "Flavor failed to be deleted.", - "flavor deletion", self.compute_client, flavor) - - self.verify(40, self._remove_router, 16, "Router can not be deleted", - "router deletion", router, [subnet['id']]) - self.verify(20, self._remove_subnet, 17, "Subnet can not be deleted", - "Subnet deletion", subnet) - self.verify(20, self._remove_network, 18, - "Network can not be deleted", "Network deletion", network) diff --git a/fuel_health/tests/smoke/test_live_migration.py b/fuel_health/tests/smoke/test_live_migration.py deleted file mode 100644 index 6624fbf0..00000000 --- a/fuel_health/tests/smoke/test_live_migration.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class TestInstanceLiveMigration(nmanager.NovaNetworkScenarioTest): - """Test suit verifies: - - Instance creation - - Floating ip creation - - Migrate instance - """ - @classmethod - def setUpClass(cls): - super(TestInstanceLiveMigration, cls).setUpClass() - if cls.manager.clients_initialized: - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.keypairs = {} - cls.security_groups = {} - cls.network = [] - cls.servers = [] - cls.floating_ips = [] - - def setUp(self): - super(TestInstanceLiveMigration, self).setUp() - self.check_clients_state() - if not self.config.compute.compute_nodes and \ - self.config.compute.libvirt_type != 'vcenter': - self.skipTest('There are no compute nodes') - if len(self.config.compute.compute_nodes) < 2: - self.skipTest('To test live migration at least' - ' 2 compute nodes are needed') - - def tearDown(self): - super(TestInstanceLiveMigration, self).tearDown() - if self.manager.clients_initialized: - if self.servers: - for server in self.servers: - try: - self._delete_server(server) - self.servers.remove(server) - except Exception: - LOG.exception("Server {0} already \ - deleted.".format(server)) - - def test_001_live_migration(self): - """Instance live migration - Target component: Nova - - Scenario: - 1. Create a new security group. - 2. Create an instance using the new security group. - 3. Assign floating ip - 4. Check instance connectivity by floating ip - 5. Find host to migrate - 6. Migrate instance - 7. Check instance host - 8. Check connectivity to migrated instance by floating ip - 9. Remove floating ip - 10. Delete instance. - Duration: 200 s. 
- Deployment tags: ephemeral_ceph - Available since release: 2014.2-6.1 - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, - self._create_security_group, - 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-migrate-') - security_groups = [self.security_groups[self.tenant_id].name] - - server = self.verify( - 200, - self._create_server, - 2, - "Creating instance using the new security group has failed.", - 'image creation', - self.compute_client, name, security_groups - ) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 3, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 3, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('is address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 4, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (9, 60)) - - free_host = self.verify( - 20, - self.get_free_host, - 5, - "Can not find free host for instance migration.", - 'free host for migration', server) - - LOG.debug('Free host for migration is {0}'.format(free_host)) - - migrate_server = self.verify( - 300, - self.migrate_instance, - 6, - "Instance migration failed", 'Instance migration', - server, free_host) - - LOG.debug('Migrated instance {0}'.format(migrate_server)) - - self.verify_response_body_content( - free_host, self.get_instance_host(migrate_server), - msg='Server failed to migrate', - failed_step='7') - - self.verify(600, self._check_vm_connectivity, 8, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (9, 60)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 9, "Floating IP cannot be removed.", - "removing floating IP", migrate_server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 9, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(30, self._delete_server, 10, - "Server can not be deleted.", - "server deletion", server) diff --git a/fuel_health/tests/smoke/test_neutron_actions.py b/fuel_health/tests/smoke/test_neutron_actions.py deleted file mode 100644 index 065f2342..00000000 --- a/fuel_health/tests/smoke/test_neutron_actions.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import neutronmanager - -LOG = logging.getLogger(__name__) - - -class TestNeutron(neutronmanager.NeutronBaseTest): - """Test suite verifies: - - router creation - - network creation - - subnet creation - - opportunity to attach network to router - - instance creation in created network - - instance network connectivity - """ - - def test_check_neutron_objects_creation(self): - """Check network connectivity from instance via floating IP - Target component: Neutron - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create router - 3. Create network - 4. Create subnet - 5. Uplink subnet to router. - 6. Create an instance using the new security group - in created subnet. - 7. Create a new floating IP - 8. Assign the new floating IP to the instance. - 9. Check connectivity to the floating IP using ping command. - 10. Check that public IP 8.8.8.8 can be pinged from instance. - 11. Disassociate server floating ip. - 12. Delete floating ip - 13. Delete server. - 14. Remove router. - 15. Remove subnet - 16. Remove network - - Duration: 300 s. - - Deployment tags: neutron, computes_without_dpdk - """ - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes') - - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - - router = self.verify(30, self.create_router, 2, - 'Router can not be created', 'Router creation', - name) - - network = self.verify(20, self.create_network, 3, - 'Network can not be created', - 'Network creation', name) - - subnet = self.verify(20, self.create_subnet, 4, - 'Subnet can not be created', - 'Subnet creation', network) - - self.verify(20, self.uplink_subnet_to_router, 5, - 'Can not uplink subnet to router', - 'Uplink subnet to router', router, subnet) - - server = self.verify(200, self._create_server, 6, - "Server can not be created.", - "server creation", - self.compute_client, name, security_groups, - net_id=network['id']) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 7, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 8, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('IP address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 9, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (9, 60)) - - self.verify(600, self._check_connectivity_from_vm, - 10, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', ip_address, - 30, (9, 60)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 11, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 12, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(40, self._delete_server, 13, - 
"Server can not be deleted. ", - "server deletion", server) - - self.verify(40, self._remove_router, 14, "Router can not be deleted", - "router deletion", router, [subnet['id']]) - self.verify(20, self._remove_subnet, 15, "Subnet can not be deleted", - "Subnet deletion", subnet) - self.verify(20, self._remove_network, 16, - "Network can not be deleted", "Network deletion", network) - - def test_check_sriov_instance_connectivity(self): - """Check network connectivity from SRIOV instance via floating IP - Target component: Neutron - - Scenario: - 1. Create a new security group (if it doesn't exist yet). - 2. Create SR-IOV port - 3. Create an instance using new security group and SR-IOV port. - 4. Create new floating IP - 5. Assign created floating IP to the instance. - 6. Check connectivity to the floating IP using ping command. - 7. Check that public IP 8.8.8.8 can be pinged from instance. - 8. Disassociate server floating ip. - 9. Delete floating ip - 10. Delete server. - 11. Delete SR-IOV port - Duration: 300 s. - - Deployment tags: sriov - Available since release: mitaka-9.0 - """ - if 'physnet2' not in self.config.compute.sriov_physnets: - self.skipTest('physnet2 is not configured for any interface') - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-sriov-') - security_groups = [self.security_groups[self.tenant_id].name] - - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - - port = self.verify( - 20, - self._create_port, - 2, - "SRIOV port can not be created.", - 'SRIOV port creation', - net_id=network[0], vnic_type='direct') - - server = self.verify(250, self._create_server, 3, - "Server can not be created.", - "server creation", - self.compute_client, name, security_groups, - port=port, net_id=network[0]) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 4, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 5, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('IP address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 6, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (5, 10)) - - self.verify(600, self._check_connectivity_from_vm, - 7, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', ip_address, - 30, (5, 10)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 8, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 9, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - self.verify(30, self._delete_server, 10, - "Server can not be deleted. ", - "server deletion", server) - - self.verify(30, self.neutron_client.delete_port, 11, - "Port can not be deleted. 
", - "port deletion", port['port']['id']) diff --git a/fuel_health/tests/smoke/test_nova_create_instance_with_connectivity.py b/fuel_health/tests/smoke/test_nova_create_instance_with_connectivity.py deleted file mode 100644 index 1c244e6c..00000000 --- a/fuel_health/tests/smoke/test_nova_create_instance_with_connectivity.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2013 OpenStack, LLC -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class TestNovaNetwork(nmanager.NovaNetworkScenarioTest): - """Test suit verifies: - - keypairs creation - - security groups creation - - Network creation - - Instance creation - - Floating ip creation - - Instance connectivity by floating IP - """ - @classmethod - def setUpClass(cls): - super(TestNovaNetwork, cls).setUpClass() - if cls.manager.clients_initialized: - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.keypairs = {} - cls.security_groups = {} - cls.network = [] - cls.servers = [] - cls.floating_ips = [] - - def setUp(self): - super(TestNovaNetwork, self).setUp() - self.check_clients_state() - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes') - - def tearDown(self): - super(TestNovaNetwork, self).tearDown() - if self.manager.clients_initialized: - if self.servers: - for server in self.servers: - try: - self._delete_server(server) - self.servers.remove(server) - except Exception: - LOG.exception("Server {0} was already \ - deleted.".format(server)) - - def test_001_create_keypairs(self): - """Create keypair - Target component: Nova. - - Scenario: - 1. Create a new keypair, check if it was created successfully. - Duration: 25 s. - - """ - self.keypairs[self.tenant_id] = self.verify(30, - self._create_keypair, - 1, - 'Keypair can not be' - ' created.', - 'keypair creation', - self.compute_client) - - def test_002_create_security_groups(self): - """Create security group - Target component: Nova - - Scenario: - 1. Create a security group, check if it was created correctly. - Duration: 25 s. - - """ - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - def test_003_check_networks(self): - """Check network parameters - Target component: Nova - - Scenario: - 1. Get the list of networks. - 2. Confirm that networks have expected labels. - 3. Confirm that networks have expected ids. - Duration: 50 s. 
- - """ - seen_nets = self.verify( - 50, - self._list_networks, - 1, - "List of networks is not available.", - 'listing networks' - ) - seen_labels, seen_ids = zip(*((n.label, n.id) for n in seen_nets)) - for mynet in self.network: - self.verify_response_body(seen_labels, mynet.label, - ('Network can not be created.' - 'properly'), failed_step=2) - self.verify_response_body(seen_ids, mynet.id, - ('Network can not be created.' - ' properly '), failed_step=3) - - def test_004_create_servers(self): - """Launch instance - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - 3. Delete instance. - Duration: 200 s. - - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, - self._create_security_group, - 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - - server = self.verify( - 200, - self._create_server, - 2, - "Creating instance using the new security group has failed.", - 'image creation', - self.compute_client, name, security_groups - ) - - self.verify(60, self._delete_server, 3, - "Server can not be deleted.", - "server deletion", server) - - def test_008_check_public_instance_connectivity_from_instance(self): - """Check network connectivity from instance via floating IP - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - 3. Create a new floating IP - 4. Assign the new floating IP to the instance. - 5. Check connectivity to the floating IP using ping command. - 6. Check that public IP 8.8.8.8 can be pinged from instance. - 7. Disassociate server floating ip. - 8. Delete floating ip - 9. Delete server. - Duration: 300 s. 
- - Deployment tags: nova_network - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - - server = self.verify(250, self._create_server, 2, - "Server can not be created.", - "server creation", - self.compute_client, name, security_groups) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 3, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 4, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('is address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 5, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (9, 60)) - - self.verify(600, self._check_connectivity_from_vm, - 6, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', ip_address, - 30, (9, 60)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 7, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 8, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(60, self._delete_server, 9, - "Server can not be deleted. ", - "server deletion", server) - - def test_006_check_internet_connectivity_instance_without_floatingIP(self): - """Check network connectivity from instance without floating IP - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - (if it doesn`t exist yet). - 3. Check that public IP 8.8.8.8 can be pinged from instance. - 4. Delete server. - Duration: 300 s. - - Deployment tags: nova_network - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - - server = self.verify( - 250, self._create_server, 2, - "Server can not be created.", - 'server creation', - self.compute_client, name, security_groups) - - try: - for addr in server.addresses: - if addr.startswith('novanetwork'): - instance_ip = server.addresses[addr][0]['addr'] - if not self.config.compute.use_vcenter: - compute = getattr(server, 'OS-EXT-SRV-ATTR:host') - else: - compute = None - except Exception: - LOG.exception("Unable to get instance details") - self.fail("Step 3 failed: cannot get instance details. " - "Please refer to OpenStack logs for more details.") - - self.verify(600, self._check_connectivity_from_vm, - 3, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', - instance_ip, 30, (9, 30), compute) - - self.verify(60, self._delete_server, 4, - "Server can not be deleted. 
", - "server deletion", server) - - def test_009_create_server_with_file(self): - """Launch instance with file injection - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance with injected file. - 3. Assign floating ip to instance. - 4. Check file exists on created instance. - 5. Delete floating ip. - 6. Delete instance. - Duration: 200 s. - Deployment tags: computes_without_dpdk - Available since release: 2014.2-6.1 - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, - self._create_security_group, - 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-file_inj-') - security_groups = [self.security_groups[self.tenant_id].name] - - data_file = {"/home/cirros/server.txt": self._load_file('server.txt')} - server = self.verify( - 300, - self._create_server, - 2, - "Creating instance using the new security group has failed.", - 'instance creation', - self.compute_client, name, security_groups, data_file=data_file - ) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 3, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 3, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - - self.verify( - 600, self._run_command_from_vm, - 4, "Can not find injected file on instance.", - 'check if injected file exists', ip_address, - 30, (9, 30), - '[ -f /home/cirros/server.txt ] && echo "True" || echo "False"') - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 5, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 5, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(60, self._delete_server, 6, - "Server can not be deleted. ", - "server deletion", server) diff --git a/fuel_health/tests/smoke/test_nova_image_actions.py b/fuel_health/tests/smoke/test_nova_image_actions.py deleted file mode 100644 index aba8cbe5..00000000 --- a/fuel_health/tests/smoke/test_nova_image_actions.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import nmanager -from fuel_health import test - - -LOG = logging.getLogger(__name__) - - -class TestImageAction(nmanager.SmokeChecksTest): - """Test class verifies the following: - - verify that image can be created; - - verify that instance can be booted from created image; - - verify that snapshot can be created from an instance; - - verify that instance can be booted from a snapshot. - """ - @classmethod - def setUpClass(cls): - super(TestImageAction, cls).setUpClass() - if cls.manager.clients_initialized: - cls.micro_flavors = cls.find_micro_flavor() - - @classmethod - def tearDownClass(cls): - super(TestImageAction, cls).tearDownClass() - - def setUp(self): - super(TestImageAction, self).setUp() - self.check_clients_state() - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes') - self.check_image_exists() - - def _wait_for_server_status(self, server, status): - self.status_timeout(self.compute_client.servers, - server.id, - status) - - def _wait_for_image_status(self, image_id, status): - self.status_timeout(self.compute_client.images, image_id, status) - - def _wait_for_server_deletion(self, server): - def is_deletion_complete(): - # Deletion testing is only required for objects whose - # existence cannot be checked via retrieval. - if isinstance(server, dict): - return True - try: - server.get() - except Exception as e: - # Clients are expected to return an exception - # called 'NotFound' if retrieval fails. - if e.__class__.__name__ == 'NotFound': - return True - self.error_msg.append(e) - LOG.exception(e) - return False - - # Block until resource deletion has completed or timed-out - test.call_until_true(is_deletion_complete, 10, 1) - - def _boot_image(self, image_id): - if not self.find_micro_flavor(): - self.fail("Flavor for tests was not created. Seems that " - "something is wrong with nova services.") - else: - flavor_id = self.micro_flavors[0] - disk = self.glance_client_v1.images.get(image_id).disk_format - if disk == 'vmdk': - az_name = 'vcenter' - else: - az_name = 'nova' - name = rand_name('ost1_test-image') - client = self.compute_client - LOG.debug("name:%s, image:%s" % (name, image_id)) - if 'neutron' in self.config.network.network_provider: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - if network: - create_kwargs = { - 'nics': [ - {'net-id': network[0]}, - ], - } - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". 
- format(self.private_net)) - server = client.servers.create(name=name, - image=image_id, - flavor=flavor_id, - availability_zone=az_name, - **create_kwargs) - else: - server = client.servers.create(name=name, - image=image_id, - flavor=flavor_id, - availability_zone=az_name) - self.set_resource(name, server) - self.verify_response_body_content( - name, server.name, - msg="Please refer to OpenStack logs for more details.") - self._wait_for_server_status(server, 'ACTIVE') - server = client.servers.get(server) # getting network information - LOG.debug("server:%s" % server) - return server - - def _create_image(self, server): - snapshot_name = rand_name('ost1_test-snapshot-') - create_image_client = self.compute_client.servers.create_image - image_id = create_image_client(server, snapshot_name) - self.addCleanup(self.compute_client.images.delete, image_id) - self._wait_for_server_status(server, 'ACTIVE') - self._wait_for_image_status(image_id, 'ACTIVE') - snapshot_image = self.compute_client.images.get(image_id) - self.verify_response_body_content( - snapshot_name, snapshot_image.name, - msg="Please refer to OpenStack logs for more details.") - return image_id - - def test_snapshot(self): - """Launch instance, create snapshot, launch instance from snapshot - Target component: Glance - - Scenario: - 1. Get existing image by name. - 2. Launch an instance using the default image. - 3. Make snapshot of the created instance. - 4. Delete the instance created in step 1. - 5. Wait while instance deleted - 6. Launch another instance from the snapshot created in step 2. - 7. Delete server. - Duration: 300 s. - """ - - image = self.verify(30, self.get_image_from_name, 1, - "Image can not be retrieved.", - "getting image by name") - - server = self.verify(180, self._boot_image, 2, - "Image can not be booted.", - "image booting", - image) - - # snapshot the instance - snapshot_image_id = self.verify(180, self._create_image, 3, - "Snapshot of an" - " instance can not be created.", - 'snapshotting an instance', - server) - - self.verify(180, self.compute_client.servers.delete, 4, - "Instance can not be deleted.", - 'Instance deletion', - server) - - self.verify(180, self._wait_for_server_deletion, 5, - "Instance can not be deleted.", - 'Wait for instance deletion complete', - server) - - server = self.verify(180, self._boot_image, 6, - "Instance can not be launched from snapshot.", - 'booting instance from snapshot', - snapshot_image_id) - - self.verify(60, self._delete_server, 7, - "Server can not be deleted.", - "server deletion", server) diff --git a/fuel_health/tests/smoke/test_user_create.py b/fuel_health/tests/smoke/test_user_create.py deleted file mode 100644 index 5f99967e..00000000 --- a/fuel_health/tests/smoke/test_user_create.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import requests - -from fuel_health import nmanager - -LOG = logging.getLogger(__name__) - - -class TestUserTenantRole(nmanager.SmokeChecksTest): - """Test class verifies the following: - - verify that a tenant can be created; - - verify that a user can be created based on the new tenant; - - verify that a user role can be created. - """ - - def test_create_user(self): - """Create user and authenticate with it. - Target components: Nova, Keystone - - Scenario: - 1. Create a new tenant. - 2. Check that tenant was created successfully. - 3. Create a new user. - 4. Check that user was created successfully. - 5. Create a new user role. - 6. Check that user role was created successfully. - 7. Perform token authentication. - 8. Check that authentication was successful. - 9. Send authentication request to Horizon. - 10. Confirm that response status is 200. - Duration: 80 s. - """ - # Create a tenant: - msg_s1 = 'Tenant can not be created. ' - - tenant = self.verify(20, self._create_tenant, 1, - msg_s1, 'tenant creation', self.identity_client) - - self.verify_response_true( - tenant.name.startswith('ost1_test'), - "Step 2 failed: {msg}".format(msg=msg_s1)) - - # Create a user: - msg_s3 = "User can not be created." - - user = self.verify(20, self._create_user, 3, msg_s3, - 'user creation', self.identity_client, - tenant.id) - - self.verify_response_true( - user.name.startswith('ost1_test'), - 'Step 4 failed: {msg}'.format(msg=msg_s3)) - - msg_s5 = "User role can not be created. " - - role = self.verify(20, self._create_role, - 5, msg_s5, - 'user role creation', - self.identity_client) - - self.verify_response_true( - role.name.startswith('ost1_test'), - "Step 6 failed: {msg}".format(msg=msg_s5)) - - # Authenticate with created user: - password = '123456' - msg_s7 = "Can not get authentication token." - - auth = self.verify(40, self.identity_client.tokens.authenticate, - 7, msg_s7, - 'authentication', - username=user.name, - password=password, - tenant_id=tenant.id, - tenant_name=tenant.name) - - self.verify_response_true(auth, - 'Step 8 failed: {msg}'.format(msg=msg_s7)) - try: - # Auth in horizon with non-admin user - client = requests.session() - if self.config.compute.deployment_os == 'Ubuntu': - url = self.config.horizon_ubuntu_url - else: - url = self.config.horizon_url - - # Retrieve the CSRF token first - client.get(url, verify=False) # sets cookie - if not len(client.cookies): - login_data = dict(username=user.name, - password=password, - next='/') - resp = client.post(url, data=login_data, - headers=dict(Referer=url), verify=False) - self.verify_response_status( - resp.status_code, - msg="Check that the request was successful. " - "Please refer to OpenStack logs for more details.", - failed_step=9) - else: - login_data = dict(username=user.name, - password=password, - next='/') - csrftoken = client.cookies.get('csrftoken', None) - if csrftoken: - login_data['csrfmiddlewaretoken'] = csrftoken - - resp = client.post(url, data=login_data, - headers=dict(Referer=url), verify=False) - self.verify_response_status( - resp.status_code, - msg="Check that the request was successful. " - "Please, refer to OpenStack " - "logs for more details.", - failed_step=9) - except Exception: - LOG.exception("") - self.fail("Step 10 failed: Can not authenticate in Horizon. 
" - "Please refer to OpenStack logs for more details.") diff --git a/fuel_health/tests/smoke/test_vcenter.py b/fuel_health/tests/smoke/test_vcenter.py deleted file mode 100644 index 015f7dec..00000000 --- a/fuel_health/tests/smoke/test_vcenter.py +++ /dev/null @@ -1,576 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import nmanager -from fuel_health import test - -LOG = logging.getLogger(__name__) - - -class TestVcenter(nmanager.NovaNetworkScenarioTest): - """Test suit verifies: - - Instance creation - - Floating ip creation - - Instance connectivity by floating IP - """ - @classmethod - def setUpClass(cls): - super(TestVcenter, cls).setUpClass() - if cls.manager.clients_initialized: - cls.tenant_id = cls.manager._get_identity_client( - cls.config.identity.admin_username, - cls.config.identity.admin_password, - cls.config.identity.admin_tenant_name).tenant_id - cls.keypairs = {} - cls.security_groups = {} - cls.network = [] - cls.servers = [] - cls.floating_ips = [] - - def setUp(self): - super(TestVcenter, self).setUp() - self.check_clients_state() - - def tearDown(self): - super(TestVcenter, self).tearDown() - if self.manager.clients_initialized: - if self.servers: - for server in self.servers: - try: - self._delete_server(server) - self.servers.remove(server) - except Exception: - LOG.exception("Server was already deleted.") - - @classmethod - def find_flavor_id(cls): - flavors = dict([flavor.ram, flavor.id] - for flavor in cls.compute_client.flavors.list() - if flavor.ram >= 128) - return flavors[sorted(flavors)[0]] - - def test_1_vcenter_create_servers(self): - """vCenter: Launch instance - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - 3. Delete instance. - - Duration: 200 s. - Available since release: 2014.2-6.1 - Deployment tags: use_vcenter - """ - img_name = 'TestVM-VMDK' - self.manager.config.compute.image_name = img_name - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, - self._create_security_group, - 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - flavor_id = self.find_flavor_id() - - server = self.verify( - 200, - self._create_server, - 2, - "Creating instance using the new security group has failed.", - 'image creation', - self.compute_client, name, security_groups, flavor_id, None, - img_name) - - self.verify(30, self._delete_server, 3, - "Server can not be deleted.", - "server deletion", server) - - def test_3_vcenter_check_public_instance_connectivity_from_instance(self): - """vCenter: Check network connectivity from instance via floating IP - Target component: Nova - - Scenario: - 1. 
Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - 3. Create a new floating IP - 4. Assign the new floating IP to the instance. - 5. Check connectivity to the floating IP using ping command. - 6. Check that public IP 8.8.8.8 can be pinged from instance. - 7. Disassociate server floating ip. - 8. Delete floating ip - 9. Delete server. - - Duration: 300 s. - Available since release: 2014.2-6.1 - Deployment tags: use_vcenter - """ - img_name = 'TestVM-VMDK' - self.manager.config.compute.image_name = img_name - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', - self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - flavor_id = self.find_flavor_id() - - server = self.verify(250, self._create_server, 2, - "Server can not be created.", - "server creation", - self.compute_client, name, security_groups, - flavor_id, None, img_name) - - floating_ip = self.verify( - 20, - self._create_floating_ip, - 3, - "Floating IP can not be created.", - 'floating IP creation') - - self.verify(20, self._assign_floating_ip_to_instance, - 4, "Floating IP can not be assigned.", - 'floating IP assignment', - self.compute_client, server, floating_ip) - - self.floating_ips.append(floating_ip) - - ip_address = floating_ip.ip - LOG.info('is address is {0}'.format(ip_address)) - LOG.debug(ip_address) - - self.verify(600, self._check_vm_connectivity, 5, - "VM connectivity doesn`t function properly.", - 'VM connectivity checking', ip_address, - 30, (6, 60)) - - self.verify(600, self._check_connectivity_from_vm, - 6, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', ip_address, - 30, (6, 60)) - - self.verify(20, self.compute_client.servers.remove_floating_ip, - 7, "Floating IP cannot be removed.", - "removing floating IP", server, floating_ip) - - self.verify(20, self.compute_client.floating_ips.delete, - 8, "Floating IP cannot be deleted.", - "floating IP deletion", floating_ip) - - if self.floating_ips: - self.floating_ips.remove(floating_ip) - - self.verify(30, self._delete_server, 9, - "Server can not be deleted. ", - "server deletion", server) - - def test_2_vcenter_check_internet_connectivity_without_floatingIP(self): - """vCenter: Check network connectivity from instance without floating \ - IP - Target component: Nova - - Scenario: - 1. Create a new security group (if it doesn`t exist yet). - 2. Create an instance using the new security group. - (if it doesn`t exist yet). - 3. Check that public IP 8.8.8.8 can be pinged from instance. - 4. Delete server. - - Duration: 300 s. 
- Available since release: 2014.2-6.1 - Deployment tags: nova_network, use_vcenter - """ - self.check_image_exists() - if not self.security_groups: - self.security_groups[self.tenant_id] = self.verify( - 25, self._create_security_group, 1, - "Security group can not be created.", - 'security group creation', self.compute_client) - - name = rand_name('ost1_test-server-smoke-') - security_groups = [self.security_groups[self.tenant_id].name] - img_name = 'TestVM-VMDK' - compute = None - - server = self.verify( - 250, self._create_server, 2, - "Server can not be created.", - 'server creation', - self.compute_client, name, security_groups, None, None, img_name) - - try: - for addr in server.addresses: - if addr.startswith('novanetwork'): - instance_ip = server.addresses[addr][0]['addr'] - except Exception: - LOG.exception("") - self.fail("Step 3 failed: cannot get instance details. " - "Please refer to OpenStack logs for more details.") - - self.verify(400, self._check_connectivity_from_vm, - 3, ("Connectivity to 8.8.8.8 from the VM doesn`t " - "function properly."), - 'public connectivity checking from VM', - instance_ip, 30, (6, 30), compute) - - self.verify(30, self._delete_server, 4, - "Server can not be deleted. ", - "server deletion", server) - - -class TestVcenterImageAction(nmanager.SmokeChecksTest): - """Test class verifies the following: - - verify that image can be created; - - verify that instance can be booted from created image; - - verify that snapshot can be created from an instance; - - verify that instance can be booted from a snapshot. - """ - @classmethod - def setUpClass(cls): - super(TestVcenterImageAction, cls).setUpClass() - if cls.manager.clients_initialized: - cls.micro_flavors = cls.find_micro_flavor() - - @classmethod - def tearDownClass(cls): - super(TestVcenterImageAction, cls).tearDownClass() - - def setUp(self): - super(TestVcenterImageAction, self).setUp() - self.check_clients_state() - - def _wait_for_server_status(self, server, status): - self.status_timeout(self.compute_client.servers, - server.id, - status) - - def _wait_for_image_status(self, image_id, status): - self.status_timeout(self.compute_client.images, image_id, status) - - def _wait_for_server_deletion(self, server): - def is_deletion_complete(): - # Deletion testing is only required for objects whose - # existence cannot be checked via retrieval. - if isinstance(server, dict): - return True - try: - server.get() - except Exception as e: - # Clients are expected to return an exception - # called 'NotFound' if retrieval fails. - if e.__class__.__name__ == 'NotFound': - return True - self.error_msg.append(e) - LOG.exception("") - return False - - # Block until resource deletion has completed or timed-out - test.call_until_true(is_deletion_complete, 10, 1) - - def _boot_image(self, image_id, flavor_id): - - name = rand_name('ost1_test-image') - client = self.compute_client - LOG.debug("name:%s, image:%s" % (name, image_id)) - if 'neutron' in self.config.network.network_provider: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - if network: - create_kwargs = { - 'nics': [ - {'net-id': network[0]}, - ], - } - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". 
- format(self.private_net)) - server = client.servers.create(name=name, - image=image_id, - flavor=flavor_id, - **create_kwargs) - else: - server = client.servers.create(name=name, - image=image_id, - flavor=self.micro_flavors[0]) - self.set_resource(name, server) - # self.addCleanup(self.compute_client.servers.delete, server) - self.verify_response_body_content( - name, server.name, - msg="Please refer to OpenStack logs for more details.") - self._wait_for_server_status(server, 'ACTIVE') - server = client.servers.get(server) # getting network information - LOG.debug("server:%s" % server) - return server - - def _create_image(self, server): - snapshot_name = rand_name('ost1_test-snapshot-') - create_image_client = self.compute_client.servers.create_image - image_id = create_image_client(server, snapshot_name) - self.addCleanup(self.compute_client.images.delete, image_id) - self._wait_for_server_status(server, 'ACTIVE') - self._wait_for_image_status(image_id, 'ACTIVE') - snapshot_image = self.compute_client.images.get(image_id) - self.verify_response_body_content( - snapshot_name, snapshot_image.name, - msg="Please refer to OpenStack logs for more details.") - return image_id - - def test_4_snapshot(self): - """vCenter: Launch instance, create snapshot, launch instance from \ - snapshot - Target component: Glance - - Scenario: - 1. Create flavor. - 1. Get existing image by name. - 2. Launch an instance using the default image. - 3. Make snapshot of the created instance. - 4. Delete the instance created in step 1. - 5. Wait while instance deleted - 6. Launch another instance from the snapshot created in step 2. - 7. Delete server. - 9. Delete flavor. - - Duration: 300 s. - Available since release: 2014.2-6.1 - Deployment tags: use_vcenter - """ - - img_name = 'TestVM-VMDK' - self.manager.config.compute.image_name = img_name - self.check_image_exists() - - fail_msg = "Flavor was not created properly." - flavor = self.verify(30, self._create_flavors, 1, - fail_msg, - "flavor creation", - self.compute_client, 256, 0) - - image = self.verify(30, self.get_image_from_name, 2, - "Image can not be retrieved.", - "getting image by name", - img_name) - - server = self.verify(180, self._boot_image, 3, - "Image can not be booted.", - "image booting", - image, flavor.id) - - # snapshot the instance - snapshot_image_id = self.verify(700, self._create_image, 4, - "Snapshot of an" - " instance can not be created.", - 'snapshotting an instance', - server) - - self.verify(180, self.compute_client.servers.delete, 5, - "Instance can not be deleted.", - 'Instance deletion', - server) - - self.verify(180, self._wait_for_server_deletion, 6, - "Instance can not be deleted.", - 'Wait for instance deletion complete', - server) - - server = self.verify(700, self._boot_image, 7, - "Instance can not be launched from snapshot.", - 'booting instance from snapshot', - snapshot_image_id, flavor.id) - - self.verify(30, self._delete_server, 8, - "Server can not be deleted.", - "server deletion", server) - - msg = "Flavor failed to be deleted." 
- self.verify(30, self._delete_flavors, 9, msg, - "flavor deletion", self.compute_client, flavor) - - -class VcenterVolumesTest(nmanager.SmokeChecksTest): - - @classmethod - def setUpClass(cls): - super(VcenterVolumesTest, cls).setUpClass() - if cls.manager.clients_initialized: - cls.micro_flavors = cls.find_micro_flavor() - - def setUp(self): - super(VcenterVolumesTest, self).setUp() - self.check_clients_state() - if (not self.config.volume.cinder_vmware_node_exist): - self.skipTest('There are no cinder-vmware nodes') - - @classmethod - def tearDownClass(cls): - super(VcenterVolumesTest, cls).tearDownClass() - - @classmethod - def find_flavor_id(cls): - flavors = dict([flavor.ram, flavor.id] - for flavor in cls.compute_client.flavors.list() - if flavor.ram >= 128) - return flavors[sorted(flavors)[0]] - - def _wait_for_volume_status(self, volume, status): - self.status_timeout(self.volume_client.volumes, volume.id, status) - - def _wait_for_instance_status(self, server, status): - self.status_timeout(self.compute_client.servers, server.id, status) - - def _create_server(self, client, img_name=None): - flavor_id = self.find_flavor_id() - if not flavor_id: - self.fail("Flavor for tests was not found. Seems that " - "something is wrong with nova services.") - - name = rand_name('ost1_test-volume-instance') - - base_image_id = self.get_image_from_name(img_name=img_name) - az_name = self.get_availability_zone(image_id=base_image_id) - - if 'neutron' in self.config.network.network_provider: - network = [net.id for net in - self.compute_client.networks.list() - if net.label == self.private_net] - if network: - create_kwargs = {'nics': [{'net-id': network[0]}]} - else: - self.fail("Default private network '{0}' isn't present. " - "Please verify it is properly created.". - format(self.private_net)) - server = client.servers.create( - name, base_image_id, flavor_id, - availability_zone=az_name, - **create_kwargs) - else: - server = client.servers.create(name, base_image_id, - self.micro_flavors[0].id, - availability_zone=az_name) - - self.verify_response_body_content(server.name, - name, - "Instance creation failed") - # The instance retrieved on creation is missing network - # details, necessitating retrieval after it becomes active to - # ensure correct details. - server = self._wait_server_param(client, server, 'addresses', 5, 1) - self.set_resource(name, server) - return server - - def test_5_vcenter_volume_create(self): - """vCenter: Create volume and attach it to instance - Target component: Compute - - Scenario: - 1. Create a new small-size volume. - 2. Wait for volume status to become "available". - 3. Create new instance. - 4. Wait for "Active" status - 5. Attach volume to an instance. - 6. Check volume status is "in use". - 7. Get information on the created volume by its id. - 8. Detach volume from the instance. - 9. Check volume has "available" status. - 10. Delete volume. - 11. Verify that volume deleted - 12. Delete server. - - Duration: 350 s. - Available since release: 2014.2-6.1 - Deployment tags: use_vcenter - """ - msg_s1 = 'Volume was not created.' 
- img_name = 'TestVM-VMDK' - self.manager.config.compute.image_name = img_name - self.check_image_exists() - az = self.config.volume.cinder_vmware_storage_az - # Create volume - volume = self.verify(120, self._create_volume, 1, - msg_s1, - "volume creation", - self.volume_client, None, availability_zone=az) - - self.verify(200, self._wait_for_volume_status, 2, - msg_s1, - "volume becoming 'available'", - volume, 'available') - - # create instance - instance = self.verify(200, self._create_server, 3, - "Instance creation failed. ", - "server creation", - self.compute_client, img_name) - - self.verify(200, self._wait_for_instance_status, 4, - 'Instance status did not become "available".', - "instance becoming 'available'", - instance, 'ACTIVE') - - # Attach volume - self.verify(120, self._attach_volume_to_instance, 5, - 'Volume couldn`t be attached.', - 'volume attachment', - volume, instance.id) - - self.verify(180, self._wait_for_volume_status, 6, - 'Attached volume status did not become "in-use".', - "volume becoming 'in-use'", - volume, 'in-use') - - # get volume details - self.verify(20, self.volume_client.volumes.get, 7, - "Can not retrieve volume details. ", - "retrieving volume details", volume.id) - - # detach volume - self.verify(50, self._detach_volume, 8, - 'Can not detach volume. ', - "volume detachment", - instance.id, volume.id) - - self.verify(120, self._wait_for_volume_status, 9, - 'Volume status did not become "available".', - "volume becoming 'available'", - volume, 'available') - - self.verify(50, self.volume_client.volumes.delete, 10, - 'Can not delete volume. ', - "volume deletion", - volume) - - self.verify(50, self.verify_volume_deletion, 11, - 'Can not delete volume. ', - "volume deletion", - volume) - - self.verify(30, self._delete_server, 12, - "Can not delete server. ", - "server deletion", - instance) diff --git a/fuel_health/tests/tests_platform/__init__.py b/fuel_health/tests/tests_platform/__init__.py deleted file mode 100644 index a66cfae8..00000000 --- a/fuel_health/tests/tests_platform/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Main purpose of following attribute is -to supply general information about test set. -This information will be stored in ostf database -in test_sets table. -""" -__profile__ = { - "test_runs_ordering_priority": 4, - "id": "tests_platform", - "driver": "nose", - "test_path": "fuel_health/tests/tests_platform", - "description": ("Platform services functional tests." 
- " Duration 3 min - 60 min"), - "cleanup_path": "fuel_health.cleanup", - "deployment_tags": ['additional_components'], - "exclusive_testsets": ['smoke_platform'] -} diff --git a/fuel_health/tests/tests_platform/io.murano.apps.Simple/Classes/Simple.yaml b/fuel_health/tests/tests_platform/io.murano.apps.Simple/Classes/Simple.yaml deleted file mode 100644 index e5f508d7..00000000 --- a/fuel_health/tests/tests_platform/io.murano.apps.Simple/Classes/Simple.yaml +++ /dev/null @@ -1,30 +0,0 @@ -Namespaces: - =: io.murano.apps - std: io.murano - sys: io.murano.system - res: io.murano.resources - - -Name: Simple - -Extends: std:Application - -Properties: - name: - Contract: $.string().notNull() - instance: - Contract: $.class(res:Instance).notNull() - -Methods: - initialize: - Body: - - $.environment: $.find(std:Environment).require() - - deploy: - Body: - - If: not $.getAttr(deployed, false) - Then: - - $.environment.reporter.report($this, 'Starting SimpleApp deployment!') - - $.instance.deploy() - - $.environment.reporter.report($this, 'SimpleApp deployment is finished') - - $.setAttr(deployed, true) diff --git a/fuel_health/tests/tests_platform/io.murano.apps.Simple/UI/ui.yaml b/fuel_health/tests/tests_platform/io.murano.apps.Simple/UI/ui.yaml deleted file mode 100644 index 760c67d0..00000000 --- a/fuel_health/tests/tests_platform/io.murano.apps.Simple/UI/ui.yaml +++ /dev/null @@ -1,53 +0,0 @@ -Version: 2 - -Application: - ?: - type: io.murano.apps.Simple - name: $.appConfiguration.name - - instance: - ?: - type: io.murano.resources.LinuxMuranoInstance - name: generateHostname($.appConfiguration.unitNamingPattern, 1) - image: $.appConfiguration.osImage - keyname: $.appConfiguration.keyPair - flavor: $.appConfiguration.flavor - -Forms: - - appConfiguration: - fields: - - name: name - initial: SimpleVM - type: string - label: Application Name - description: >- - Enter a desired name for the application. Just A-Z, a-z, 0-9, dash and - underline are allowed - - name: flavor - type: flavor - label: Instance flavor - description: >- - Select registered in Openstack flavor. Consider that application performance - depends on this parameter. - - name: osImage - type: image - imageType: linux - label: Instance image - description: >- - Select a valid image for the application. Image should already be prepared and - registered in glance. - - name: keyPair - type: keypair - hidden: true - label: Key Pair - description: >- - Select a Key Pair to control access to instances. You can login to - instances using this KeyPair after the deployment of application. - required: false - - name: unitNamingPattern - label: Instance Name Pattern - type: string - required: false - widgetMedia: - js: ['muranodashboard/js/support_placeholder.js'] - css: {all: ['muranodashboard/css/support_placeholder.css']} diff --git a/fuel_health/tests/tests_platform/io.murano.apps.Simple/manifest.yaml b/fuel_health/tests/tests_platform/io.murano.apps.Simple/manifest.yaml deleted file mode 100644 index 51b077b9..00000000 --- a/fuel_health/tests/tests_platform/io.murano.apps.Simple/manifest.yaml +++ /dev/null @@ -1,11 +0,0 @@ -Format: 1.0 -Type: Application -FullName: io.murano.apps.Simple -Name: SimpleApp -Description: | - Simple Test Application. 
- This app initiates VM installation, but doesn't require Murano Agent -Author: 'Mirantis, Inc' -Tags: [App, Test, HelloWorld] -Classes: - io.murano.apps.Simple: Simple.yaml diff --git a/fuel_health/tests/tests_platform/test_ceilometer.py b/fuel_health/tests/tests_platform/test_ceilometer.py deleted file mode 100644 index 6535800e..00000000 --- a/fuel_health/tests/tests_platform/test_ceilometer.py +++ /dev/null @@ -1,552 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from fuel_health import ceilometermanager -from fuel_health.common.utils.data_utils import rand_name - - -class CeilometerApiPlatformTests(ceilometermanager.CeilometerBaseTest): - """TestClass contains tests that check basic Ceilometer functionality.""" - - def test_create_update_delete_alarm(self): - """Ceilometer test to create, update, check and delete alarm - Target component: Ceilometer - - Scenario: - 1. Get the statistic of a metric for the last hour. - 2. Create an alarm. - 3. Get the alarm. - 4. List alarms. - 5. Wait for 'ok' alarm state. - 6. Update the alarm. - 7. Wait for 'alarm' alarm state. - 8. Get the alarm history. - 9. Set the alarm state to 'insufficient data'. - 10. Verify that the alarm state is 'insufficient data'. - 11. Delete the alarm. - - Duration: 120 s. - Deployment tags: Ceilometer - """ - - fail_msg = 'Failed to get statistic of metric.' - msg = 'getting statistic of metric' - an_hour_ago = (datetime.datetime.now() - - datetime.timedelta(hours=1)).isoformat() - query = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}] - - self.verify(600, self.wait_for_statistic_of_metric, 1, - fail_msg, msg, meter_name='image', query=query) - - fail_msg = 'Failed to create alarm.' - msg = 'creating alarm' - alarm = self.verify(60, self.create_alarm, 2, - fail_msg, msg, - meter_name='image', - threshold=0.9, - name=rand_name('ceilometer-alarm'), - period=600, - statistic='avg', - comparison_operator='lt') - - fail_msg = 'Failed to get alarm.' - msg = 'getting alarm' - self.verify(60, self.ceilometer_client.alarms.get, 3, - fail_msg, msg, alarm.alarm_id) - - fail_msg = 'Failed to list alarms.' - msg = 'listing alarms' - query = [{'field': 'project', 'op': 'eq', 'value': alarm.project_id}] - self.verify(60, self.ceilometer_client.alarms.list, 4, - fail_msg, msg, q=query) - - fail_msg = 'Failed while waiting for alarm state to become "ok".' - msg = 'waiting for alarm state to become "ok"' - self.verify(1000, self.wait_for_alarm_status, 5, - fail_msg, msg, alarm.alarm_id, 'ok') - - fail_msg = 'Failed to update alarm.' - msg = 'updating alarm' - self.verify(60, self.ceilometer_client.alarms.update, 6, - fail_msg, msg, alarm_id=alarm.alarm_id, threshold=1.1) - - fail_msg = 'Failed while waiting for alarm state to become "alarm".' - msg = 'waiting for alarm state to become "alarm"' - self.verify(1000, self.wait_for_alarm_status, 7, - fail_msg, msg, alarm.alarm_id, 'alarm') - - fail_msg = 'Failed to get alarm history.' 
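The alarm steps above go through the deleted ceilometermanager helpers, but they map almost one-to-one onto plain python-ceilometerclient calls. A minimal sketch, assuming `ceilometer_client` is the authenticated v2 client the base manager builds and `alarm` is an object returned by its create_alarm helper; statistics.list stands in for the wait_for_statistic_of_metric helper and is an assumption about the client API, everything else is taken directly from the test:

import datetime

def exercise_alarm(ceilometer_client, alarm):
    # Statistics for the 'image' meter over the last hour, using the same
    # query structure the test builds in step 1.
    an_hour_ago = (datetime.datetime.now()
                   - datetime.timedelta(hours=1)).isoformat()
    time_query = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
    ceilometer_client.statistics.list(meter_name='image', q=time_query)

    # Read the alarm back and list alarms scoped to its project (steps 3-4).
    ceilometer_client.alarms.get(alarm.alarm_id)
    project_query = [{'field': 'project', 'op': 'eq',
                      'value': alarm.project_id}]
    ceilometer_client.alarms.list(q=project_query)

    # Update the threshold, inspect history, force a state, then delete
    # (steps 6 and 8-11). The real test waits for 'ok'/'alarm' states
    # in between via wait_for_alarm_status.
    ceilometer_client.alarms.update(alarm_id=alarm.alarm_id, threshold=1.1)
    ceilometer_client.alarms.get_history(alarm_id=alarm.alarm_id)
    ceilometer_client.alarms.set_state(alarm_id=alarm.alarm_id,
                                       state='insufficient data')
    ceilometer_client.alarms.delete(alarm_id=alarm.alarm_id)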
- msg = 'getting alarm history' - self.verify(60, self.ceilometer_client.alarms.get_history, 8, - fail_msg, msg, alarm_id=alarm.alarm_id) - - fail_msg = 'Failed to set alarm state to "insufficient data".' - msg = 'setting alarm state to "insufficient data"' - self.verify(60, self.ceilometer_client.alarms.set_state, 9, - fail_msg, msg, alarm_id=alarm.alarm_id, - state='insufficient data') - - fail_msg = 'Failed while verifying alarm state.' - msg = 'verifying alarm state' - self.verify(60, self.verify_state, 10, - fail_msg, msg, alarm_id=alarm.alarm_id, - state='insufficient data') - - fail_msg = 'Failed to delete alarm.' - msg = 'deleting alarm' - self.verify(60, self.ceilometer_client.alarms.delete, 11, - fail_msg, msg, alarm_id=alarm.alarm_id) - - @ceilometermanager.check_compute_nodes() - def test_check_alarm_state(self): - """Ceilometer test to check alarm state and get Nova notifications - Target component: Ceilometer - - Scenario: - 1. Create an instance. - 2. Wait for 'ACTIVE' status of the instance. - 3. Get notifications. - 4. Get the statistic notification:vcpus. - 5. Create an alarm for the summary statistic notification:vcpus. - 6. Wait for the alarm state to become 'alarm' or 'ok'. - - Duration: 90 s. - Deployment tags: Ceilometer, qemu | kvm - """ - - self.check_image_exists() - private_net_id, _ = self.create_network_resources() - - fail_msg = 'Failed to create instance.' - msg = 'creating instance' - name = rand_name('ost1_test-ceilo-instance-') - instance = self.verify(600, self.create_server, 1, fail_msg, msg, name, - net_id=private_net_id) - - fail_msg = 'Failed while waiting for "ACTIVE" status of instance.' - msg = 'waiting for "ACTIVE" status of instance' - self.verify(200, self.wait_for_resource_status, 2, - fail_msg, msg, self.compute_client.servers, - instance.id, 'ACTIVE') - - fail_msg = 'Failed to get notifications.' - msg = 'getting notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': instance.id}] - self.verify(300, self.wait_for_ceilo_objects, 3, - fail_msg, msg, self.nova_notifications, query, 'sample') - - fail_msg = 'Failed to get statistic notification:cpu_util.' - msg = 'getting statistic notification:cpu_util' - an_hour_ago = (datetime.datetime.now() - - datetime.timedelta(hours=1)).isoformat() - query = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}] - vcpus_stat = self.verify(60, self.wait_for_statistic_of_metric, 4, - fail_msg, msg, 'vcpus', query) - - fail_msg = ('Failed to create alarm for ' - 'summary statistic notification:cpu_util.') - msg = 'creating alarm for summary statistic notification:cpu_util' - threshold = vcpus_stat[0].sum - 1 - alarm = self.verify(60, self.create_alarm, 5, - fail_msg, msg, - meter_name='vcpus', - threshold=threshold, - name=rand_name('ost1_test-ceilo-alarm'), - period=600, - statistic='sum', - comparison_operator='lt') - - fail_msg = ('Failed while waiting for ' - 'alarm state to become "alarm" or "ok".') - msg = 'waiting for alarm state to become "alarm" or "ok"' - self.verify(300, self.wait_for_alarm_status, 6, - fail_msg, msg, alarm.alarm_id) - - def test_create_sample(self): - """Ceilometer test to create, check and list samples - Target component: Ceilometer - - Scenario: - 1. Create a sample for the image. - 2. Get count of samples stored for the last hour for an image. - 3. Create another sample for the image. - 4. Check that the sample has the expected resource. - 5. Get count of samples and compare counts before and after - the second sample creation. - 6. 
Get the resource of the sample. - - Duration: 5 s. - Deployment tags: Ceilometer - """ - - self.check_image_exists() - - image_id = self.get_image_from_name() - - fail_msg = 'Failed to create first sample for image.' - msg = 'creating first sample for image' - self.verify(60, self.create_image_sample, 1, fail_msg, msg, image_id) - - an_hour_ago = (datetime.datetime.now() - - datetime.timedelta(hours=1)).isoformat() - query = [{'field': 'resource', 'op': 'eq', 'value': image_id}, - {'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}] - fail_msg = 'Failed to get samples for image.' - msg = 'getting samples for image' - count_before_create_second_sample = self.verify( - 60, self.get_samples_count, 2, fail_msg, msg, 'image', query) - - fail_msg = 'Failed to create second sample for image.' - msg = 'creating second sample for image' - second_sample = self.verify(60, self.create_image_sample, 1, - fail_msg, msg, image_id) - - fail_msg = ('Resource of sample is missing or ' - 'does not equal to the expected resource.') - self.verify_response_body_value( - body_structure=second_sample[0].resource_id, value=image_id, - msg=fail_msg, failed_step=4) - - fail_msg = ('Failed while waiting ' - 'for addition of new sample to samples list.') - msg = 'waiting for addition of new sample to samples list' - self.verify(20, self.wait_samples_count, 5, fail_msg, msg, - 'image', query, count_before_create_second_sample) - - fail_msg = 'Failed to get resource of sample.' - msg = 'getting resource of sample' - self.verify(20, self.ceilometer_client.resources.get, 6, - fail_msg, msg, second_sample[0].resource_id) - - @ceilometermanager.check_compute_nodes() - def test_check_events_and_traits(self): - """Ceilometer test to check events and traits - Target component: Ceilometer - - Scenario: - 1. Create an instance. - 2. Wait for 'ACTIVE' status of the instance. - 3. Check that event type list contains expected event type. - 4. Check that event list contains event with expected type. - 5. Check event traits description. - 6. Check that event exists for expected instance. - 7. Get information about expected event. - 8. Delete the instance. - - Duration: 40 s. - - Deployment tags: Ceilometer - """ - - event_type = 'compute.instance.create.start' - - self.check_image_exists() - private_net_id, _ = self.create_network_resources() - - name = rand_name('ost1_test-ceilo-instance-') - - fail_msg = 'Failed to create instance.' - msg = 'creating instance' - - vcenter = self.config.compute.use_vcenter - image_name = 'TestVM-VMDK' if vcenter else None - instance = self.verify(600, self.create_server, 1, fail_msg, msg, name, - net_id=private_net_id, img_name=image_name) - - fail_msg = 'Failed while waiting for "ACTIVE" status of instance.' 
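The sample checks above lean on get_samples_count and wait_samples_count from the deleted base manager; underneath they poll the same resource-scoped query shown in the test. A rough sketch, assuming samples.list with these keywords is available on the python-ceilometerclient in use (the resources.get call is copied from step 6):

import datetime

def image_samples(ceilometer_client, image_id):
    # Samples for one image created within the last hour -- the same
    # two-clause query the test builds before counting samples.
    an_hour_ago = (datetime.datetime.now()
                   - datetime.timedelta(hours=1)).isoformat()
    query = [{'field': 'resource', 'op': 'eq', 'value': image_id},
             {'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
    samples = ceilometer_client.samples.list(meter_name='image', q=query)

    # Resolve the resource behind the newest sample, as step 6 does.
    if samples:
        return ceilometer_client.resources.get(samples[0].resource_id)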
- msg = 'waiting for "ACTIVE" status of instance' - self.verify(200, self.wait_for_resource_status, 2, - fail_msg, msg, self.compute_client.servers, - instance.id, 'ACTIVE') - - fail_msg = ('Failed to find "{event_type}" in event type list.'.format( - event_type=event_type)) - msg = ('searching "{event_type}" in event type list'.format( - event_type=event_type)) - self.verify(60, self.check_event_type, 3, fail_msg, msg, event_type) - - fail_msg = ('Failed to find event with "{event_type}" type in event ' - 'list.'.format(event_type=event_type)) - msg = ('searching event with "{event_type}" type in event type ' - 'list'.format(event_type=event_type)) - - query = [{'field': 'event_type', 'op': 'eq', 'value': event_type}] - events_list = self.verify(60, self.ceilometer_client.events.list, 4, - fail_msg, msg, query, limit=1000) - - if not events_list: - self.fail('Events with "{event_type}" type not found'.format( - event_type=event_type)) - - traits = ['instance_id', 'request_id', 'state', 'service', 'host'] - - fail_msg = 'Failed to check event traits description.' - msg = 'checking event traits description' - self.verify(60, self.check_traits, 5, fail_msg, msg, - event_type=event_type, traits=traits) - - fail_msg = ('Failed to find "{event_type}" event type with expected ' - 'instance ID.'.format(event_type=event_type)) - msg = ('searching "{event_type}" event type with expected ' - 'instance ID'.format(event_type=event_type)) - message_id = self.verify(60, self.check_event_message_id, 6, - fail_msg, msg, events_list, instance.id) - - fail_msg = 'Failed to get event information.' - msg = 'getting event information' - self.verify(60, self.ceilometer_client.events.get, 7, - fail_msg, msg, message_id) - - fail_msg = 'Failed to delete the instance.' - msg = 'instance deleting' - self.verify(60, self._delete_server, 8, fail_msg, msg, instance) - - @ceilometermanager.check_compute_nodes() - def test_check_volume_events(self): - """Ceilometer test to check events from Cinder - Target component: Ceilometer - - Scenario: - 1. Create an instance. - 2. Wait for 'ACTIVE' status of the instance. - 3. Create a volume and volume snapshot. - 4. Get volume snapshot events. - 5. Get volume events. - 6. Delete the instance. - - Duration: 150 s. - Deployment tags: Ceilometer - """ - - if (not self.config.volume.cinder_node_exist - and not self.config.volume.ceph_exist): - self.skipTest('There are no storage nodes for volumes.') - - self.check_image_exists() - private_net_id, _ = self.create_network_resources() - - fail_msg = 'Failed to create instance.' - msg = 'creating instance' - name = rand_name('ostf-ceilo-instance-') - vcenter = self.config.compute.use_vcenter - image_name = 'TestVM-VMDK' if vcenter else None - instance = self.verify(300, self.create_server, 1, fail_msg, msg, name, - net_id=private_net_id, img_name=image_name) - - fail_msg = 'Failed while waiting for "ACTIVE" status of instance.' - msg = 'waiting for "ACTIVE" status of instance' - self.verify(200, self.wait_for_resource_status, 2, - fail_msg, msg, self.compute_client.servers, - instance.id, 'ACTIVE') - - fail_msg = 'Failed to create volume and volume snapshot.' - msg = 'creating volume and volume snapshot' - volume, snapshot = self.verify(300, self.volume_helper, 3, - fail_msg, msg, instance) - - query = [{'field': 'resource_id', 'op': 'eq', 'value': snapshot.id}] - fail_msg = 'Failed to get volume snapshot events.' 
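The event assertions above reduce to two raw client calls that already appear in the test: events.list with an event_type query and events.get on a message id. A compact sketch with the same query shape; the per-instance filtering done by the deleted check_event_message_id helper is left out, and the message_id attribute on event objects is assumed from that helper's usage:

def instance_create_events(ceilometer_client, limit=1000):
    # All events of the Nova 'compute.instance.create.start' type,
    # capped at the same limit the test passes.
    event_type = 'compute.instance.create.start'
    query = [{'field': 'event_type', 'op': 'eq', 'value': event_type}]
    events = ceilometer_client.events.list(query, limit=limit)

    # Fetch full details for the first match, as step 7 does via verify().
    if events:
        return ceilometer_client.events.get(events[0].message_id)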
- msg = 'getting volume snapshot events' - self.verify(300, self.wait_for_ceilo_objects, 4, - fail_msg, msg, self.snapshot_events, query, 'event') - - query = [{'field': 'resource_id', 'op': 'eq', 'value': volume.id}] - fail_msg = 'Failed to get volume events.' - msg = 'getting volume events' - self.verify(300, self.wait_for_ceilo_objects, 5, - fail_msg, msg, self.volume_events, query, 'event') - - fail_msg = 'Failed to delete the server.' - msg = 'deleting server' - self.verify(60, self._delete_server, 6, fail_msg, msg, instance) - - def test_check_glance_notifications(self): - """Ceilometer test to check notifications from Glance - Target component: Ceilometer - - Scenario: - 1. Create an image. - 2. Get image notifications. - - Duration: 5 s. - Deployment tags: Ceilometer - """ - - fail_msg = 'Failed to create image.' - msg = 'creating image' - image = self.verify(120, self.glance_helper, 1, fail_msg, msg) - - query = [{'field': 'resource', 'op': 'eq', 'value': image.id}] - fail_msg = 'Failed to get image notifications.' - msg = 'getting image notifications' - self.verify(300, self.wait_for_ceilo_objects, 2, - fail_msg, msg, self.glance_notifications, query, 'sample') - - def test_check_keystone_notifications(self): - """Ceilometer test to check notifications from Keystone - Target component: Ceilometer - - Scenario: - 1. Create Keystone resources. - 2. Get project notifications. - 3. Get user notifications. - 4. Get role notifications. - 5. Get role assignment notifications. - 6. Get group notifications. - 7. Get trust notifications. - - Duration: 5 s. - Available since release: 2014.2-6.0 - Deployment tags: Ceilometer - """ - - fail_msg = 'Failed to create some Keystone resources.' - msg = 'creating Keystone resources' - tenant, user, role, group, trust = self.verify( - 60, self.identity_helper, 1, fail_msg, msg) - - fail_msg = 'Failed to get project notifications.' - msg = 'getting project notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': tenant.id}] - self.verify(300, self.wait_for_ceilo_objects, 2, fail_msg, msg, - self.keystone_project_notifications, query, 'sample') - - fail_msg = 'Failed to get user notifications.' - msg = 'getting user notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': user.id}] - self.verify(300, self.wait_for_ceilo_objects, 3, fail_msg, msg, - self.keystone_user_notifications, query, 'sample') - - fail_msg = 'Failed to get role notifications.' - msg = 'getting role notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': role.id}] - self.verify(300, self.wait_for_ceilo_objects, 4, fail_msg, msg, - self.keystone_role_notifications, query, 'sample') - - fail_msg = 'Failed to get role assignment notifications.' - msg = 'getting role assignment notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': role.id}] - self.verify(300, self.wait_for_ceilo_objects, 5, fail_msg, msg, - self.keystone_role_assignment_notifications, query, - 'sample') - - fail_msg = 'Failed to get group notifications.' - msg = 'getting group notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': group.id}] - self.verify(300, self.wait_for_ceilo_objects, 6, fail_msg, msg, - self.keystone_group_notifications, query, 'sample') - - fail_msg = 'Failed to get trust notifications.' 
- msg = 'getting trust notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': trust.id}] - self.verify(300, self.wait_for_ceilo_objects, 7, fail_msg, msg, - self.keystone_trust_notifications, query, 'sample') - - def test_check_neutron_notifications(self): - """Ceilometer test to check notifications from Neutron - Target component: Ceilometer - - Scenario: - 1. Create Neutron resources. - 2. Get network notifications. - 3. Get subnet notifications. - 4. Get port notifications. - 5. Get router notifications. - 6. Get floating IP notifications. - - Duration: 40 s. - Deployment tags: Ceilometer, Neutron - """ - - fail_msg = 'Failed to create some Neutron resources.' - msg = 'creating Neutron resources' - net, subnet, port, router, flip = self.verify( - 60, self.neutron_helper, 1, fail_msg, msg) - - fail_msg = 'Failed to get network notifications.' - msg = 'getting network notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': net['id']}] - self.verify(60, self.wait_for_ceilo_objects, 2, fail_msg, msg, - self.neutron_network_notifications, query, 'sample') - - fail_msg = 'Failed to get subnet notifications.' - msg = 'getting subnet notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': subnet['id']}] - self.verify(60, self.wait_for_ceilo_objects, 3, fail_msg, msg, - self.neutron_subnet_notifications, query, 'sample') - - fail_msg = 'Failed to get port notifications.' - msg = 'getting port notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': port['id']}] - self.verify(60, self.wait_for_ceilo_objects, 4, fail_msg, msg, - self.neutron_port_notifications, query, 'sample') - - fail_msg = 'Failed to get router notifications.' - msg = 'getting router notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': router['id']}] - self.verify(60, self.wait_for_ceilo_objects, 5, fail_msg, msg, - self.neutron_router_notifications, query, 'sample') - - fail_msg = 'Failed to get floating IP notifications.' - msg = 'getting floating IP notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': flip['id']}] - self.verify(60, self.wait_for_ceilo_objects, 6, fail_msg, msg, - self.neutron_floatingip_notifications, query, 'sample') - - @ceilometermanager.check_compute_nodes() - def test_check_sahara_notifications(self): - """Ceilometer test to check notifications from Sahara - Target component: Ceilometer - - Scenario: - 1. Find a correctly registered Sahara image - 2. Create a Sahara cluster - 3. Get cluster notifications - - Duration: 40 s. - Deployment tags: Ceilometer, Sahara - """ - - plugin_name = 'vanilla' - mapping_versions_of_plugin = { - "6.1": "2.4.1", - "7.0": "2.6.0", - "8.0": "2.7.1", - "9.0": "2.7.1", - "10.0": "2.7.1" - } - hadoop_version = mapping_versions_of_plugin.get( - self.config.fuel.fuel_version, "2.7.1") - - fail_msg = 'Failed to find correctly registered Sahara image.' - msg = 'finding correctly registered Sahara image' - image_id = self.verify(60, self.find_and_check_image, 1, - fail_msg, msg, plugin_name, hadoop_version) - - if image_id is None: - self.skipTest('Correctly registered image ' - 'to create Sahara cluster not found.') - - fail_msg = 'Failed to create Sahara cluster.' - msg = 'creating Sahara cluster' - cluster = self.verify(300, self.sahara_helper, 2, fail_msg, - msg, image_id, plugin_name, hadoop_version) - - fail_msg = 'Failed to get cluster notifications.' 
- msg = 'getting cluster notifications' - query = [{'field': 'resource', 'op': 'eq', 'value': cluster.id}] - self.verify(60, self.wait_for_ceilo_objects, 3, fail_msg, msg, - self.sahara_cluster_notifications, query, 'sample') diff --git a/fuel_health/tests/tests_platform/test_heat.py b/fuel_health/tests/tests_platform/test_heat.py deleted file mode 100644 index a2fec373..00000000 --- a/fuel_health/tests/tests_platform/test_heat.py +++ /dev/null @@ -1,889 +0,0 @@ -# Copyright 2013 Mirantis, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_health import heatmanager - - -class HeatSmokeTests(heatmanager.HeatBaseTest): - """Test class verifies Heat API calls, rollback and - autoscaling use-cases. - """ - def setUp(self): - super(HeatSmokeTests, self).setUp() - if not self.config.compute.compute_nodes: - self.skipTest('There are no compute nodes') - self.min_required_ram_mb = 7000 - - def test_advanced_actions(self): - """Advanced stack actions: suspend, resume and check - Target component: Heat - - Scenario: - 1. Create test flavor. - 2. Create a stack. - 3. Wait until the stack status will change to 'CREATE_COMPLETE'. - 4. Call stack suspend action. - 5. Wait until the stack status will change to 'SUSPEND_COMPLETE'. - 6. Check that stack resources are in 'SUSPEND_COMPLETE' status. - 7. Check that server owned by stack is in 'SUSPENDED' status. - 8. Call stack resume action. - 9. Wait until the stack status will change to 'RESUME_COMPLETE'. - 10. Check that stack resources are in 'RESUME_COMPLETE' status. - 11. Check that instance owned by stack is in 'ACTIVE' status. - 12. Call stack check action. - 13. Wait until the stack status will change to 'CHECK_COMPLETE'. - 14. Check that stack resources are in 'CHECK_COMPLETE' status. - 15. Check that instance owned by stack is in 'ACTIVE' status. - 16. Delete the stack and wait for the stack to be deleted. - - Duration: 900 s. - Available since release: 2014.2-6.1 - """ - - self.check_image_exists() - - # create test flavor - fail_msg = 'Test flavor was not created.' - heat_flavor = self.verify( - 60, self.create_flavor, - 1, fail_msg, - 'flavor creation' - ) - - # define stack parameters - parameters = { - 'InstanceType': heat_flavor.name, - 'ImageId': self.config.compute.image_name - } - if 'neutron' in self.config.network.network_provider: - parameters['network'], _ = self.create_network_resources() - template = self.load_template( - 'heat_create_neutron_stack_template.yaml') - else: - template = self.load_template( - 'heat_create_nova_stack_template.yaml') - - # create stack - fail_msg = 'Stack was not created properly.' 
- stack = self.verify( - 90, self.create_stack, - 2, fail_msg, - 'stack creation', - template, parameters=parameters - ) - self.verify( - 420, self.wait_for_stack_status, - 3, fail_msg, - 'stack status becoming "CREATE_COMPLETE"', - stack.id, 'CREATE_COMPLETE' - ) - res = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - - # suspend stack - fail_msg = 'Stack suspend failed.' - self.verify( - 10, self.heat_client.actions.suspend, - 4, fail_msg, - 'executing suspend stack action', - stack.id - ) - self.verify( - 90, self.wait_for_stack_status, - 5, fail_msg, - 'stack status becoming "SUSPEND_COMPLETE"', - stack.id, 'SUSPEND_COMPLETE' - ) - res = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - self.verify_response_body_content( - 'SUSPEND_COMPLETE', res[0].resource_status, - 'Stack resource is not in "SUSPEND_COMPLETE" status.', 6 - ) - instance = self.compute_client.servers.get(res[0].physical_resource_id) - self.verify_response_body_content( - 'SUSPENDED', instance.status, - 'Instance owned by stack is not in "SUSPENDED" status.', 7 - ) - - # resume stack - fail_msg = 'Stack resume failed.' - self.verify( - 10, self.heat_client.actions.resume, - 8, fail_msg, - 'executing resume stack action', - stack.id - ) - self.verify( - 90, self.wait_for_stack_status, - 9, fail_msg, - 'stack status becoming "RESUME_COMPLETE"', - stack.id, 'RESUME_COMPLETE' - ) - res = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - self.verify_response_body_content( - 'RESUME_COMPLETE', res[0].resource_status, - 'Stack resource is not in "RESUME_COMPLETE".', 10 - ) - instance = self.compute_client.servers.get(res[0].physical_resource_id) - self.verify_response_body_content( - 'ACTIVE', instance.status, - 'Instance owned by stack is not in "ACTIVE" status.', 11 - ) - - # stack check - fail_msg = 'Stack check failed.' - self.verify( - 10, self.heat_client.actions.check, - 12, fail_msg, - 'executing check stack action', - stack.id - ) - self.verify( - 90, self.wait_for_stack_status, - 13, fail_msg, - 'stack status becoming "CHECK_COMPLETE"', - stack.id, 'CHECK_COMPLETE' - ) - res = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - self.verify_response_body_content( - 'CHECK_COMPLETE', res[0].resource_status, - 'Stack resource is not in "CHECK_COMPLETE" status', 14 - ) - instance = self.compute_client.servers.get(res[0].physical_resource_id) - self.verify_response_body_content( - 'ACTIVE', instance.status, - 'Instance owned by stack is not in "ACTIVE" status', 15 - ) - - # delete stack - fail_msg = 'Cannot delete stack.' - self.verify( - 10, self.heat_client.stacks.delete, - 16, fail_msg, - 'deleting stack', - stack.id - ) - self.verify( - 60, self.wait_for_stack_deleted, - 16, fail_msg, - 'deleting stack', - stack.id - ) - - def test_actions(self): - """Typical stack actions: create, delete, show details, etc. - Target component: Heat - - Scenario: - 1. Create test flavor. - 2. Create a stack. - 3. Wait for the stack status to change to 'CREATE_COMPLETE'. - 4. Get the details of the created stack by its name. - 5. Get the resources list of the created stack. - 6. Get the details of the stack resource. - 7. Get the events list of the created stack. - 8. Get the details of the stack event. - 9. Get the stack template details. - 10. 
Delete the stack and wait for the stack to be deleted. - - Duration: 720 s. - """ - - self.check_image_exists() - - # create test flavor - fail_msg = 'Test flavor was not created.' - heat_flavor = self.verify( - 50, self.create_flavor, - 1, fail_msg, - 'flavor creation' - ) - - # define stack parameters - parameters = { - 'InstanceType': heat_flavor.name, - 'ImageId': self.config.compute.image_name - } - if 'neutron' in self.config.network.network_provider: - parameters['network'], _ = self.create_network_resources() - template = self.load_template( - 'heat_create_neutron_stack_template.yaml') - else: - template = self.load_template( - 'heat_create_nova_stack_template.yaml') - - # create stack - fail_msg = 'Stack was not created properly.' - stack = self.verify( - 90, self.create_stack, - 2, fail_msg, - 'stack creation', - template, parameters=parameters - ) - self.verify( - 420, self.wait_for_stack_status, - 3, fail_msg, - 'stack status becoming "CREATE_COMPLETE"', - stack.id, 'CREATE_COMPLETE' - ) - - # get stack details - fail_msg = 'Cannot retrieve stack details.' - details = self.verify( - 20, self.get_stack, - 4, fail_msg, - 'retrieving stack details', - stack.stack_name - ) - fail_msg = 'Stack details contain incorrect values.' - self.verify_response_body_content( - stack.id, details.id, - fail_msg, 4 - ) - self.verify_response_body_content( - self.config.compute.image_name, details.parameters['ImageId'], - fail_msg, 4 - ) - self.verify_response_body_content( - 'CREATE_COMPLETE', details.stack_status, - fail_msg, 4 - ) - - # get resources list - fail_msg = 'Cannot retrieve list of stack resources.' - resources = self.verify( - 10, self.get_stack_objects, - 5, fail_msg, - 'retrieving list of stack resources', - self.heat_client.resources, - stack.id - ) - self.verify_response_body_content( - 1, len(resources), - fail_msg, 5 - ) - - # get resource details - resource_name = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - )[0].logical_resource_id - - fail_msg = 'Cannot retrieve stack resource details.' - res_details = self.verify( - 10, self.heat_client.resources.get, - 6, fail_msg, - 'retrieving stack resource details', - stack.id, resource_name - ) - fail_msg = 'Resource details contain incorrect values.' - self.verify_response_body_content( - 'CREATE_COMPLETE', res_details.resource_status, - fail_msg, 6 - ) - self.verify_response_body_content( - 'OS::Nova::Server', res_details.resource_type, - fail_msg, 6 - ) - - # get events list - fail_msg = 'Cannot retrieve list of stack events.' - events = self.verify( - 10, self.get_stack_objects, - 7, fail_msg, - 'retrieving list of stack events', - self.heat_client.events, - stack.id - ) - self.verify_response_body_not_equal( - 0, len(events), - fail_msg, 7 - ) - - # get event details - event_id = self.get_stack_objects( - self.heat_client.events, stack.id, - key='resource_name', value=resource_name - )[0].id - - fail_msg = 'Cannot retrieve stack event details.' - ev_details = self.verify( - 10, self.heat_client.events.get, - 8, fail_msg, - 'retrieving stack event details', - stack.id, resource_name, event_id - ) - fail_msg = 'Event details contain incorrect values.' - self.verify_response_body_content( - event_id, ev_details.id, - fail_msg, 8 - ) - self.verify_response_body_content( - resource_name, ev_details.logical_resource_id, - fail_msg, 8 - ) - - # show template - fail_msg = 'Cannot retrieve template of the stack.' 
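The Heat calls in the two tests above that are not wrapped in get_stack/get_stack_objects use the plain python-heatclient managers. A condensed sketch of the action and introspection sequence, assuming `heat_client` and `compute_client` are the authenticated clients from the deleted base classes and `stack` was created beforehand; stacks.get and resources.list here stand in for the helpers, the remaining calls appear verbatim in the tests:

def poke_stack(heat_client, compute_client, stack):
    # Suspend / resume / check actions, as steps 4, 8 and 12 of the
    # advanced-actions test issue them (the real test waits for
    # SUSPEND_COMPLETE / RESUME_COMPLETE / CHECK_COMPLETE between calls).
    heat_client.actions.suspend(stack.id)
    heat_client.actions.resume(stack.id)
    heat_client.actions.check(stack.id)

    # Introspection used by the basic-actions test: stack details,
    # resource details, and the stack template.
    details = heat_client.stacks.get(stack.id)
    resources = heat_client.resources.list(stack.id)
    server = next(r for r in resources
                  if r.resource_type == 'OS::Nova::Server')
    res_details = heat_client.resources.get(stack.id,
                                            server.logical_resource_id)
    template = heat_client.stacks.template(stack.id)

    # The instance backing the OS::Nova::Server resource is reachable
    # through Nova by its physical resource id, as the status checks do.
    instance = compute_client.servers.get(server.physical_resource_id)
    return details, res_details, template, instance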
- act_tpl = self.verify( - 10, self.heat_client.stacks.template, - 9, fail_msg, - 'retrieving stack template', - stack.id - ) - self.verify_response_body_content( - 'OS::Nova::Server', act_tpl['resources'][resource_name]['type'], - fail_msg, 9 - ) - - # delete stack - fail_msg = 'Can not delete stack.' - self.verify( - 30, self.heat_client.stacks.delete, - 10, fail_msg, - 'deleting stack', - stack.id - ) - self.verify( - 100, self.wait_for_stack_deleted, - 10, fail_msg, - 'deleting stack', - stack.id - ) - - def test_update(self): - """Update stack actions: inplace, replace and update whole template - Target component: Heat - - Scenario: - 1. Create test flavor. - 2. Create a stack. - 3. Wait for the stack status to change to 'CREATE_COMPLETE'. - 4. Change instance name, execute update stack in-place. - 5. Wait for the stack status to change to 'UPDATE_COMPLETE'. - 6. Check that instance name was changed. - 7. Create one more test flavor. - 8. Change instance flavor to just created and update stack - (update replace). - 9. Wait for the stack status to change to 'UPDATE_COMPLETE'. - 10. Check that instance flavor was changed. - 11. Change stack template and update it. - 12. Wait for the stack status to change to 'UPDATE_COMPLETE'. - 13. Check that there are only two newly created stack instances. - 14. Delete the stack. - 15. Wait for the stack to be deleted. - - Duration: 1300 s. - """ - - self.check_image_exists() - - # create test flavor - fail_msg = 'Test flavor was not created.' - heat_flavor = self.verify( - 50, self.create_flavor, - 1, fail_msg, - 'flavor creation' - ) - - # define stack parameters - parameters = { - 'InstanceType': heat_flavor.name, - 'ImageId': self.config.compute.image_name - } - if 'neutron' in self.config.network.network_provider: - parameters['network'], _ = self.create_network_resources() - template = self.load_template( - 'heat_create_neutron_stack_template.yaml') - else: - template = self.load_template( - 'heat_create_nova_stack_template.yaml') - - # create stack - fail_msg = 'Stack was not created properly.' - stack = self.verify( - 90, self.create_stack, - 2, fail_msg, - 'stack creation', - template, parameters=parameters - ) - self.verify( - 420, self.wait_for_stack_status, - 3, fail_msg, - 'stack status becoming "CREATE_COMPLETE"', - stack.id, 'CREATE_COMPLETE' - ) - - fail_msg = 'Can not update stack.' 
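The three update variants exercised below (in-place rename, flavor replace, whole-template swap) all go through the deleted update_stack helper, which presumably wraps heat_client.stacks.update. A sketch of the in-place rename only; the template substitution is copied from the test, while the exact keyword set accepted by stacks.update is an assumption that depends on the python-heatclient release:

def rename_stack_server(heat_client, stack, template, parameters):
    # In-place update: only the server name inside the template changes,
    # mirroring step 4 of the test.
    updated_template = template.replace('name: ost1-test_heat',
                                        'name: ost1-test_updated')
    heat_client.stacks.update(stack.id,
                              template=updated_template,
                              parameters=parameters)
    # The caller is expected to wait for UPDATE_COMPLETE afterwards,
    # as the test does with wait_for_stack_status.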
- - # update inplace - template = template.replace( - 'name: ost1-test_heat', - 'name: ost1-test_updated' - ) - - stack = self.verify( - 30, self.update_stack, - 4, fail_msg, - 'updating stack, changing resource name', - stack.id, - template, parameters=parameters - ) - self.verify( - 100, self.wait_for_stack_status, - 5, fail_msg, - 'stack status becoming "UPDATE_COMPLETE"', - stack.id, 'UPDATE_COMPLETE' - ) - - instances = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - instance_id = instances[0].physical_resource_id - new_instance_name = self.compute_client.servers.get( - instance_id).name - - self.verify_response_body_content( - 'ost1-test_updated', new_instance_name, - 'Update inplace failed, instance name was not changed', 6 - ) - - # creation of one more flavor, that will be used for 'update replace' - flavor = self.verify( - 60, self.create_flavor, - 7, 'Test flavor was not created.', - 'flavor creation' - ) - - # update replace - parameters['InstanceType'] = flavor.name - - stack = self.verify( - 30, self.update_stack, - 8, fail_msg, - 'updating stack, changing instance flavor', - stack.id, - template, parameters=parameters - ) - self.verify( - 150, self.wait_for_stack_status, - 9, fail_msg, - 'stack status becoming "UPDATE_COMPLETE"', - stack.id, 'UPDATE_COMPLETE' - ) - instances = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_type', value='OS::Nova::Server' - ) - instance_id = instances[0].physical_resource_id - new_instance_flavor = self.compute_client.servers.get( - instance_id).flavor['id'] - - self.verify_response_body_content( - flavor.id, new_instance_flavor, - 'Update replace failed, instance flavor was not changed.', 10 - ) - - # update the whole template: one old resource will be deleted and - # two new resources will be created - - parameters = { - 'InstanceType': heat_flavor.name, - 'ImageId': self.config.compute.image_name - } - if 'neutron' in self.config.network.network_provider: - parameters['network'], _ = self.create_network_resources() - template = self.load_template( - 'heat_update_neutron_stack_template.yaml') - else: - template = self.load_template( - 'heat_update_nova_stack_template.yaml') - - stack = self.verify( - 30, self.update_stack, - 11, fail_msg, - 'updating stack, changing template', - stack.id, - template, parameters=parameters - ) - self.verify( - 300, self.wait_for_stack_status, - 12, fail_msg, - 'stack status becoming "UPDATE_COMPLETE"', - stack.id, 'UPDATE_COMPLETE' - ) - - instances = self.get_stack_objects( - self.heat_client.resources, stack.id - ) - self.verify( - 2, self.assertTrue, - 13, 'Number of instances belonging to stack is not equal 2.', - 'verifying the number of instances after template update', - len(instances) == 2 - ) - - if instance_id in [ins.physical_resource_id for ins in instances]: - self.fail('Failed step: 13. Previously create instance ' - 'was not deleted during stack update.') - - # delete stack - fail_msg = 'Can not delete stack.' - self.verify( - 30, self.heat_client.stacks.delete, - 14, fail_msg, - 'deleting stack', - stack.id - ) - self.verify( - 100, self.wait_for_stack_deleted, - 15, fail_msg, - 'deleting stack', - stack.id - ) - - def test_autoscaling(self): - """Check stack autoscaling - Target component: Heat - - Scenario: - 1. Create test flavor. - 2. Create a keypair. - 3. Save generated private key to file on Controller node. - 4. Create a security group. - 5. Create a stack. - 6. 
Wait for the stack status to change to 'CREATE_COMPLETE'. - 7. Create a floating IP. - 8. Assign the floating IP to the instance of the stack. - 9. Wait when the instance is ready to connect. - 10. Wait for the 2nd instance to be launched. - 11. Wait for the 2nd instance to be terminated. - 12. Delete the file with private key. - 13. Delete the stack. - 14. Wait for the stack to be deleted. - - Duration: 2200 s. - Deployment tags: Ceilometer - """ - - self.check_image_exists() - - self.check_required_resources(self.min_required_ram_mb) - - # creation of test flavor - heat_flavor = self.verify( - 50, self.create_flavor, - 1, 'Test flavor can not be created.', - 'flavor creation' - ) - - # creation of test keypair - keypair = self.verify( - 10, self._create_keypair, - 2, 'Keypair can not be created.', - 'keypair creation', - self.compute_client - ) - path_to_key = self.verify( - 10, self.save_key_to_file, - 3, 'Private key can not be saved to file.', - 'saving private key to the file', - keypair.private_key - ) - - # creation of test security group - sec_group = self.verify( - 60, self._create_security_group, - 4, 'Security group can not be created.', - 'security group creation', - self.compute_client, 'ost1_test-sgroup' - ) - - # definition of stack parameters - parameters = { - 'KeyName': keypair.name, - 'InstanceType': heat_flavor.name, - 'ImageId': self.config.compute.image_name, - 'SecurityGroup': sec_group.name - } - - if 'neutron' in self.config.network.network_provider: - parameters['Net'], _ = self.create_network_resources() - template = self.load_template('heat_autoscaling_neutron.yaml') - else: - template = self.load_template('heat_autoscaling_nova.yaml') - - # creation of stack - fail_msg = 'Stack was not created properly.' - stack = self.verify( - 60, self.create_stack, - 5, fail_msg, - 'stack creation', - template, parameters=parameters - ) - self.verify( - 600, self.wait_for_stack_status, - 6, fail_msg, - 'stack status becoming "CREATE_COMPLETE"', - stack.id, 'CREATE_COMPLETE', 600, 15 - ) - - reduced_stack_name = '{0}-{1}'.format( - stack.stack_name[:2], stack.stack_name[-4:]) - - instances = self.get_instances_by_name_mask(reduced_stack_name) - self.verify( - 2, self.assertTrue, - 6, 'Instance for the stack was not created.', - 'verifying the number of instances after template update', - len(instances) != 0 - ) - - # assigning floating ip - floating_ip = self.verify( - 10, self._create_floating_ip, - 7, 'Floating IP can not be created.', - 'floating IP creation' - ) - self.verify( - 20, self._assign_floating_ip_to_instance, - 8, 'Floating IP can not be assigned.', - 'assigning floating IP', - self.compute_client, instances[0], floating_ip - ) - - # vm connection check - vm_connection = ('ssh -o StrictHostKeyChecking=no -i {0} {1}@{2}'. 
- format(path_to_key, 'cirros', floating_ip.ip)) - - self.verify( - 120, self.wait_for_vm_ready_for_load, - 9, 'VM is not ready or connection can not be established.', - 'test script execution on VM', - vm_connection, 120, 15 - ) - - # launching the second instance during autoscaling - self.verify( - 1500, self.wait_for_autoscaling, - 10, 'Failed to launch the 2nd instance per autoscaling alarm.', - 'launching the new instance per autoscaling alarm', - len(instances) + 2, 1500, 10, reduced_stack_name - ) - - # termination of the second instance during autoscaling - self.verify( - 1500, self.wait_for_autoscaling, - 11, 'Failed to terminate the 2nd instance per autoscaling alarm.', - 'terminating the 2nd instance per autoscaling alarm', - len(instances) + 1, 1500, 10, reduced_stack_name - ) - - # deletion of file with keypair from vm - self.verify( - 10, self.delete_key_file, - 12, 'The file with private key can not be deleted.', - 'deleting the file with private key', - path_to_key - ) - - # deletion of stack - self.verify( - 20, self.heat_client.stacks.delete, - 13, 'Can not delete stack.', - 'deleting stack', - stack.id - ) - self.verify( - 100, self.wait_for_stack_deleted, - 14, 'Can not delete stack.', - 'deleting stack', - stack.id - ) - - def test_rollback(self): - """Check stack rollback - Target component: Heat - - Scenario: - 1. Create extra large flavor. - 2. Start stack creation with rollback enabled. - 3. Verify the stack appears with status 'CREATE_IN_PROGRESS'. - 4. Wait for the stack to be deleted in result of rollback after - expiration of timeout defined in WaitHandle resource - of the stack. - 5. Verify the instance of the stack has been deleted. - - Duration: 470 s. - """ - self.check_image_exists() - - # create test flavor - fail_msg = 'Test flavor was not created.' - large_flavor = self.verify( - 50, self.create_flavor, - 1, fail_msg, - 'flavor creation', - ram=1048576 - ) - - parameters = { - 'InstanceType': large_flavor.name, - 'ImageId': self.config.compute.image_name - } - if 'neutron' in self.config.network.network_provider: - parameters['network'], _ = self.create_network_resources() - template = self.load_template( - 'heat_create_neutron_stack_template.yaml') - else: - template = self.load_template( - 'heat_create_nova_stack_template.yaml') - - fail_msg = 'Stack creation was not started.' - stack = self.verify( - 90, self.create_stack, - 2, fail_msg, - 'starting stack creation', - template, disable_rollback=False, parameters=parameters - ) - - self.verify_response_body_content( - 'CREATE_IN_PROGRESS', stack.stack_status, - fail_msg, 3 - ) - self.verify( - 420, self.wait_for_stack_deleted, - 4, 'Rollback of the stack failed.', - 'rolling back the stack after its creation failed', - stack.id - ) - - instances = self.get_stack_objects( - self.heat_client.resources, stack.id, - key='resource_name', value='OS::Nova::Server' - ) - - fail_msg = 'The stack instance rollback failed.' - self.verify( - 30, self.assertTrue, - 5, fail_msg, - 'verifying if the instance was rolled back', - len(instances) == 0 - ) - - def test_wait_condition(self): - """Check creation of stack with Wait Condition/Handle resources - Target component: Heat - - Scenario: - 1. Create test flavor. - 2. Create a keypair. - 3. Save generated private key to file on Controller node. - 4. Create a stack using template. - 5. Wait for the stack status to change to 'CREATE_COMPLETE'. - 6. Delete the file with private key. - 7. Delete the stack. - 8. Wait for the stack to be deleted. - - Duration: 820 s. 
- Available since release: 2015.1.0-8.0 - """ - - self.check_image_exists() - - self.check_required_resources(self.min_required_ram_mb) - - # creation of test flavor - heat_flavor = self.verify( - 50, self.create_flavor, - 1, 'Test flavor can not be created.', - 'flavor creation' - ) - - # creation of test keypair - keypair = self.verify( - 10, self._create_keypair, - 2, 'Keypair can not be created.', - 'keypair creation', - self.compute_client - ) - path_to_key = self.verify( - 10, self.save_key_to_file, - 3, 'Private key can not be saved to file.', - 'saving private key to the file', - keypair.private_key - ) - - # definition of stack parameters - parameters = { - 'key_name': keypair.name, - 'flavor': heat_flavor.name, - 'image': self.config.compute.image_name, - } - - if 'neutron' in self.config.network.network_provider: - private, public = self.create_network_resources() - parameters['net'], parameters['floating_net'] = private, public - template = self.load_template('heat_wait_condition_neutron.yaml') - else: - template = self.load_template('heat_wait_condition_nova.yaml') - - # creation of stack - fail_msg = 'Stack was not created properly.' - stack = self.verify( - 60, self.create_stack, - 4, fail_msg, - 'stack creation', - template, parameters=parameters - ) - self.verify( - 600, self.wait_for_stack_status, - 5, fail_msg, - 'stack status becoming "CREATE_COMPLETE"', - stack.id, 'CREATE_COMPLETE', 600, 15 - ) - - # deletion of file with keypair from vm - self.verify( - 10, self.delete_key_file, - 6, 'The file with private key can not be deleted.', - 'deleting the file with private key', - path_to_key - ) - - # deletion of stack - self.verify( - 20, self.heat_client.stacks.delete, - 7, 'Can not delete stack.', - 'deleting stack', - stack.id - ) - self.verify( - 100, self.wait_for_stack_deleted, - 8, 'Can not delete stack.', - 'deleting stack', - stack.id - ) diff --git a/fuel_health/tests/tests_platform/test_murano_linux.py b/fuel_health/tests/tests_platform/test_murano_linux.py deleted file mode 100644 index 62ec7702..00000000 --- a/fuel_health/tests/tests_platform/test_murano_linux.py +++ /dev/null @@ -1,553 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os -import uuid - -from fuel_health import muranomanager - -from fuel_health.common.utils.data_utils import rand_name - - -LOG = logging.getLogger(__name__) - - -class MuranoDeployLinuxServicesTests(muranomanager.MuranoTest): - """TestClass contains verifications of full Murano functionality. - - Special requirements: - 1. Murano component should be installed. - 2. Internet access for virtual machines in OpenStack. - 3. Linux image with Murano metadata should be imported. 
- """ - - def setUp(self): - super(MuranoDeployLinuxServicesTests, self).setUp() - self.check_clients_state() - - self.doc_link = 'https://www.fuel-infra.org/#fueldocs' - - self.image = self.find_murano_image('linux') - - self.dummy_fqdn = 'io.murano.apps.Simple' - - # Flavor with 2 vCPU and 40Gb HDD will allow to successfully - # deploy all Murano applications. - self.flavor_name = rand_name("ostf_test_Murano_flavor") - flavor = self.compute_client.flavors.create( - self.flavor_name, disk=40, ram=self.min_required_ram_mb, vcpus=2) - self.addCleanup(self.compute_client.flavors.delete, flavor.id) - - def tearDown(self): - super(MuranoDeployLinuxServicesTests, self).tearDown() - - def test_deploy_dummy_app(self): - """Check that user can deploy application in Murano environment - Target component: Murano - - Scenario: - 1. Prepare test app. - 2. Upload test app. - 3. Send request to create environment. - 4. Send request to create session for environment. - 5. Send request to create test service. - 6. Send request to deploy session. - 7. Checking environment status. - 8. Checking deployment status. - 9. Send request to delete environment. - 10. Send request to delete package. - - Duration: 1200 s. - Deployment tags: Murano | murano_plugin, murano_without_glare - Available since release: 2014.2-6.1 - """ - - vms_count = self.get_info_about_available_resources( - self.min_required_ram_mb, 40, 2) - if vms_count < 1: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: your cloud should allow to create ' - 'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. ' - 'You need to remove some resources or add compute nodes ' - 'to have an ability to run this OSTF test.' - .format(self.min_required_ram_mb, 40, 2)) - LOG.debug(msg) - self.skipTest(msg) - - if self.package_exists(self.dummy_fqdn): - package = self.get_package_by_fqdn(self.dummy_fqdn) - self.delete_package(package["id"]) - - fail_msg = ("Package preparation failed. Please refer to " - "OSTF logs for more information") - zip_path = self.verify(10, self.zip_dir, 1, fail_msg, - 'prepare package', - os.path.dirname(__file__), self.dummy_fqdn) - - fail_msg = ("Package uploading failed. " - "Please refer to Openstack and OSTF logs") - self.package = self.verify(10, self.upload_package, 2, fail_msg, - 'uploading package', 'SimpleApp', - {"categories": ["Web"], "tags": ["tag"]}, - zip_path) - - fail_msg = "Can't create environment. Murano API is not available. " - self.environment = self.verify(15, self.create_environment, - 3, fail_msg, 'creating environment', - self.env_name) - - fail_msg = "User can't create session for environment. " - session = self.verify(5, self.create_session, - 4, fail_msg, "session creating", - self.environment.id) - - post_body = { - "instance": { - "flavor": self.flavor_name, - "image": "TestVM", - "assignFloatingIp": True, - "?": { - "type": "io.murano.resources.LinuxMuranoInstance", - "id": str(uuid.uuid4()) - }, - "name": rand_name("testMurano") - }, - "name": rand_name("teMurano"), - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "SimpleApp" - }, - "type": self.dummy_fqdn, - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service. " - self.verify(5, self.create_service, - 5, fail_msg, "service creating", - self.environment.id, session.id, post_body) - - fail_msg = "User can't deploy session. 
" - self.verify(5, self.deploy_session, - 6, fail_msg, - "sending session on deployment", - self.environment.id, session.id) - - fail_msg = "Deployment was not completed correctly. " - self.verify(860, self.deploy_check, - 7, fail_msg, 'deployment is going', - self.environment) - - self.verify(5, self.deployments_status_check, 8, fail_msg, - 'Check deployments status', - self.environment.id) - - fail_msg = "Can't delete environment. " - self.verify(180, self.environment_delete_check, - 9, fail_msg, "deleting environment", - self.environment.id) - - fail_msg = "Can't delete package" - self.verify(5, self.delete_package, 10, fail_msg, "deleting_package", - self.package.id) - - def test_deploy_dummy_app_with_glare(self): - """Check application deployment in Murano environment with GLARE - Target component: Murano - - Scenario: - 1. Prepare test app. - 2. Upload test app. - 3. Send request to create environment. - 4. Send request to create session for environment. - 5. Send request to create test service. - 6. Send request to deploy session. - 7. Checking environment status. - 8. Checking deployment status. - 9. Send request to delete environment. - 10. Send request to delete package. - - Duration: 1200 s. - Deployment tags: Murano | murano_plugin, murano_use_glare - Available since release: 2014.2-6.1 - """ - artifacts = True - vms_count = self.get_info_about_available_resources( - self.min_required_ram_mb, 40, 2) - if vms_count < 1: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: your cloud should allow to create ' - 'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. ' - 'You need to remove some resources or add compute nodes ' - 'to have an ability to run this OSTF test.' - .format(self.min_required_ram_mb, 40, 2)) - LOG.debug(msg) - self.skipTest(msg) - - if self.package_exists(artifacts, self.dummy_fqdn): - package = self.get_package_by_fqdn(self.dummy_fqdn, artifacts) - self.delete_package(package.to_dict()["id"], artifacts) - - fail_msg = ("Package preparation failed. Please refer to " - "OSTF logs for more information") - zip_path = self.verify(10, self.zip_dir, 1, fail_msg, - 'prepare package', - os.path.dirname(__file__), self.dummy_fqdn) - - fail_msg = ("Package uploading failed. " - "Please refer to Openstack and OSTF logs") - self.package = self.verify(10, self.upload_package, 2, fail_msg, - 'uploading package', 'SimpleApp', - {"categories": ["Web"], "tags": ["tag"]}, - zip_path, artifacts) - - fail_msg = "Can't create environment. Murano API is not available. " - self.environment = self.verify(15, self.create_environment, - 3, fail_msg, 'creating environment', - self.env_name) - - fail_msg = "User can't create session for environment. " - session = self.verify(5, self.create_session, - 4, fail_msg, "session creating", - self.environment.id) - - post_body = { - "instance": { - "flavor": self.flavor_name, - "image": "TestVM", - "assignFloatingIp": True, - "?": { - "type": "io.murano.resources.LinuxMuranoInstance", - "id": str(uuid.uuid4()) - }, - "name": rand_name("testMurano") - }, - "name": rand_name("teMurano"), - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "SimpleApp" - }, - "type": self.dummy_fqdn, - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service. " - self.verify(5, self.create_service, - 5, fail_msg, "service creating", - self.environment.id, session.id, post_body) - - fail_msg = "User can't deploy session. 
" - self.verify(5, self.deploy_session, - 6, fail_msg, - "sending session on deployment", - self.environment.id, session.id) - - fail_msg = "Deployment was not completed correctly. " - self.verify(860, self.deploy_check, - 7, fail_msg, 'deployment is going', - self.environment) - - self.verify(5, self.deployments_status_check, 8, fail_msg, - 'Check deployments status', - self.environment.id) - - fail_msg = "Can't delete environment. " - self.verify(180, self.environment_delete_check, - 9, fail_msg, "deleting environment", - self.environment.id) - - fail_msg = "Can't delete package" - self.verify(5, self.delete_package, 10, fail_msg, "deleting_package", - self.package.id, artifacts) - - def test_deploy_apache_service(self): - """Check that user can deploy Apache service in Murano environment - Target component: Murano - - Scenario: - 1. Send request to create environment. - 2. Send request to create session for environment. - 3. Send request to create Linux-based service Apache. - 4. Request to deploy session. - 5. Checking environment status. - 6. Checking deployments status - 7. Checking ports - 8. Send request to delete environment. - - Duration: 2140 s. - Deployment tags: Murano | murano_plugin, murano_without_artifacts - Available since release: 2014.2-6.0 - """ - - vms_count = self.get_info_about_available_resources( - self.min_required_ram_mb, 40, 2) - if vms_count < 1: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: your cloud should allow to create ' - 'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. ' - 'You need to remove some resources or add compute nodes ' - 'to have an ability to run this OSTF test.' - .format(self.min_required_ram_mb, 40, 2)) - LOG.debug(msg) - self.skipTest(msg) - - if not self.image: - msg = ('Murano image was not properly registered or was not ' - 'uploaded at all. Please refer to the Fuel ' - 'documentation ({0}) to find out how to upload and/or ' - 'register image for Murano.'.format(self.doc_link)) - LOG.debug(msg) - self.skipTest(msg) - - if not self.package_exists('io.murano.apps.apache.ApacheHttpServer'): - self.skipTest("This test requires Apache HTTP Server application." - "Please add this application to Murano " - "and run this test again.") - - fail_msg = "Can't create environment. Murano API is not available. " - self.environment = self.verify(15, self.create_environment, - 1, fail_msg, 'creating environment', - self.env_name) - - fail_msg = "User can't create session for environment. " - session = self.verify(5, self.create_session, - 2, fail_msg, "session creating", - self.environment.id) - - post_body = { - "instance": { - "flavor": self.flavor_name, - "image": self.image.name, - "assignFloatingIp": True, - "?": { - "type": "io.murano.resources.LinuxMuranoInstance", - "id": str(uuid.uuid4()) - }, - "name": rand_name("testMurano") - }, - "name": rand_name("teMurano"), - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "Apache" - }, - "type": "io.murano.apps.apache.ApacheHttpServer", - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service. " - apache = self.verify(5, self.create_service, - 3, fail_msg, "service creating", - self.environment.id, session.id, post_body) - - fail_msg = "User can't deploy session. " - self.verify(5, self.deploy_session, - 4, fail_msg, - "sending session on deployment", - self.environment.id, session.id) - - fail_msg = "Deployment was not completed correctly. 
" - self.environment = self.verify(1800, self.deploy_check, - 5, fail_msg, 'deployment is going', - self.environment) - - self.verify(5, self.deployments_status_check, - 6, fail_msg, - 'Check deployments status', - self.environment.id) - - self.verify(300, self.port_status_check, - 7, fail_msg, - 'Check that needed ports are opened', - self.environment, [[apache['instance']['name'], 22, 80]]) - - fail_msg = "Can't delete environment. " - self.verify(5, self.delete_environment, - 8, fail_msg, "deleting environment", - self.environment.id) - - def test_deploy_wordpress_app(self): - """Check that user can deploy WordPress app in Murano environment - Target component: Murano - - Scenario: - 1. Send request to create environment. - 2. Send request to create session for environment. - 3. Send request to create MySQL. - 4. Send request to create Linux-based service Apache. - 5. Send request to create WordPress. - 6. Request to deploy session. - 7. Checking environment status. - 8. Checking deployments status. - 9. Checking ports availability. - 10. Checking WordPress path. - 11. Send request to delete environment. - - Duration: 2140 s. - Deployment tags: Murano | murano_plugin, murano_without_artifacts - Available since release: 2014.2-6.1 - """ - - vms_count = self.get_info_about_available_resources( - self.min_required_ram_mb, 40, 2) - if vms_count < 2: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: your cloud should allow to create ' - 'at least 2 VMs with {0} MB of RAM, {1} HDD and {2} vCPUs.' - ' You need to remove some resources or add compute nodes ' - 'to have an ability to run this OSTF test.' - .format(self.min_required_ram_mb, 40, 2)) - LOG.debug(msg) - self.skipTest(msg) - - if not self.image: - msg = ('Murano image was not properly registered or was not ' - 'uploaded at all. Please refer to the Fuel ' - 'documentation ({0}) to find out how to upload and/or ' - 'register image for Murano.'.format(self.doc_link)) - LOG.debug(msg) - self.skipTest(msg) - - if not self.package_exists('io.murano.apps.apache.ApacheHttpServer', - 'io.murano.databases.MySql', - 'io.murano.apps.WordPress'): - self.skipTest("This test requires Apache HTTP Server, " - "MySQL database and WordPress applications." - "Please add this applications to Murano and " - "run this test again.") - - fail_msg = "Can't create environment. Murano API is not available. " - self.environment = self.verify(15, self.create_environment, - 1, fail_msg, 'creating environment', - self.env_name) - - fail_msg = "User can't create session for environment. " - session = self.verify(5, self.create_session, - 2, fail_msg, "session creating", - self.environment.id) - - post_body = { - "instance": { - "flavor": self.flavor_name, - "image": self.image.name, - "assignFloatingIp": True, - "?": { - "type": "io.murano.resources.LinuxMuranoInstance", - "id": str(uuid.uuid4()) - }, - "name": rand_name("testMurano") - }, - "name": rand_name("teMurano"), - "database": rand_name("ostf"), - "username": rand_name("ostf"), - "password": rand_name("Ost1@"), - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "MySQL" - }, - "type": "io.murano.databases.MySql", - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service MySQL. 
" - self.mysql = self.verify(5, self.create_service, - 3, fail_msg, "service creating", - self.environment.id, session.id, - post_body) - - post_body = { - "instance": { - "flavor": self.flavor_name, - "image": self.image.name, - "assignFloatingIp": True, - "?": { - "type": "io.murano.resources.LinuxMuranoInstance", - "id": str(uuid.uuid4()) - }, - "name": rand_name("testMurano") - }, - "name": rand_name("teMurano"), - "enablePHP": True, - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "Apache" - }, - "type": "io.murano.apps.apache.ApacheHttpServer", - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service Apache. " - self.apache = self.verify(5, self.create_service, - 4, fail_msg, "service creating", - self.environment.id, session.id, - post_body) - - post_body = { - "name": rand_name("teMurano"), - "server": self.apache, - "database": self.mysql, - "dbName": "wordpress", - "dbUser": "wp_user", - "dbPassword": "U0yleh@c", - "?": { - "_{id}".format(id=uuid.uuid4().hex): { - "name": "WordPress" - }, - "type": "io.murano.apps.WordPress", - "id": str(uuid.uuid4()) - } - } - - fail_msg = "User can't create service WordPress. " - self.verify(5, self.create_service, - 5, fail_msg, "service creating", - self.environment.id, session.id, post_body) - - fail_msg = "User can't deploy session. " - self.verify(5, self.deploy_session, - 6, fail_msg, - "sending session on deployment", - self.environment.id, session.id) - - fail_msg = "Deployment was not completed correctly. " - self.environment = self.verify(2400, self.deploy_check, - 7, fail_msg, 'deployment is going', - self.environment) - - self.verify(5, self.deployments_status_check, - 8, fail_msg, - 'Check deployments status', - self.environment.id) - - self.verify(300, self.port_status_check, - 9, fail_msg, - 'Check that needed ports are opened', - self.environment, - [[self.apache['instance']['name'], 22, 80], - [self.mysql['instance']['name'], 22, 3306]]) - - fail_msg = "Path to WordPress unavailable" - self.verify(30, self.check_path, 10, fail_msg, - 'checking path availability', - self.environment, "wordpress", - self.apache['instance']['name']) - - fail_msg = "Can't delete environment. " - self.verify(10, self.delete_environment, - 11, fail_msg, "deleting environment", - self.environment.id) diff --git a/fuel_health/tests/tests_platform/test_sahara.py b/fuel_health/tests/tests_platform/test_sahara.py deleted file mode 100644 index 9ae5fa17..00000000 --- a/fuel_health/tests/tests_platform/test_sahara.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from fuel_health.common.utils.data_utils import rand_name -from fuel_health import saharamanager - -LOG = logging.getLogger(__name__) - - -class SaharaClusterTest(saharamanager.SaharaTestsManager): - _plugin_name = 'An unknown plugin name' - _hadoop_version = 'An unknown Hadoop version' - _worker_processes = 'An unknown list of worker processes' - _master_processes = 'An unknown list of master processes' - - def setUp(self): - super(SaharaClusterTest, self).setUp() - - doc_link = 'https://www.fuel-infra.org/#fueldocs' - - max_free_ram_mb = ( - self.get_max_free_compute_node_ram(self.min_required_ram_mb)) - if max_free_ram_mb < self.min_required_ram_mb: - msg = ('This test requires more hardware resources of your ' - 'OpenStack cluster: at least one of the compute nodes ' - 'must have >= {0} MB of free RAM, but you have only ' - '{1} MB on most appropriate compute node.' - .format(self.min_required_ram_mb, max_free_ram_mb)) - LOG.debug(msg) - self.skipTest(msg) - - self.image_id = self.find_and_check_image(self._plugin_name, - self._hadoop_version) - if not self.image_id: - msg = ('Sahara image was not correctly registered or it was not ' - 'uploaded at all. Please refer to the Fuel ' - 'documentation ({0}) to find out how to upload and/or ' - 'register image for Sahara.'.format(doc_link)) - LOG.debug(msg) - self.skipTest(msg) - - flavor_id = self.create_flavor() - private_net_id, floating_ip_pool = self.create_network_resources() - self.cl_template = { - 'name': rand_name('sahara-cluster-template-'), - 'plugin': self._plugin_name, - 'hadoop_version': self._hadoop_version, - 'node_groups': [ - { - 'name': 'master', - 'flavor_id': flavor_id, - 'node_processes': self._master_processes, - 'floating_ip_pool': floating_ip_pool, - 'auto_security_group': True, - 'count': 1 - }, - { - 'name': 'worker', - 'flavor_id': flavor_id, - 'node_processes': self._worker_processes, - 'floating_ip_pool': floating_ip_pool, - 'auto_security_group': True, - 'count': 1 - } - ], - 'net_id': private_net_id, - 'cluster_configs': {'HDFS': {'dfs.replication': 1}}, - 'description': 'Test cluster template' - } - self.cluster = { - 'name': rand_name('sahara-cluster-'), - 'plugin': self._plugin_name, - 'hadoop_version': self._hadoop_version, - 'default_image_id': self.image_id, - 'description': 'Test cluster' - } - - -class VanillaTwoClusterTest(SaharaClusterTest): - def setUp(self): - mapping_versions_of_plugin = { - "6.1": "2.4.1", - "7.0": "2.6.0", - "8.0": "2.7.1", - "9.0": "2.7.1", - "9.1": "2.7.1", - "10.0": "2.7.1" - } - self._plugin_name = 'vanilla' - self._hadoop_version = mapping_versions_of_plugin.get( - self.config.fuel.fuel_version, "2.7.1") - self._worker_processes = ['nodemanager', 'datanode'] - self._master_processes = ['resourcemanager', 'namenode', 'oozie', - 'historyserver', 'secondarynamenode'] - super(VanillaTwoClusterTest, self).setUp() - - self.processes_map = { - 'resourcemanager': [8032, 8088], - 'namenode': [9000, 50070], - 'nodemanager': [8042], - 'datanode': [50010, 50020, 50075], - 'secondarynamenode': [50090], - 'oozie': [11000], - 'historyserver': [19888] - } - - def test_vanilla_two_cluster(self): - """Sahara test for launching a simple Vanilla2 cluster - Target component: Sahara - - Scenario: - 1. Create a cluster template - 2. Create a cluster - 3. Wait for the cluster to build and get to "Active" status - 4. Check deployment of Hadoop services on the cluster - 5. Check ability to log into cluster nodes via SSH - 6. Delete the cluster - 7. 
Delete the cluster template - - Duration: 1200 s. - Available since release: 2014.2-6.1 - Deployment tags: Sahara - """ - - fail_msg = 'Failed to create cluster template.' - msg = 'creating cluster template' - cl_template_id = self.verify(30, self.create_cluster_template, - 1, fail_msg, msg, **self.cl_template) - - self.cluster['cluster_template_id'] = cl_template_id - fail_msg = 'Failed to create cluster.' - msg = 'creating cluster' - cluster_id = self.verify(30, self.create_cluster, 2, - fail_msg, msg, **self.cluster) - - fail_msg = 'Failed while polling cluster status.' - msg = 'polling cluster status' - self.verify(self.cluster_timeout, - self.poll_cluster_status, 3, fail_msg, msg, cluster_id) - - fail_msg = 'Failed to check deployment of Hadoop services on cluster.' - msg = 'checking deployment of Hadoop services on cluster' - self.verify(self.process_timeout, self.check_hadoop_services, - 4, fail_msg, msg, cluster_id, self.processes_map) - - fail_msg = 'Failed to log into cluster nodes via SSH.' - msg = 'logging into cluster nodes via SSH' - self.verify( - 30, self.check_node_access_via_ssh, 5, fail_msg, msg, cluster_id) - - fail_msg = 'Failed to delete cluster.' - msg = 'deleting cluster' - self.verify(self.delete_timeout, self.delete_resource, 6, - fail_msg, msg, self.sahara_client.clusters, cluster_id) - - fail_msg = 'Failed to delete cluster template.' - msg = 'deleting cluster template' - self.verify(30, self.delete_resource, 7, fail_msg, msg, - self.sahara_client.cluster_templates, cl_template_id) diff --git a/fuel_plugin/__init__.py b/fuel_plugin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/consts.py b/fuel_plugin/consts.py deleted file mode 100644 index 50cb200b..00000000 --- a/fuel_plugin/consts.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2016 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import collections - - -def Enum(*values, **kwargs): - names = kwargs.get('names') - if names: - return collections.namedtuple('Enum', names)(*values) - return collections.namedtuple('Enum', values)(*values) - -TESTRUN_STATUSES = Enum( - 'stopped', - 'restarted', - 'finished', - 'running' -) - -TEST_STATUSES = Enum( - 'stopped', - 'restarted', - 'finished', - 'running', - 'error', - 'skipped', - 'success', - 'failure', - 'wait_running', - 'disabled' -) diff --git a/fuel_plugin/ostf_adapter/__init__.py b/fuel_plugin/ostf_adapter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/ostf_adapter/config.py b/fuel_plugin/ostf_adapter/config.py deleted file mode 100644 index 28762b38..00000000 --- a/fuel_plugin/ostf_adapter/config.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os - -from fuel_plugin.ostf_adapter import mixins - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - - -LOG = logging.getLogger(__name__) - -adapter_group = cfg.OptGroup(name='adapter', - title='Adapter Options') - -adapter_opts = [ - cfg.StrOpt('server_host', - default='127.0.0.1', - help="adapter host"), - cfg.IntOpt('server_port', - default=8777, - help="Port number"), - cfg.StrOpt('dbpath', - default='postgresql+psycopg2://ostf:ostf@localhost/ostf', - help=""), - cfg.StrOpt('lock_dir', - default='/var/lock', - help=""), - cfg.StrOpt('nailgun_host', - default='127.0.0.1', - help=""), - cfg.StrOpt('nailgun_port', - default='8000', - help=""), - cfg.StrOpt('log_file', - default='/var/log/ostf.log', - help=""), - cfg.BoolOpt('auth_enable', - default=False, - help="Set True to enable auth."), -] - -cli_opts = [ - cfg.BoolOpt('debug', default=False), - cfg.BoolOpt('clear-db', default=False), - cfg.BoolOpt('after-initialization-environment-hook', default=False), - cfg.StrOpt('debug_tests') -] - - -cfg.CONF.register_cli_opts(cli_opts) -cfg.CONF.register_opts(adapter_opts, group='adapter') - - -DEFAULT_CONFIG_DIR = os.path.join(os.path.abspath( - os.path.dirname(__file__)), '/etc') - -DEFAULT_CONFIG_FILE = "ostf.conf" - - -def init_config(args=[]): - - config_files = [] - - failsafe_path = "/etc/ostf/" + DEFAULT_CONFIG_FILE - - # Environment variables override defaults... - custom_config = os.environ.get('CUSTOM_OSTF_CONFIG') - if custom_config: - path = custom_config - else: - conf_dir = os.environ.get('OSTF_CONFIG_DIR', - DEFAULT_CONFIG_DIR) - conf_file = os.environ.get('OSTF_CONFIG', DEFAULT_CONFIG_FILE) - - path = os.path.join(conf_dir, conf_file) - - if not (os.path.isfile(path) - or 'OSTF_CONFIG_DIR' in os.environ - or 'OSTF_CONFIG' in os.environ): - path = failsafe_path - - if not os.path.exists(path): - msg = "Config file {0} not found".format(path) - LOG.warning(msg) - # No need to fail here! If config doesnot exist defaults are used - else: - config_files.append(path) - - cfg.CONF(args, project='fuel_ostf', default_config_files=config_files, - version=mixins.get_version_string()) diff --git a/fuel_plugin/ostf_adapter/logger.py b/fuel_plugin/ostf_adapter/logger.py deleted file mode 100644 index aa439937..00000000 --- a/fuel_plugin/ostf_adapter/logger.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
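The deleted config module above wires its settings through oslo.config: options are declared once, registered under the 'adapter' group, and then read back from CONF. A minimal standalone sketch of that pattern, assuming oslo.config is installed and reusing only a couple of the original option names:

# Minimal sketch of the oslo.config pattern used by the removed
# ostf_adapter/config.py (illustrative only, not part of the retired service).
from oslo_config import cfg

CONF = cfg.ConfigOpts()  # a private ConfigOpts rather than the global cfg.CONF

adapter_group = cfg.OptGroup(name='adapter', title='Adapter Options')
adapter_opts = [
    cfg.StrOpt('server_host', default='127.0.0.1', help='adapter host'),
    cfg.IntOpt('server_port', default=8777, help='adapter port'),
]

CONF.register_group(adapter_group)
CONF.register_opts(adapter_opts, group=adapter_group)

# Parse an empty argv; a config file could be supplied via default_config_files.
CONF([], project='fuel_ostf')
print(CONF.adapter.server_host, CONF.adapter.server_port)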
- -import logging -import logging.handlers -import os - - -_LOG_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" - - -class ResultsLogger(object): - """Logger used to log results of OSTF tests. Resutls are stored in - /var/log/ostf/ dir. Each cluster has one log file per each set of tests. - """ - - def __init__(self, testset, cluster_id): - self.testset = testset - self.cluster_id = cluster_id - self.filename = self._make_filename() - self._logger = self._init_file_logger() - - def _init_file_logger(self): - logger = logging.getLogger('ostf-results-log-{0}-{1}'.format( - self.cluster_id, self.testset)) - - if not logger.handlers: - log_dir = '/var/log/ostf' - log_file = os.path.join(log_dir, self.filename) - - file_handler = logging.handlers.WatchedFileHandler(log_file) - file_handler.setLevel(logging.DEBUG) - - formatter = logging.Formatter( - '%(asctime)s %(message)s', - _LOG_TIME_FORMAT) - file_handler.setFormatter(formatter) - - logger.addHandler(file_handler) - - logger.propagate = 0 - - return logger - - def _make_filename(self): - return 'cluster_{cluster_id}_{testset}.log'.format( - testset=self.testset, cluster_id=self.cluster_id) - - def log_results(self, test_id, test_name, status, message, traceback): - status = status.upper() - msg = "{status} {test_name} ({test_id}) {message} {traceback}".format( - test_name=test_name, test_id=test_id, status=status, - message=message, traceback=traceback) - self._logger.info(msg) - - -def setup(log_file=None): - formatter = logging.Formatter( - '%(asctime)s %(levelname)s (%(module)s) %(message)s', - _LOG_TIME_FORMAT) - log = logging.getLogger(None) - stream_handler = logging.StreamHandler() - stream_handler.setLevel(logging.INFO) - stream_handler.setFormatter(formatter) - log.addHandler(stream_handler) - - if log_file: - log_file = os.path.abspath(log_file) - file_handler = logging.handlers.WatchedFileHandler(log_file) - file_handler.setLevel(logging.DEBUG) - mode = int('0644', 8) - os.chmod(log_file, mode) - file_handler.setFormatter(formatter) - log.addHandler(file_handler) - - log.setLevel(logging.INFO) diff --git a/fuel_plugin/ostf_adapter/mixins.py b/fuel_plugin/ostf_adapter/mixins.py deleted file mode 100644 index 7da8e504..00000000 --- a/fuel_plugin/ostf_adapter/mixins.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
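The ResultsLogger above keeps one log file per cluster and test set and guards against stacking duplicate handlers. A condensed sketch of the same pattern; the log directory here is a placeholder, not the retired /var/log/ostf path:

# Sketch of a per-cluster results logger mirroring the removed ResultsLogger.
import logging
import logging.handlers
import os


def make_results_logger(testset, cluster_id, log_dir='/tmp/ostf-logs'):
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    logger = logging.getLogger('ostf-results-{0}-{1}'.format(cluster_id, testset))
    if not logger.handlers:  # avoid adding a second handler on repeated calls
        handler = logging.handlers.WatchedFileHandler(
            os.path.join(log_dir, 'cluster_{0}_{1}.log'.format(cluster_id, testset)))
        handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
        logger.propagate = False
    return logger


log = make_results_logger('sanity', 1)
log.info('SUCCESS example_test (example.id) message traceback')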
- -import logging - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils - -import requests -from sqlalchemy.orm import joinedload - -from fuel_plugin.ostf_adapter.nose_plugin import nose_utils -from fuel_plugin.ostf_adapter.storage import models - -LOG = logging.getLogger(__name__) - -TEST_REPOSITORY = [] -# TODO(ikutukov): remove hardcoded Nailgun API urls here and below -NAILGUN_VERSION_API_URL = 'http://{0}:{1}/api/v1/version' - - -def delete_db_data(session): - LOG.info('Starting clean db action.') - session.query(models.ClusterTestingPattern).delete() - session.query(models.ClusterState).delete() - session.query(models.TestSet).delete() - - session.commit() - - -def cache_test_repository(session): - test_repository = session.query(models.TestSet)\ - .options(joinedload('tests'))\ - .all() - - crucial_tests_attrs = ['name', 'deployment_tags', - 'available_since_release'] - for test_set in test_repository: - data_elem = dict() - - data_elem['test_set_id'] = test_set.id - data_elem['deployment_tags'] = test_set.deployment_tags - data_elem['available_since_release'] = test_set.available_since_release - data_elem['tests'] = [] - - for test in test_set.tests: - test_dict = dict([(attr_name, getattr(test, attr_name)) - for attr_name in crucial_tests_attrs]) - data_elem['tests'].append(test_dict) - - TEST_REPOSITORY.append(data_elem) - - -def discovery_check(session, cluster_id, token=None): - cluster_attrs = _get_cluster_attrs(cluster_id, token=token) - - cluster_data = { - 'id': cluster_id, - 'deployment_tags': cluster_attrs['deployment_tags'], - 'release_version': cluster_attrs['release_version'], - } - - cluster_state = session.query(models.ClusterState)\ - .filter_by(id=cluster_data['id'])\ - .first() - - if not cluster_state: - session.add( - models.ClusterState( - id=cluster_data['id'], - deployment_tags=list(cluster_data['deployment_tags']) - ) - ) - - # flush data to db, because _add_cluster_testing_pattern - # is dependent on it - session.flush() - - _add_cluster_testing_pattern(session, cluster_data) - - return - - old_deployment_tags = cluster_state.deployment_tags - if set(old_deployment_tags) != cluster_data['deployment_tags']: - session.query(models.ClusterTestingPattern)\ - .filter_by(cluster_id=cluster_state.id)\ - .delete() - - _add_cluster_testing_pattern(session, cluster_data) - - cluster_state.deployment_tags = \ - list(cluster_data['deployment_tags']) - - session.merge(cluster_state) - - -def get_version_string(token=None): - requests_session = requests.Session() - requests_session.trust_env = False - request_url = NAILGUN_VERSION_API_URL.format(cfg.CONF.adapter.nailgun_host, - cfg.CONF.adapter.nailgun_port) - try: - response = requests_session.get(request_url).json() - return jsonutils.dumps(response) - except (ValueError, IOError, requests.exceptions.HTTPError): - return "Can't obtain version via Nailgun API" - - -def _get_cluster_attrs(cluster_id, token=None): - cluster_attrs = {} - - REQ_SES = requests.Session() - REQ_SES.trust_env = False - REQ_SES.verify = False - - if token is not None: - REQ_SES.headers.update({'X-Auth-Token': token}) - - URL = 'http://{0}:{1}/{2}' - NAILGUN_API_URL = 'api/clusters/{0}' - - cluster_url = NAILGUN_API_URL.format(cluster_id) - request_url = URL.format(cfg.CONF.adapter.nailgun_host, - cfg.CONF.adapter.nailgun_port, - cluster_url) - - response = REQ_SES.get(request_url).json() - 
release_id = response.get('release_id', 'failed to get id') - - release_url = URL.format( - cfg.CONF.adapter.nailgun_host, cfg.CONF.adapter.nailgun_port, - 'api/releases/{0}'.format(release_id)) - - nodes_url = URL.format( - cfg.CONF.adapter.nailgun_host, cfg.CONF.adapter.nailgun_port, - 'api/nodes?cluster_id={0}'.format(cluster_id)) - nodes_response = REQ_SES.get(nodes_url).json() - if 'objects' in nodes_response: - nodes_response = nodes_response['objects'] - enable_without_ceph = filter(lambda node: 'ceph-osd' in node['roles'], - nodes_response) - - sriov_compute_ids = [] - dpdk_compute_ids = [] # Check env has computes with DPDK - compute_ids = [node['id'] for node in nodes_response - if "compute" in node['roles']] - for compute_id in compute_ids: - ifaces_url = URL.format( - cfg.CONF.adapter.nailgun_host, cfg.CONF.adapter.nailgun_port, - 'api/nodes/{id}/interfaces'.format(id=compute_id)) - ifaces_resp = REQ_SES.get(ifaces_url).json() - for iface in ifaces_resp: - if 'interface_properties' in iface: - if ('sriov' in iface['interface_properties'] and - iface['interface_properties']['sriov']['enabled']): - sriov_compute_ids.append(compute_id) - if 'dpdk' in iface['interface_properties']: - if 'enabled' in iface['interface_properties']['dpdk']: - if iface['interface_properties']['dpdk']['enabled']: - dpdk_compute_ids.append(compute_id) - else: - if ('sriov' in iface['attributes'] and - iface['attributes']['sriov']['enabled']['value']): - sriov_compute_ids.append(compute_id) - if 'dpdk' in iface['attributes']: - if 'enabled' in iface['attributes']['dpdk']: - if iface['attributes']['dpdk']['enabled']['value']: - dpdk_compute_ids.append(compute_id) - - deployment_tags = set() - - if sriov_compute_ids: - deployment_tags.add('sriov') - - if dpdk_compute_ids: - deployment_tags.add('computes_with_dpdk') - if not dpdk_compute_ids or set(compute_ids) - set(dpdk_compute_ids): - deployment_tags.add('computes_without_dpdk') - - if not enable_without_ceph: - deployment_tags.add('enable_without_ceph') - - fuel_version = response.get('fuel_version') - if fuel_version: - deployment_tags.add(fuel_version) - - release_data = REQ_SES.get(release_url).json() - - if 'version' in release_data: - cluster_attrs['release_version'] = release_data['version'] - - # info about deployment type and operating system - mode = 'ha' if 'ha' in response['mode'].lower() else response['mode'] - deployment_tags.add(mode) - deployment_tags.add(release_data.get( - 'operating_system', 'failed to get os')) - - # networks manager - network_type = response.get('net_provider', 'nova_network') - deployment_tags.add(network_type) - - # info about murano/sahara clients installation - request_url += '/' + 'attributes' - response = REQ_SES.get(request_url).json() - - public_assignment = response['editable'].get('public_network_assignment') - if not public_assignment or \ - public_assignment['assign_to_all_nodes']['value']: - deployment_tags.add('public_on_all_nodes') - - additional_components = \ - response['editable'].get('additional_components', dict()) - - use_vcenter = response['editable']['common'].get('use_vcenter', None) - libvrt_data = response['editable']['common'].get('libvirt_type', None) - - if use_vcenter and use_vcenter.get('value'): - deployment_tags.add('use_vcenter') - - additional_depl_tags = set() - - comp_names = ['murano', 'sahara', 'heat', 'ceilometer', 'ironic'] - - def processor(comp): - if comp in comp_names: - if additional_components.get(comp)\ - and additional_components.get(comp)['value']\ - is True: - 
additional_depl_tags.add(comp) - - for comp in comp_names: - processor(comp) - - # TODO(freerunner): Rework murano part after removal murano from the box - murano_settings = response['editable'].get('murano_settings', {}) - # murano_glance_artifacts_plugin was moved from additional components - # in mitaka, thus for old environments it should taken from them - murano_glance_artifacts_plugin = murano_settings.get( - 'murano_glance_artifacts_plugin', - additional_components.get('murano_glance_artifacts_plugin') - ) - # NOTE(freerunner): Murano settings appears only if murano enabled - murano_artifacts = None - if murano_glance_artifacts_plugin: - murano_artifacts = murano_glance_artifacts_plugin['value'] - - detach_murano = response['editable'].get('detach-murano', None) - murano_plugin_enabled = None - if detach_murano: - murano_plugin_enabled = detach_murano['metadata'].get('enabled', None) - if murano_plugin_enabled: - additional_depl_tags.add('murano_plugin') - - # TODO(freerunner): Rework GLARE discover mechanism after - # TODO(freerunner): removal murano from the box - if murano_artifacts: - additional_depl_tags.add('murano_use_glare') - # NOTE(freerunner): Murano plugin will always support only one version - elif detach_murano and murano_plugin_enabled and ( - detach_murano['metadata']['versions'][0] - ['murano_glance_artifacts'].get('value', None)): - additional_depl_tags.add('murano_use_glare') - # NOTE(freerunner): Set this tag only if murano is present - elif murano_plugin_enabled or murano_settings: - additional_depl_tags.add('murano_without_glare') - - storage_components = response['editable'].get('storage', dict()) - - storage_comp = ['volumes_ceph', 'images_ceph', 'ephemeral_ceph', - 'objects_ceph', 'osd_pool_size', 'volumes_lvm', - 'volumes_vmdk', 'images_vcenter'] - - storage_depl_tags = set() - - def storage_processor(scomp): - if scomp in storage_comp: - if storage_components.get(scomp) \ - and storage_components.get(scomp)['value'] \ - is True: - storage_depl_tags.add(scomp) - for scomp in storage_comp: - storage_processor(scomp) - - if additional_depl_tags: - deployment_tags.add('additional_components') - deployment_tags.update(additional_depl_tags) - if storage_depl_tags: - deployment_tags.add('storage') - deployment_tags.update(storage_depl_tags) - if libvrt_data and libvrt_data.get('value'): - deployment_tags.add(libvrt_data['value']) - - cluster_attrs['deployment_tags'] = set( - [tag.lower() for tag in deployment_tags] - ) - - return cluster_attrs - - -def _add_cluster_testing_pattern(session, cluster_data): - to_database = [] - - global TEST_REPOSITORY - - # populate cache if it's empty - if not TEST_REPOSITORY: - cache_test_repository(session) - - for test_set in TEST_REPOSITORY: - if nose_utils.is_test_available(cluster_data, test_set): - - testing_pattern = {} - testing_pattern['cluster_id'] = cluster_data['id'] - testing_pattern['test_set_id'] = test_set['test_set_id'] - testing_pattern['tests'] = [] - - for test in test_set['tests']: - if nose_utils.is_test_available(cluster_data, test): - testing_pattern['tests'].append(test['name']) - - to_database.append( - models.ClusterTestingPattern(**testing_pattern) - ) - - session.add_all(to_database) diff --git a/fuel_plugin/ostf_adapter/nailgun_hooks.py b/fuel_plugin/ostf_adapter/nailgun_hooks.py deleted file mode 100644 index c2714214..00000000 --- a/fuel_plugin/ostf_adapter/nailgun_hooks.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2013 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from distutils import version - -from sqlalchemy import create_engine -from sqlalchemy.engine import reflection -from sqlalchemy import inspect -from sqlalchemy import MetaData -from sqlalchemy.pool import NullPool -from sqlalchemy import schema - -from fuel_plugin.ostf_adapter.storage import alembic_cli - - -def _get_enums(conn): - """Return names for db types. - Please, be awared that for sqlalchemy of version >= 1.0.0 - get_enums() method of inspection object is available for the - purpose. - - Also this approach will work only for postgresql dialect. - """ - from sqlalchemy import __version__ - if version.StrictVersion(__version__) >= version.StrictVersion("1.0.0"): - return [e['name'] for e in inspect(conn).get_enums()] - else: - return conn.dialect._load_enums(conn).keys() - - -def clear_db(db_path): - db_engine = create_engine(db_path, poolclass=NullPool) - with db_engine.begin() as conn: - meta = MetaData() - meta.reflect(bind=db_engine) - inspector = reflection.Inspector.from_engine(db_engine) - - tbs = [] - all_fks = [] - - for table_name in inspector.get_table_names(): - fks = [] - for fk in inspector.get_foreign_keys(table_name): - if not fk['name']: - continue - fks.append( - schema.ForeignKeyConstraint(tuple(), - tuple(), - name=fk['name']) - ) - t = schema.Table( - table_name, - meta, - *fks, - extend_existing=True - ) - tbs.append(t) - all_fks.extend(fks) - - for fkc in all_fks: - conn.execute(schema.DropConstraint(fkc)) - - for table in tbs: - conn.execute(schema.DropTable(table)) - - # such construction is available only for postgresql - if db_engine.name == "postgresql": - for en in _get_enums(conn): - conn.execute("DROP TYPE {0}".format(en)) - - -def after_initialization_environment_hook(): - """Expect 0 on success by nailgun - Exception is good enough signal that something goes wrong - """ - alembic_cli.do_apply_migrations() - return 0 diff --git a/fuel_plugin/ostf_adapter/nose_plugin/__init__.py b/fuel_plugin/ostf_adapter/nose_plugin/__init__.py deleted file mode 100644 index b01c97e4..00000000 --- a/fuel_plugin/ostf_adapter/nose_plugin/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
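clear_db above removes foreign keys, tables, and PostgreSQL enum types one by one; when only the tables matter, SQLAlchemy's reflected MetaData can do the same job in a few lines. A sketch with a placeholder database URL:

# Sketch: reflect the current schema and drop every table.
# Unlike the removed clear_db, this does not drop PostgreSQL ENUM types.
from sqlalchemy import MetaData, create_engine
from sqlalchemy.pool import NullPool

engine = create_engine('sqlite:///example.db', poolclass=NullPool)  # placeholder URL
meta = MetaData()
meta.reflect(bind=engine)   # load the existing table definitions
meta.drop_all(bind=engine)  # drop them in dependency order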
- -from stevedore import extension - - -_PLUGIN_MANAGER = None - - -def get_plugin(plugin): - global _PLUGIN_MANAGER - plugin_manager = _PLUGIN_MANAGER - - if plugin_manager is None: - PLUGINS_NAMESPACE = 'plugins' - plugin_manager = extension.ExtensionManager(PLUGINS_NAMESPACE, - invoke_on_load=True) - - _PLUGIN_MANAGER = plugin_manager - return _PLUGIN_MANAGER[plugin].obj diff --git a/fuel_plugin/ostf_adapter/nose_plugin/nose_adapter.py b/fuel_plugin/ostf_adapter/nose_plugin/nose_adapter.py deleted file mode 100644 index 07652fd8..00000000 --- a/fuel_plugin/ostf_adapter/nose_plugin/nose_adapter.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fcntl -import logging -import os -import signal - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - -from fuel_plugin import consts -from fuel_plugin.ostf_adapter import logger -from fuel_plugin.ostf_adapter.nose_plugin import nose_storage_plugin -from fuel_plugin.ostf_adapter.nose_plugin import nose_test_runner -from fuel_plugin.ostf_adapter.nose_plugin import nose_utils -from fuel_plugin.ostf_adapter.storage import engine -from fuel_plugin.ostf_adapter.storage import models - - -LOG = logging.getLogger(__name__) - - -class InterruptTestRunException(KeyboardInterrupt): - """Current class exception is used for cleanup action - as KeyboardInterrupt is the only exception that is reraised by - unittest (and nose correspondingly) into outside environment - """ - - -class NoseDriver(object): - def __init__(self): - LOG.warning('Initializing Nose Driver') - - def run(self, test_run, test_set, dbpath, - ostf_os_access_creds=None, - tests=None, token=None): - - if not ostf_os_access_creds: - ostf_os_access_creds = dict() - tests = tests or test_run.enabled_tests - if tests: - argv_add = [nose_utils.modify_test_name_for_nose(test) - for test in tests] - else: - argv_add = [test_set.test_path] + test_set.additional_arguments - - results_log = logger.ResultsLogger(test_set.id, test_run.cluster_id) - - lock_path = cfg.CONF.adapter.lock_dir - test_run.pid = nose_utils.run_proc(self._run_tests, - lock_path, - dbpath, - test_run.id, - test_run.cluster_id, - ostf_os_access_creds, - argv_add, - token, - results_log).pid - - def _run_tests(self, lock_path, dbpath, test_run_id, - cluster_id, ostf_os_access_creds, argv_add, token, - results_log): - cleanup_flag = False - - def raise_exception_handler(signum, stack_frame): - raise InterruptTestRunException() - signal.signal(signal.SIGUSR1, raise_exception_handler) - - with engine.contexted_session(dbpath) as session: - testrun = session.query(models.TestRun)\ - .filter_by(id=test_run_id)\ - .one() - - try: - if not os.path.exists(lock_path): - LOG.error('There is no directory to store locks') - raise Exception('There is no directory to store locks') - - aquired_locks = [] - for serie in testrun.test_set.exclusive_testsets: - lock_name = serie + str(testrun.cluster_id) - fd = open(os.path.join(lock_path, 
lock_name), 'w') - fcntl.flock(fd, fcntl.LOCK_EX) - aquired_locks.append(fd) - - nose_test_runner.SilentTestProgram( - addplugins=[nose_storage_plugin.StoragePlugin( - session, test_run_id, str(cluster_id), - ostf_os_access_creds, token, results_log - )], - exit=False, - argv=['ostf_tests'] + argv_add) - - except InterruptTestRunException: - # (dshulyak) after process is interrupted we need to - # disable existing handler - signal.signal(signal.SIGUSR1, lambda *args: signal.SIG_DFL) - if testrun.test_set.cleanup_path: - cleanup_flag = True - - except Exception: - LOG.exception('Test run ID: %s', test_run_id) - finally: - updated_data = {'status': consts.TESTRUN_STATUSES.finished, - 'pid': None} - - models.TestRun.update_test_run( - session, test_run_id, updated_data) - - for fd in aquired_locks: - fcntl.flock(fd, fcntl.LOCK_UN) - fd.close() - - if cleanup_flag: - self._clean_up(session, - test_run_id, - cluster_id, - testrun.test_set.cleanup_path) - - def kill(self, test_run): - try: - if test_run.pid: - os.kill(test_run.pid, signal.SIGUSR1) - return True - except OSError: - return False - return False - - def _clean_up(self, session, test_run_id, cluster_id, cleanup): - # need for performing proper cleaning up for current cluster - cluster_deployment_info = \ - session.query(models.ClusterState.deployment_tags)\ - .filter_by(id=cluster_id)\ - .scalar() - - try: - module_obj = __import__(cleanup, -1) - - os.environ['NAILGUN_HOST'] = str(cfg.CONF.adapter.nailgun_host) - os.environ['NAILGUN_PORT'] = str(cfg.CONF.adapter.nailgun_port) - os.environ['CLUSTER_ID'] = str(cluster_id) - - module_obj.cleanup.cleanup(cluster_deployment_info) - - except Exception: - LOG.exception( - 'Cleanup error. Test Run ID %s. Cluster ID %s', - test_run_id, - cluster_id - ) diff --git a/fuel_plugin/ostf_adapter/nose_plugin/nose_discovery.py b/fuel_plugin/ostf_adapter/nose_plugin/nose_discovery.py deleted file mode 100644 index e67f6ab9..00000000 --- a/fuel_plugin/ostf_adapter/nose_plugin/nose_discovery.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
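The nose_adapter code above serializes exclusive test sets with advisory file locks; stripped of the test-run plumbing, the locking pattern is just this (POSIX only, lock directory and lock name are placeholders):

# Sketch of the fcntl.flock pattern used for exclusive test sets.
import fcntl
import os

lock_dir = '/tmp/ostf-locks'
if not os.path.isdir(lock_dir):
    os.makedirs(lock_dir)

fd = open(os.path.join(lock_dir, 'smoke_cluster_1.lock'), 'w')
fcntl.flock(fd, fcntl.LOCK_EX)      # blocks until no other process holds the lock
try:
    pass                            # the exclusive test set would run here
finally:
    fcntl.flock(fd, fcntl.LOCK_UN)  # release, then close, as the adapter did
    fd.close()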
- -import logging -import os -import re - -from nose import plugins - -from fuel_plugin.ostf_adapter.nose_plugin import nose_test_runner -from fuel_plugin.ostf_adapter.nose_plugin import nose_utils -from fuel_plugin.ostf_adapter.storage import models - - -LOG = logging.getLogger(__name__) - - -class DiscoveryPlugin(plugins.Plugin): - - enabled = True - name = 'discovery' - score = 15000 - - def __init__(self, session): - self.session = session - self.test_sets = {} - super(DiscoveryPlugin, self).__init__() - - def options(self, parser, env=os.environ): - pass - - def configure(self, options, conf): - pass - - def afterImport(self, filename, module): - module = __import__(module, fromlist=[module]) - LOG.info('Inspecting %s', filename) - if hasattr(module, '__profile__'): - profile = module.__profile__ - - profile['deployment_tags'] = [ - tag.lower() for tag in profile.get('deployment_tags', []) - ] - - try: - test_set = models.TestSet(**profile) - self.session.merge(test_set) - self.test_sets[test_set.id] = test_set - - # flush test_sets data into db - self.session.commit() - except Exception as e: - LOG.error( - ('An error has occured while processing' - ' data entity for %s. Error message: %s'), - module.__name__, - e.message - ) - LOG.info('%s discovered.', module.__name__) - - @classmethod - def test_belongs_to_testset(cls, test_id, test_set_id): - """Checks by name if test belongs to given test set.""" - test_set_pattern = re.compile( - r'(\b|_){0}(\b|_)'.format(test_set_id) - ) - return bool(test_set_pattern.search(test_id)) - - def addSuccess(self, test): - test_id = test.id() - for test_set_id in self.test_sets.keys(): - if self.test_belongs_to_testset(test_id, test_set_id): - test_kwargs = { - "title": "", - "description": "", - "duration": "", - "deployment_tags": [], - "available_since_release": "", - "test_set_id": test_set_id, - "name": test_id, - } - - test_kwargs.update(nose_utils.get_description(test)) - - try: - test_obj = models.Test(**test_kwargs) - self.session.merge(test_obj) - - # flush tests data into db - self.session.commit() - except Exception as e: - LOG.error( - ('An error has occured while ' - 'processing data entity for ' - 'test with name %s. Error message: %s'), - test_id, - e.message - ) - LOG.info('%s added for %s', test_id, test_set_id) - - -def discovery(path, session): - """Will discover all tests on provided path and save info in db - """ - LOG.info('Starting discovery for %r.', path) - - nose_test_runner.SilentTestProgram( - addplugins=[DiscoveryPlugin(session)], - exit=False, - argv=['tests_discovery', '--collect-only', '--nocapture', path] - ) diff --git a/fuel_plugin/ostf_adapter/nose_plugin/nose_storage_plugin.py b/fuel_plugin/ostf_adapter/nose_plugin/nose_storage_plugin.py deleted file mode 100644 index d0f39ae3..00000000 --- a/fuel_plugin/ostf_adapter/nose_plugin/nose_storage_plugin.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
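DiscoveryPlugin above decides test-set membership purely by name, using a word-boundary regular expression; the check in isolation, with an added re.escape for safety:

# Sketch of the name-based membership check from the removed DiscoveryPlugin.
import re


def test_belongs_to_testset(test_id, test_set_id):
    # Match the test set id as a whole word or underscore-delimited token.
    pattern = re.compile(r'(\b|_){0}(\b|_)'.format(re.escape(test_set_id)))
    return bool(pattern.search(test_id))


assert test_belongs_to_testset('fuel_health.tests.smoke.test_dpdk', 'smoke')
assert not test_belongs_to_testset('fuel_health.tests.smoke.test_dpdk', 'sanity')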
- -import logging -import os -import time - -from nose import plugins -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - -from fuel_plugin import consts -from fuel_plugin.ostf_adapter.nose_plugin import nose_utils -from fuel_plugin.ostf_adapter.storage import models - -CONF = cfg.CONF - - -LOG = logging.getLogger(__name__) - - -class StoragePlugin(plugins.Plugin): - enabled = True - name = 'storage' - score = 15000 - - def __init__(self, session, test_run_id, cluster_id, - ostf_os_access_creds, token, results_log): - - self.session = session - self.test_run_id = test_run_id - self.cluster_id = cluster_id - self.ostf_os_access_creds = ostf_os_access_creds - self.results_log = results_log - - super(StoragePlugin, self).__init__() - self._start_time = None - self.token = token - - def options(self, parser, env=os.environ): - env['NAILGUN_HOST'] = str(CONF.adapter.nailgun_host) - env['NAILGUN_PORT'] = str(CONF.adapter.nailgun_port) - if self.token is not None: - env['NAILGUN_TOKEN'] = self.token - if self.cluster_id: - env['CLUSTER_ID'] = str(self.cluster_id) - - for var_name in self.ostf_os_access_creds: - env[var_name.upper()] = self.ostf_os_access_creds[var_name] - - def configure(self, options, conf): - self.conf = conf - - def _add_test_results(self, test, data): - test_id = test.id() - - models.Test.add_result( - self.session, - self.test_run_id, - test_id, - data - ) - if data['status'] != consts.TEST_STATUSES.running: - test_name = nose_utils.get_description(test)["title"] - self.results_log.log_results( - test_id, - test_name=test_name, - status=data['status'], - message=data['message'], - traceback=data['traceback'], - ) - - def _add_message(self, test, err=None, status=None): - data = { - 'status': status, - 'time_taken': self.taken, - 'traceback': u'', - 'step': None, - 'message': u'' - } - if err: - exc_type, exc_value, exc_traceback = err - - if not status == consts.TEST_STATUSES.error: - data['step'], data['message'] = \ - nose_utils.format_failure_message(exc_value) - - if status != consts.TEST_STATUSES.skipped: - data['traceback'] = nose_utils.format_exception(err) - - tests_to_update = nose_utils.get_tests_to_update(test) - - for test in tests_to_update: - self._add_test_results(test, data) - self.session.commit() - - def addSuccess(self, test, capt=None): - self._add_message(test, status=consts.TEST_STATUSES.success) - - def addFailure(self, test, err): - LOG.error('%s', test.id(), exc_info=err) - self._add_message( - test, err=err, status=consts.TEST_STATUSES.failure) - - def addError(self, test, err): - if err[0] is AssertionError: - LOG.error('%s', test.id(), exc_info=err) - self._add_message( - test, err=err, status=consts.TEST_STATUSES.failure) - elif issubclass(err[0], plugins.skip.SkipTest): - LOG.warning('%s is skipped', test.id()) - self._add_message( - test, err=err, status=consts.TEST_STATUSES.skipped) - else: - LOG.error('%s', test.id(), exc_info=err) - self._add_message( - test, err=err, status=consts.TEST_STATUSES.error) - - def beforeTest(self, test): - self._start_time = time.time() - self._add_message(test, status=consts.TEST_STATUSES.running) - - def describeTest(self, test): - return test.test._testMethodDoc - - @property - def taken(self): - if self._start_time: - return time.time() - self._start_time - return 0 diff --git a/fuel_plugin/ostf_adapter/nose_plugin/nose_test_runner.py b/fuel_plugin/ostf_adapter/nose_plugin/nose_test_runner.py deleted file mode 100644 index c2ba0f7a..00000000 --- 
a/fuel_plugin/ostf_adapter/nose_plugin/nose_test_runner.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nose import core - - -class SilentTestRunner(core.TextTestRunner): - def run(self, test): - """Overrides to provide plugin hooks and defer all output to - the test result class. - """ - result = self._makeResult() - test(result) - return result - - -class SilentTestProgram(core.TestProgram): - def runTests(self): - """Run Tests. Returns true on success, false on failure, and sets - self.success to the same value. - """ - self.testRunner = SilentTestRunner(stream=self.config.stream, - verbosity=0, - config=self.config) - return self.testRunner.run(self.test).wasSuccessful() diff --git a/fuel_plugin/ostf_adapter/nose_plugin/nose_utils.py b/fuel_plugin/ostf_adapter/nose_plugin/nose_utils.py deleted file mode 100644 index 14509082..00000000 --- a/fuel_plugin/ostf_adapter/nose_plugin/nose_utils.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from distutils import version -import itertools -import multiprocessing -import os -import re -import traceback - -from nose import case -from nose import suite - -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils - - -def parse_json_file(file_path): - current_directory = os.path.dirname(os.path.realpath(__file__)) - commands_path = os.path.join( - current_directory, file_path) - with open(commands_path, 'r') as f: - return jsonutils.load(f) - - -def get_exc_message(exception_value): - """Gets message from exception - - :param exception_value: Exception type object - """ - _exc_long = str(exception_value) - if isinstance(_exc_long, basestring): - return _exc_long.split('\n')[0] - return u"" - - -def _process_docstring(docstring, pattern): - pattern_matcher = re.search(pattern, docstring) - - if pattern_matcher: - value = pattern_matcher.group(1) - docstring = (docstring[:pattern_matcher.start()] + - docstring[pattern_matcher.end():]) - else: - value = None - - return docstring, value - - -def get_description(test_obj): - """Parses docstring of test object in order - to get necessary data. - - test_obj.test._testMethodDoc is using directly - instead of calling test_obj.shortDescription() - for the sake of compability with python 2.6 where - this method works pretty buggy. 
- """ - if isinstance(test_obj, case.Test): - docstring = test_obj.test._testMethodDoc - - test_data = {} - if docstring: - deployment_tags_pattern = r'Deployment tags:.?(?P.+)?' - docstring, deployment_tags = _process_docstring( - docstring, - deployment_tags_pattern - ) - - # if deployment tags is empty or absent - # _process_docstring returns None so we - # must check this and prevent - if deployment_tags: - deployment_tags = [ - tag.strip().lower() for tag in deployment_tags.split(',') - ] - test_data['deployment_tags'] = deployment_tags - - rel_vers_pattern = "Available since release:.?(?P.+)" - docstring, rel_vers = _process_docstring( - docstring, - rel_vers_pattern - ) - if rel_vers: - test_data["available_since_release"] = rel_vers - - duration_pattern = r'Duration:.?(?P.+)' - docstring, duration = _process_docstring( - docstring, - duration_pattern - ) - if duration: - test_data['duration'] = duration - - docstring = docstring.split('\n') - test_data['title'] = docstring.pop(0) - test_data['description'] = \ - u'\n'.join(docstring) if docstring else u"" - - return test_data - - -def modify_test_name_for_nose(test_path): - test_module, test_class, test_method = test_path.rsplit('.', 2) - return '{0}:{1}.{2}'.format(test_module, test_class, test_method) - - -def format_exception(exc_info): - ec, ev, tb = exc_info - - # formatError() may have turned our exception object into a string, and - # Python 3's traceback.format_exception() doesn't take kindly to that (it - # expects an actual exception object). So we work around it, by doing the - # work ourselves if ev is a string. - if isinstance(ev, basestring): - tb_data = ''.join(traceback.format_tb(tb)) - return tb_data + ev - else: - return ''.join(traceback.format_exception(*exc_info)) - - -def format_failure_message(message): - message = get_exc_message(message) - matcher = re.search( - r'^[a-zA-Z]+\s?(\d+)\s?[a-zA-Z]+\s?[\.:]\s?(.+)', - message) - if matcher: - step, msg = matcher.groups() - return int(step), msg - return None, message - - -def run_proc(func, *args): - proc = multiprocessing.Process( - target=func, - args=args) - proc.daemon = True - proc.start() - return proc - - -def get_module(module_path): - pass - - -def get_tests_to_update(test): - """Sometimes (e.g. unhandles exception is occured in - setUpClass of test case) tests can be packed in - separate ContextSuite each. At the moment of following code - creation depth of this packaging was unknown so - current function is implemented with recursion - (which is not good by any means and you are free to - modify that if you can) - """ - tests = [] - - if isinstance(test, case.Test): - tests.append(test) - elif isinstance(test, suite.ContextSuite): - for sub_test in test._tests: - tests.extend(get_tests_to_update(sub_test)) - - return tests - - -def _process_deployment_tags(cluster_depl_tags, test_depl_tags): - """Process alternative deployment tags for testsets and tests - and determines whether current test entity (testset or test) - is appropriate for cluster. 
- """ - - test_depl_tags = [ - [alt_tag.strip() for alt_tag in tag.split('|')] - for tag in test_depl_tags - ] - - for comb in itertools.product(*test_depl_tags): - if set(comb).issubset(cluster_depl_tags): - return True - - return False - - -def _compare_release_versions(cluster_release_version, test_release_version): - cl_openstack_ver, cl_fuel_ver = cluster_release_version.split('-') - test_openstack_ver, test_fuel_ver = test_release_version.split('-') - - cond = ( - (version.LooseVersion(cl_openstack_ver) >= - version.LooseVersion(test_openstack_ver)) - and - (version.StrictVersion(cl_fuel_ver) >= - version.StrictVersion(test_fuel_ver)) - ) - return cond - - -def is_test_available(cluster_data, test_entity_data): - is_test_available = False - is_rel_ver_suitable = False - - # if 'available_since_release' attritube of test entity - # is empty then this test entity is available for cluster - # in other case execute release comparator logic - if not test_entity_data['available_since_release']: - is_rel_ver_suitable = True - else: - is_rel_ver_suitable = _compare_release_versions( - cluster_data['release_version'], - test_entity_data['available_since_release'] - ) - - # if release version of test entity is suitable for cluster - # then check test entity compatibility with cluster - # by deployment tags - if is_rel_ver_suitable: - is_depl_tags_suitable = _process_deployment_tags( - cluster_data['deployment_tags'], - test_entity_data['deployment_tags'] - ) - if is_depl_tags_suitable: - is_test_available = True - - return is_test_available diff --git a/fuel_plugin/ostf_adapter/server.py b/fuel_plugin/ostf_adapter/server.py deleted file mode 100644 index 74f5a9ed..00000000 --- a/fuel_plugin/ostf_adapter/server.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import logging -import os -import signal -import sys - -from gevent import pywsgi -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - -from fuel_plugin.ostf_adapter import config as ostf_config -from fuel_plugin.ostf_adapter import logger -from fuel_plugin.ostf_adapter import mixins -from fuel_plugin.ostf_adapter import nailgun_hooks -from fuel_plugin.ostf_adapter.nose_plugin import nose_discovery -from fuel_plugin.ostf_adapter.storage import engine -from fuel_plugin.ostf_adapter.wsgi import app - - -CONF = cfg.CONF - - -def main(): - - ostf_config.init_config(sys.argv[1:]) - - logger.setup(log_file=CONF.adapter.log_file) - - log = logging.getLogger(__name__) - log.info('Start app configuration') - - root = app.setup_app({}) - - # completely clean db (drop tables, constraints and types) - # plus drop alembic_version table (needed if, for example, head migration - # script was changed after applying) - if CONF.clear_db: - return nailgun_hooks.clear_db(CONF.adapter.dbpath) - - if CONF.after_initialization_environment_hook: - return nailgun_hooks.after_initialization_environment_hook() - - with engine.contexted_session(CONF.adapter.dbpath) as session: - # performing cleaning of expired data (if any) in db - mixins.delete_db_data(session) - log.info('Cleaned up database.') - # discover testsets and their tests - CORE_PATH = CONF.debug_tests or 'fuel_health' - - log.info('Performing nose discovery with {0}.'.format(CORE_PATH)) - - nose_discovery.discovery(path=CORE_PATH, session=session) - - # cache needed data from test repository - mixins.cache_test_repository(session) - - log.info('Discovery is completed') - host, port = CONF.adapter.server_host, CONF.adapter.server_port - srv = pywsgi.WSGIServer((host, port), root) - - log.info('Starting server in PID %s', os.getpid()) - log.info("serving on http://%s:%s", host, port) - - try: - signal.signal(signal.SIGCHLD, signal.SIG_IGN) - srv.serve_forever() - except KeyboardInterrupt: - pass diff --git a/fuel_plugin/ostf_adapter/storage/__init__.py b/fuel_plugin/ostf_adapter/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/ostf_adapter/storage/alembic.ini b/fuel_plugin/ostf_adapter/storage/alembic.ini deleted file mode 100644 index abf6d995..00000000 --- a/fuel_plugin/ostf_adapter/storage/alembic.ini +++ /dev/null @@ -1,49 +0,0 @@ -# A generic, single database configuration. 
- -[alembic] -# path to migration scripts -script_location = %(here)s/migrations - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -sqlalchemy.url = postgresql+psycopg2://ostf:ostf@localhost/ostf - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/fuel_plugin/ostf_adapter/storage/alembic_cli.py b/fuel_plugin/ostf_adapter/storage/alembic_cli.py deleted file mode 100644 index c3948a7d..00000000 --- a/fuel_plugin/ostf_adapter/storage/alembic_cli.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os - -from alembic import command -from alembic import config - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - - -log = logging.getLogger(__name__) - - -def do_apply_migrations(): - alembic_conf = config.Config( - os.path.join(os.path.dirname(__file__), 'alembic.ini') - ) - alembic_conf.set_main_option('script_location', - 'fuel_plugin.ostf_adapter.storage:migrations') - alembic_conf.set_main_option('sqlalchemy.url', cfg.CONF.adapter.dbpath) - - # apply initial migration - command.upgrade(alembic_conf, 'head') diff --git a/fuel_plugin/ostf_adapter/storage/engine.py b/fuel_plugin/ostf_adapter/storage/engine.py deleted file mode 100644 index 9cea5fd1..00000000 --- a/fuel_plugin/ostf_adapter/storage/engine.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
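do_apply_migrations above is the standard way to drive Alembic from code: build a Config, point it at the migration scripts and the database, then call command.upgrade. Reduced to its essentials, with placeholder script location and URL:

# Sketch of applying Alembic migrations programmatically, as the removed
# alembic_cli module did.  Script location and database URL are placeholders.
from alembic import command
from alembic import config

alembic_conf = config.Config()  # or config.Config('alembic.ini')
alembic_conf.set_main_option('script_location', 'myapp.storage:migrations')
alembic_conf.set_main_option('sqlalchemy.url', 'sqlite:///example.db')

command.upgrade(alembic_conf, 'head')  # apply every revision up to head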
- -import contextlib -import logging - -from sqlalchemy import create_engine, orm - - -LOG = logging.getLogger(__name__) - - -@contextlib.contextmanager -def contexted_session(dbpath): - """Allows to handle session via context manager - """ - LOG.debug('Starting session with dbpath={0}'.format(dbpath)) - engine = create_engine(dbpath) - session = orm.Session(bind=engine) - try: - LOG.debug('Before yielding session.') - yield session - session.commit() - except Exception: - LOG.exception('Raised error in contexted session.') - session.rollback() - raise - finally: - session.close() - - -def get_session(dbpath): - """Returns SQLAlchemy scoped session for given DB configuration string.""" - engine = create_engine(dbpath) - session = orm.scoped_session(orm.sessionmaker()) - session.configure(bind=engine) - return session diff --git a/fuel_plugin/ostf_adapter/storage/fields.py b/fuel_plugin/ostf_adapter/storage/fields.py deleted file mode 100644 index 6f474854..00000000 --- a/fuel_plugin/ostf_adapter/storage/fields.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy.types import TypeDecorator, VARCHAR - -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils - - -class JsonField(TypeDecorator): - impl = VARCHAR - - def process_bind_param(self, value, dialect): - if value is not None: - value = jsonutils.dumps(value) - - return value - - def process_result_value(self, value, dialect): - if value is not None: - value = jsonutils.loads(value) - return value - - -class ListField(JsonField): - def process_bind_param(self, value, dialect): - value = list(value) if value else [] - return super(ListField, self).process_bind_param(value, dialect) - - def process_result_value(self, value, dialect): - value = super(ListField, self).process_result_value(value, dialect) - return list(value) if value else [] diff --git a/fuel_plugin/ostf_adapter/storage/migrations/README b/fuel_plugin/ostf_adapter/storage/migrations/README deleted file mode 100644 index 98e4f9c4..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/README +++ /dev/null @@ -1 +0,0 @@ -Generic single-database configuration. \ No newline at end of file diff --git a/fuel_plugin/ostf_adapter/storage/migrations/__init__.py b/fuel_plugin/ostf_adapter/storage/migrations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/ostf_adapter/storage/migrations/env.py b/fuel_plugin/ostf_adapter/storage/migrations/env.py deleted file mode 100644 index 6a3ae84a..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/env.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
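[Editor's note] The engine.contexted_session() helper deleted above is a commit-on-success, rollback-on-error context manager around a SQLAlchemy session. A runnable sketch of the same pattern, using an in-memory SQLite URL as a placeholder:

    # Sketch: a context-managed session in the style of the removed
    # contexted_session(). The SQLite URL is a placeholder.
    import contextlib

    from sqlalchemy import create_engine, orm, text


    @contextlib.contextmanager
    def contexted_session(dbpath):
        # Commit on success, roll back on any error, always close.
        engine = create_engine(dbpath)
        session = orm.Session(bind=engine)
        try:
            yield session
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()


    with contexted_session('sqlite://') as session:
        session.execute(text('SELECT 1'))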
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import with_statement -import logging.config - -from sqlalchemy import engine_from_config, pool - -from alembic import context -from fuel_plugin.ostf_adapter.storage import models - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -logging.config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = models.BASE.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure(url=url) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - engine = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool) - - connection = engine.connect() - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - try: - with context.begin_transaction(): - context.run_migrations() - finally: - connection.close() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/fuel_plugin/ostf_adapter/storage/migrations/script.py.mako b/fuel_plugin/ostf_adapter/storage/migrations/script.py.mako deleted file mode 100644 index 04cd3706..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/script.py.mako +++ /dev/null @@ -1,22 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): -${upgrades if upgrades else "pass"} - - -def downgrade(): -${downgrades if downgrades else "pass"} diff --git a/fuel_plugin/ostf_adapter/storage/migrations/versions/36e3fd684a9e_versioning.py b/fuel_plugin/ostf_adapter/storage/migrations/versions/36e3fd684a9e_versioning.py deleted file mode 100644 index 6efef29f..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/versions/36e3fd684a9e_versioning.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2015 Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""versioning - -Revision ID: 36e3fd684a9e -Revises: 54904076d82d -Create Date: 2015-02-12 15:45:23.885397 - -""" - -# revision identifiers, used by Alembic. -revision = '36e3fd684a9e' -down_revision = '54904076d82d' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('test_sets', sa.Column('available_since_release', - sa.String(64), - default="")) - op.add_column('tests', sa.Column('available_since_release', - sa.String(64), - default="")) - op.add_column('cluster_state', sa.Column('release_version', sa.String(64))) - - -def downgrade(): - op.drop_column('test_sets', 'available_since_release') - op.drop_column('tests', 'available_since_release') - op.drop_column('cluster_state', 'release_version') diff --git a/fuel_plugin/ostf_adapter/storage/migrations/versions/5133b1e66258_pid_field_for_testru.py b/fuel_plugin/ostf_adapter/storage/migrations/versions/5133b1e66258_pid_field_for_testru.py deleted file mode 100644 index 4e3384b1..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/versions/5133b1e66258_pid_field_for_testru.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""pid_field_for_testrun - -Revision ID: 5133b1e66258 -Revises: 53af7c2d9ccc -Create Date: 2014-02-14 16:34:18.751738 - -""" - -# revision identifiers, used by Alembic. -revision = '5133b1e66258' -down_revision = '53af7c2d9ccc' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('test_runs', sa.Column('pid', sa.Integer(), nullable=True)) - - -def downgrade(): - op.drop_column('test_runs', 'pid') diff --git a/fuel_plugin/ostf_adapter/storage/migrations/versions/53af7c2d9ccc_initial.py b/fuel_plugin/ostf_adapter/storage/migrations/versions/53af7c2d9ccc_initial.py deleted file mode 100644 index 09df01b2..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/versions/53af7c2d9ccc_initial.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
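[Editor's note] Individual revisions such as 5133b1e66258 above are usually exercised one step at a time rather than by jumping straight to head. A short sketch with placeholder configuration, reusing the programmatic Config pattern from the earlier example; '+1' and '-1' are Alembic's relative revision targets.

    # Sketch: stepping the schema to a named revision and back.
    # Config values are placeholders.
    from alembic import command
    from alembic import config


    def step_schema(db_url, script_location):
        cfg = config.Config()
        cfg.set_main_option('script_location', script_location)
        cfg.set_main_option('sqlalchemy.url', db_url)
        command.upgrade(cfg, '5133b1e66258')   # up to a named revision
        command.upgrade(cfg, '+1')             # one revision further
        command.downgrade(cfg, '-1')           # and one step back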
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""initial - -Revision ID: 53af7c2d9ccc -Revises: None -Create Date: 2013-12-04 13:32:29.109891 - -""" - -# revision identifiers, used by Alembic. -revision = '53af7c2d9ccc' -down_revision = None - -from alembic import op -import sqlalchemy as sa -from sqlalchemy.dialects import postgresql - -from fuel_plugin.ostf_adapter.storage import fields - - -def upgrade(): - op.create_table( - 'cluster_state', - sa.Column('id', sa.Integer(), autoincrement=False, nullable=False), - sa.Column('deployment_tags', postgresql.ARRAY(sa.String(length=64)), - nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table( - 'test_sets', - sa.Column('id', sa.String(length=128), nullable=False), - sa.Column('description', sa.String(length=256), nullable=True), - sa.Column('test_path', sa.String(length=256), nullable=True), - sa.Column('driver', sa.String(length=128), nullable=True), - sa.Column('additional_arguments', fields.ListField(), nullable=True), - sa.Column('cleanup_path', sa.String(length=128), nullable=True), - sa.Column('meta', fields.JsonField(), nullable=True), - sa.Column('deployment_tags', postgresql.ARRAY(sa.String(length=64)), - nullable=True), - sa.Column('test_runs_ordering_priority', sa.Integer(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table( - 'cluster_testing_pattern', - sa.Column('cluster_id', sa.Integer(), nullable=False), - sa.Column('test_set_id', sa.String(length=128), nullable=False), - sa.Column('tests', postgresql.ARRAY(sa.String(length=512)), - nullable=True), - sa.ForeignKeyConstraint(['cluster_id'], ['cluster_state.id'], ), - sa.ForeignKeyConstraint(['test_set_id'], ['test_sets.id'], ), - sa.PrimaryKeyConstraint('cluster_id', 'test_set_id') - ) - op.create_table( - 'test_runs', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('status', - sa.Enum('running', 'finished', name='test_run_states'), - nullable=False), - sa.Column('meta', fields.JsonField(), nullable=True), - sa.Column('started_at', sa.DateTime(), nullable=True), - sa.Column('ended_at', sa.DateTime(), nullable=True), - sa.Column('test_set_id', sa.String(length=128), nullable=True), - sa.Column('cluster_id', sa.Integer(), nullable=True), - sa.ForeignKeyConstraint(['test_set_id', 'cluster_id'], - ['cluster_testing_pattern.test_set_id', - 'cluster_testing_pattern.cluster_id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id') - ) - op.create_table( - 'tests', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('name', sa.String(length=512), nullable=True), - sa.Column('title', sa.String(length=512), nullable=True), - sa.Column('description', sa.Text(), nullable=True), - sa.Column('duration', sa.String(length=512), nullable=True), - sa.Column('message', sa.Text(), nullable=True), - sa.Column('traceback', sa.Text(), nullable=True), - sa.Column('status', sa.Enum('wait_running', 'running', 'failure', - 'success', 'error', 'stopped', - 'disabled', 'skipped', name='test_states'), - nullable=True), - sa.Column('step', sa.Integer(), nullable=True), - sa.Column('time_taken', sa.Float(), nullable=True), - sa.Column('meta', fields.JsonField(), nullable=True), - 
sa.Column('deployment_tags', postgresql.ARRAY(sa.String(length=64)), - nullable=True), - sa.Column('test_run_id', sa.Integer(), nullable=True), - sa.Column('test_set_id', sa.String(length=128), nullable=True), - sa.ForeignKeyConstraint(['test_run_id'], ['test_runs.id'], - ondelete='CASCADE'), - sa.ForeignKeyConstraint(['test_set_id'], ['test_sets.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id') - ) - - -def downgrade(): - op.drop_table('tests') - op.drop_table('test_runs') - op.drop_table('cluster_testing_pattern') - op.drop_table('test_sets') - op.drop_table('cluster_state') diff --git a/fuel_plugin/ostf_adapter/storage/migrations/versions/54904076d82d_list_of_excl_testset.py b/fuel_plugin/ostf_adapter/storage/migrations/versions/54904076d82d_list_of_excl_testset.py deleted file mode 100644 index e7e6a0af..00000000 --- a/fuel_plugin/ostf_adapter/storage/migrations/versions/54904076d82d_list_of_excl_testset.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""list_of_excl_testsets - -Revision ID: 54904076d82d -Revises: 53af7c2d9ccc -Create Date: 2014-02-13 18:57:46.854934 - -""" - -# revision identifiers, used by Alembic. -revision = '54904076d82d' -down_revision = '5133b1e66258' - -from alembic import op -import sqlalchemy as sa -from sqlalchemy.dialects import postgresql - - -def upgrade(): - op.add_column('test_sets', sa.Column('exclusive_testsets', - postgresql.ARRAY( - sa.String(length=128) - ), - nullable=True)) - - -def downgrade(): - op.drop_column('test_sets', 'exclusive_testsets') diff --git a/fuel_plugin/ostf_adapter/storage/models.py b/fuel_plugin/ostf_adapter/storage/models.py deleted file mode 100644 index 17ce1723..00000000 --- a/fuel_plugin/ostf_adapter/storage/models.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
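[Editor's note] The custom fields.JsonField / fields.ListField types used in the schema above are SQLAlchemy TypeDecorators that serialize values into a VARCHAR column. A self-contained sketch of the same idea, assuming SQLAlchemy 1.4+ idioms; the Note model, the in-memory SQLite URL, and the use of the stdlib json module instead of oslo serialization are simplifications, not the retired code.

    # Sketch: a JSON-serializing column type in the style of the removed
    # fields.JsonField. Names and URL are placeholders.
    import json

    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.orm import Session, declarative_base
    from sqlalchemy.types import TypeDecorator, VARCHAR

    Base = declarative_base()


    class JsonField(TypeDecorator):
        impl = VARCHAR
        cache_ok = True

        def process_bind_param(self, value, dialect):
            # Serialize Python objects to a JSON string on the way in.
            return json.dumps(value) if value is not None else None

        def process_result_value(self, value, dialect):
            # Deserialize back to Python objects on the way out.
            return json.loads(value) if value is not None else None


    class Note(Base):
        __tablename__ = 'notes'
        id = Column(Integer, primary_key=True)
        meta = Column(JsonField())


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(Note(meta={'cluster_id': 1}))
        session.commit()
        assert session.query(Note).first().meta == {'cluster_id': 1}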
- -import datetime -import logging - -import sqlalchemy as sa -from sqlalchemy import desc -from sqlalchemy.dialects.postgresql import ARRAY -from sqlalchemy.ext.associationproxy import association_proxy -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import joinedload, relationship, object_mapper - -from fuel_plugin import consts -from fuel_plugin.ostf_adapter import nose_plugin -from fuel_plugin.ostf_adapter.storage import engine -from fuel_plugin.ostf_adapter.storage import fields - - -LOG = logging.getLogger(__name__) - - -BASE = declarative_base() - - -class ClusterState(BASE): - """Represents clusters currently - present in the system. Holds info - about deployment type which is using in - redeployment process. - - Is linked with TestSetToCluster entity - that implements many-to-many relationship with - TestSet. - """ - - __tablename__ = 'cluster_state' - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=False) - deployment_tags = sa.Column(ARRAY(sa.String(64))) - release_version = sa.Column(sa.String(64)) - - -class ClusterTestingPattern(BASE): - """Stores cluster's pattern for testsets and tests.""" - - __tablename__ = 'cluster_testing_pattern' - - cluster_id = sa.Column( - sa.Integer, - sa.ForeignKey('cluster_state.id'), - primary_key=True - ) - - test_set_id = sa.Column( - sa.String(128), - sa.ForeignKey('test_sets.id'), - primary_key=True - ) - - tests = sa.Column(ARRAY(sa.String(512))) - - test_set = relationship('TestSet') - - -class TestSet(BASE): - - __tablename__ = 'test_sets' - - id = sa.Column(sa.String(128), primary_key=True) - description = sa.Column(sa.String(256)) - test_path = sa.Column(sa.String(256)) - driver = sa.Column(sa.String(128)) - additional_arguments = sa.Column(fields.ListField()) - cleanup_path = sa.Column(sa.String(128)) - meta = sa.Column(fields.JsonField()) - deployment_tags = sa.Column(ARRAY(sa.String(64))) - test_runs_ordering_priority = sa.Column(sa.Integer) - - # list of test sets that cannot be executed simultaneously - # with current test set - exclusive_testsets = sa.Column(ARRAY(sa.String(128))) - - available_since_release = sa.Column(sa.String(64), default="") - - tests = relationship( - 'Test', - backref='test_set', - order_by='Test.name', - cascade='delete' - ) - - @property - def frontend(self): - return {'id': self.id, 'name': self.description} - - @classmethod - def get_test_set(cls, session, test_set): - return session.query(cls)\ - .filter_by(id=test_set)\ - .first() - - -class Test(BASE): - - __tablename__ = 'tests' - - id = sa.Column(sa.Integer(), primary_key=True) - name = sa.Column(sa.String(512)) - title = sa.Column(sa.String(512)) - description = sa.Column(sa.Text()) - duration = sa.Column(sa.String(512)) - message = sa.Column(sa.Text()) - traceback = sa.Column(sa.Text()) - status = sa.Column(sa.Enum(consts.TEST_STATUSES, name='test_states')) - step = sa.Column(sa.Integer()) - time_taken = sa.Column(sa.Float()) - meta = sa.Column(fields.JsonField()) - deployment_tags = sa.Column(ARRAY(sa.String(64))) - available_since_release = sa.Column(sa.String(64), default="") - - test_run_id = sa.Column( - sa.Integer(), - sa.ForeignKey( - 'test_runs.id', - ondelete='CASCADE' - ) - ) - - test_set_id = sa.Column( - sa.String(length=128), - sa.ForeignKey( - 'test_sets.id', - ondelete='CASCADE' - ) - ) - - @property - def frontend(self): - return { - 'id': self.name, - 'testset': self.test_set_id, - 'name': self.title, - 'description': self.description, - 'duration': self.duration, - 'message': 
self.message, - 'step': self.step, - 'status': self.status, - 'taken': self.time_taken - } - - @classmethod - def add_result(cls, session, test_run_id, test_name, data): - session.query(cls).\ - filter(cls.name == test_name, - cls.test_run_id == test_run_id).\ - update(data, synchronize_session='fetch') - - @classmethod - def update_running_tests(cls, session, test_run_id, - status=consts.TEST_STATUSES.stopped): - session.query(cls). \ - filter(cls.test_run_id == test_run_id, - cls.status.in_( - (consts.TEST_STATUSES.running, - consts.TEST_STATUSES.wait_running))). \ - update({'status': status}, synchronize_session='fetch') - - @classmethod - def update_test_run_tests(cls, session, test_run_id, - tests_names, - status=consts.TEST_STATUSES.wait_running): - session.query(cls). \ - filter(cls.name.in_(tests_names), - cls.test_run_id == test_run_id). \ - update({'status': status, 'time_taken': None}, - synchronize_session='fetch') - - def copy_test(self, test_run, predefined_tests): - """Performs copying of tests for newly created - test_run. - """ - new_test = self.__class__() - mapper = object_mapper(self) - primary_keys = set([col.key for col in mapper.primary_key]) - for column in mapper.iterate_properties: - if column.key not in primary_keys: - setattr(new_test, column.key, getattr(self, column.key)) - new_test.test_run_id = test_run.id - if predefined_tests and new_test.name not in predefined_tests: - new_test.status = consts.TEST_STATUSES.disabled - else: - new_test.status = consts.TEST_STATUSES.wait_running - return new_test - - -class TestRun(BASE): - - __tablename__ = 'test_runs' - - id = sa.Column(sa.Integer(), primary_key=True) - cluster_id = sa.Column(sa.Integer(), nullable=False) - status = sa.Column(sa.Enum(consts.TESTRUN_STATUSES, - name='test_run_states'), - nullable=False) - meta = sa.Column(fields.JsonField()) - started_at = sa.Column(sa.DateTime, default=datetime.datetime.utcnow) - ended_at = sa.Column(sa.DateTime) - pid = sa.Column(sa.Integer) - - test_set_id = sa.Column(sa.String(128)) - cluster_id = sa.Column(sa.Integer) - - __table_args__ = ( - sa.ForeignKeyConstraint( - ['test_set_id', 'cluster_id'], - ['cluster_testing_pattern.test_set_id', - 'cluster_testing_pattern.cluster_id'], - ondelete='CASCADE' - ), - {} - ) - - cluster_testing_pattern = relationship('ClusterTestingPattern') - test_set = association_proxy( - 'cluster_testing_pattern', 'test_set' - ) - - tests = relationship( - 'Test', - backref='test_run', - order_by='Test.name', - cascade='delete' - ) - - def update(self, status): - self.status = status - if status == 'finished': - self.ended_at = datetime.datetime.utcnow() - - @property - def enabled_tests(self): - return [test.name for test - in self.tests if test.status != consts.TEST_STATUSES.disabled] - - def is_finished(self): - return self.status == consts.TESTRUN_STATUSES.finished - - @property - def frontend(self): - test_run_data = { - 'id': self.id, - 'testset': self.test_set_id, - 'meta': self.meta, - 'cluster_id': self.cluster_id, - 'status': self.status, - 'started_at': self.started_at, - 'ended_at': self.ended_at, - 'tests': [] - } - if self.tests: - test_run_data['tests'] = [test.frontend for test in self.tests] - return test_run_data - - @classmethod - def add_test_run(cls, session, test_set, cluster_id, - status=consts.TESTRUN_STATUSES.running, - tests=None): - """Creates new test_run object with given data - and makes copy of tests that will be bound - with this test_run. Copying is performed by - copy_test method of Test class. 
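[Editor's note] Test.copy_test() above clones a row for a new test run by iterating the mapped properties and skipping primary keys. A self-contained sketch of that cloning pattern, using a hypothetical Example model and SQLAlchemy 1.4+ idioms rather than the retired models:

    # Sketch: clone a mapped row while skipping primary-key columns, as
    # Test.copy_test() did. Model and URL are placeholders.
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base, object_mapper

    Base = declarative_base()


    class Example(Base):
        __tablename__ = 'example'
        id = Column(Integer, primary_key=True)
        name = Column(String(64))
        status = Column(String(16))


    def clone_row(obj):
        # Copy every mapped attribute except the primary key(s).
        mapper = object_mapper(obj)
        primary_keys = {col.key for col in mapper.primary_key}
        copy = obj.__class__()
        for prop in mapper.iterate_properties:
            if prop.key not in primary_keys:
                setattr(copy, prop.key, getattr(obj, prop.key))
        return copy


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(Example(name='fast_pass', status='wait_running'))
        session.commit()
        session.add(clone_row(session.query(Example).first()))
        session.commit()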
- """ - predefined_tests = tests or [] - tests_names = session.query(ClusterTestingPattern.tests)\ - .filter_by(test_set_id=test_set, cluster_id=cluster_id)\ - .scalar() - - tests = session.query(Test)\ - .filter(Test.name.in_(tests_names))\ - .filter_by(test_set_id=test_set)\ - .filter_by(test_run_id=None) - - test_run = cls(test_set_id=test_set, cluster_id=cluster_id, - status=status) - session.add(test_run) - - for test in tests: - new_test = test.copy_test(test_run, predefined_tests) - session.add(new_test) - test_run.tests.append(new_test) - # NOTE(akostrikov) Seems there is a problem with transaction - # isolation, so we need not only to flush, but also to commit. - # We fork and then in forks we flush sql items. But it seems that - # it happens in transaction so we are not getting in other - # processes add results. So I force transaction commit to provide - # changes to all forks os OSTF. - session.commit() - session.flush() - - return test_run - - @classmethod - def get_last_test_run(cls, session, test_set, cluster_id): - test_run = session.query(cls). \ - filter_by(cluster_id=cluster_id, test_set_id=test_set). \ - order_by(desc(cls.id)).first() - return test_run - - @classmethod - def get_test_results(cls): - session = engine.get_session() - test_runs = session.query(cls). \ - options(joinedload('tests')). \ - order_by(desc(cls.id)) - session.commit() - session.close() - return test_runs - - @classmethod - def get_test_run(cls, session, test_run_id, joined=False): - if not joined: - test_run = session.query(cls). \ - filter_by(id=test_run_id).first() - else: - test_run = session.query(cls). \ - options(joinedload('tests')). \ - filter_by(id=test_run_id).first() - return test_run - - @classmethod - def update_test_run(cls, session, test_run_id, updated_data): - if updated_data.get('status') in [consts.TESTRUN_STATUSES.finished]: - updated_data['ended_at'] = datetime.datetime.utcnow() - - session.query(cls). \ - filter(cls.id == test_run_id). \ - update(updated_data, synchronize_session='fetch') - - @classmethod - def is_last_running(cls, session, test_set, cluster_id): - """Checks whether there one can perform creation of new - test_run by testing of existing of test_run object - with given data or test_run with 'finished' status. 
- """ - test_run = cls.get_last_test_run(session, test_set, cluster_id) - return not bool(test_run) or test_run.is_finished() - - @classmethod - def start(cls, session, test_set, metadata, tests, dbpath, token=None): - plugin = nose_plugin.get_plugin(test_set.driver) - if cls.is_last_running(session, test_set.id, - metadata['cluster_id']): - - test_run = cls.add_test_run( - session, test_set.id, - metadata['cluster_id'], tests=tests) - - plugin.run(test_run, test_set, dbpath, - metadata.get('ostf_os_access_creds'), token=token) - - return test_run.frontend - return {} - - def restart(self, session, dbpath, - ostf_os_access_creds, tests=None, token=None): - """Restart test run with - if tests given they will be enabled - """ - if TestRun.is_last_running(session, - self.test_set_id, - self.cluster_id): - plugin = nose_plugin.get_plugin(self.test_set.driver) - - self.update(consts.TEST_STATUSES.running) - if tests: - Test.update_test_run_tests( - session, self.id, tests) - - plugin.run(self, self.test_set, dbpath, - ostf_os_access_creds, tests, token=token) - return self.frontend - return {} - - def stop(self, session): - """Stop test run if running - """ - plugin = nose_plugin.get_plugin(self.test_set.driver) - killed = plugin.kill(self) - if killed: - Test.update_running_tests( - session, self.id, status=consts.TEST_STATUSES.stopped) - return self.frontend diff --git a/fuel_plugin/ostf_adapter/wsgi/__init__.py b/fuel_plugin/ostf_adapter/wsgi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/ostf_adapter/wsgi/access_control.py b/fuel_plugin/ostf_adapter/wsgi/access_control.py deleted file mode 100644 index c0a93d86..00000000 --- a/fuel_plugin/ostf_adapter/wsgi/access_control.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystonemiddleware import auth_token -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - - -def setup(app): - if cfg.CONF.adapter.auth_enable: - return auth_token.AuthProtocol(app, dict(cfg.CONF.keystone_authtoken)) - else: - return app diff --git a/fuel_plugin/ostf_adapter/wsgi/app.py b/fuel_plugin/ostf_adapter/wsgi/app.py deleted file mode 100644 index 6dc4b1ef..00000000 --- a/fuel_plugin/ostf_adapter/wsgi/app.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg -import pecan - -from fuel_plugin.ostf_adapter.storage import engine -from fuel_plugin.ostf_adapter.wsgi import access_control -from fuel_plugin.ostf_adapter.wsgi import hooks - -CONF = cfg.CONF - - -def setup_config(custom_pecan_config): - """Updates defaults values for pecan server - by those supplied via command line arguments - when ostf-server is started - """ - config_to_use = { - 'server': { - 'host': CONF.adapter.server_host, - 'port': CONF.adapter.server_port - }, - 'dbpath': CONF.adapter.dbpath, - 'debug': CONF.debug, - 'debug_tests': CONF.debug_tests, - 'lock_dir': CONF.adapter.lock_dir, - 'nailgun': { - 'host': CONF.adapter.nailgun_host, - 'port': CONF.adapter.nailgun_port - }, - 'app': { - 'root': 'fuel_plugin.ostf_adapter.wsgi.root.RootController', - 'modules': ['fuel_plugin.ostf_adapter.wsgi'] - }, - } - config_to_use.update(custom_pecan_config) - pecan.conf.update(config_to_use) - - -def setup_app(config=None, session=None): - setup_config(config or {}) - session = session or engine.get_session(pecan.conf.dbpath) - app_hooks = [ - hooks.CustomTransactionalHook(session), - hooks.AddTokenHook() - ] - app = pecan.make_app( - pecan.conf.app.root, - debug=pecan.conf.debug, - force_canonical=True, - hooks=app_hooks, - ) - return access_control.setup(app) diff --git a/fuel_plugin/ostf_adapter/wsgi/controllers.py b/fuel_plugin/ostf_adapter/wsgi/controllers.py deleted file mode 100644 index 70fcec6c..00000000 --- a/fuel_plugin/ostf_adapter/wsgi/controllers.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
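[Editor's note] setup_app() above is a thin layer over pecan.make_app(). The bare-bones version of that wiring looks roughly like the sketch below; the controller and its response are illustrative, without the hooks and access control the removed setup_app() added.

    # Sketch: minimal pecan application wiring.
    import pecan
    from pecan import expose


    class RootController(object):

        @expose('json')
        def index(self):
            return {'status': 'ok'}


    # `app` is a plain WSGI callable and can be served by any WSGI server,
    # e.g. gevent's pywsgi.WSGIServer(('127.0.0.1', 8989), app).
    app = pecan.make_app(RootController(), debug=True, force_canonical=True)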
- -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils -from pecan import abort -from pecan import expose -from pecan import request -from pecan import rest -from sqlalchemy import func -from sqlalchemy.orm import joinedload - -from fuel_plugin import consts -from fuel_plugin.ostf_adapter import mixins -from fuel_plugin.ostf_adapter.storage import models - - -class BaseRestController(rest.RestController): - def _handle_get(self, method, remainder, request=None): - if len(remainder): - method_name = remainder[0] - if method.upper() in self._custom_actions.get(method_name, []): - controller = self._find_controller( - 'get_%s' % method_name, - method_name - ) - if controller: - return controller, remainder[1:] - return super(BaseRestController, self)._handle_get(method, remainder, - request) - - -class TestsetsController(BaseRestController): - - @expose('json') - def get(self, cluster): - mixins.discovery_check(request.session, cluster, request.token) - - needed_testsets = request.session\ - .query(models.ClusterTestingPattern.test_set_id)\ - .filter_by(cluster_id=cluster) - - test_sets = request.session.query(models.TestSet)\ - .filter(models.TestSet.id.in_(needed_testsets))\ - .order_by(models.TestSet.test_runs_ordering_priority)\ - .all() - - if test_sets: - return [item.frontend for item in test_sets] - return {} - - -class TestsController(BaseRestController): - - @expose('json') - def get(self, cluster): - mixins.discovery_check(request.session, cluster, request.token) - needed_tests_list = request.session\ - .query(models.ClusterTestingPattern.tests)\ - .filter_by(cluster_id=cluster) - - result = [] - for tests in needed_tests_list: - tests_to_return = request.session.query(models.Test)\ - .filter(models.Test.name.in_(tests[0]))\ - .all() - - result.extend(tests_to_return) - - result.sort(key=lambda test: test.name) - - if result: - return [item.frontend for item in result] - - return {} - - -class TestrunsController(BaseRestController): - - _custom_actions = { - 'last': ['GET'], - } - - @expose('json') - def get_all(self): - test_runs = request.session.query(models.TestRun).all() - - return [item.frontend for item in test_runs] - - @expose('json') - def get_one(self, test_run_id): - test_run = request.session.query(models.TestRun)\ - .filter_by(id=test_run_id).first() - if test_run and isinstance(test_run, models.TestRun): - return test_run.frontend - return {} - - @expose('json') - def get_last(self, cluster_id): - test_run_ids = request.session.query(func.max(models.TestRun.id)) \ - .group_by(models.TestRun.test_set_id)\ - .filter_by(cluster_id=cluster_id) - - test_runs = request.session.query(models.TestRun)\ - .options(joinedload('tests'))\ - .filter(models.TestRun.id.in_(test_run_ids)) - - return [item.frontend for item in test_runs] - - @expose('json') - def post(self): - test_runs = jsonutils.loads(request.body) - if 'objects' in test_runs: - test_runs = test_runs['objects'] - - # Discover tests for all clusters in request - clusters_ids = [] - nedded_testsets = set() - for test_run in test_runs: - cluster_id = test_run['metadata']['cluster_id'] - if cluster_id not in clusters_ids: - clusters_ids.append(cluster_id) - mixins.discovery_check(request.session, - cluster_id, - request.token) - nedded_testsets.add(test_run['testset']) - # Validate testsets from request - test_sets = set([testset.id for testset in request. 
- session.query(models.TestSet).all()]) - if nedded_testsets - test_sets: - abort(400) - - res = [] - for test_run in test_runs: - test_set = test_run['testset'] - metadata = test_run['metadata'] - tests = test_run.get('tests', []) - - test_set = models.TestSet.get_test_set( - request.session, - test_set - ) - - test_run = models.TestRun.start( - request.session, - test_set, - metadata, - tests, - cfg.CONF.adapter.dbpath, - token=request.token - ) - - res.append(test_run) - - return res - - @expose('json') - def put(self): - test_runs = jsonutils.loads(request.body) - if 'objects' in test_runs: - test_runs = test_runs['objects'] - - data = [] - with request.session.begin(subtransactions=True): - for test_run in test_runs: - status = test_run.get('status') - tests = test_run.get('tests', []) - ostf_os_access_creds = test_run.get('ostf_os_access_creds') - - test_run = models.TestRun.get_test_run(request.session, - test_run['id']) - if status == consts.TESTRUN_STATUSES.stopped: - data.append(test_run.stop(request.session)) - elif status == consts.TESTRUN_STATUSES.restarted: - data.append(test_run.restart(request.session, - cfg.CONF.adapter.dbpath, - ostf_os_access_creds, - tests=tests, - token=request.token)) - return data diff --git a/fuel_plugin/ostf_adapter/wsgi/hooks.py b/fuel_plugin/ostf_adapter/wsgi/hooks.py deleted file mode 100644 index 1285cc6e..00000000 --- a/fuel_plugin/ostf_adapter/wsgi/hooks.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from pecan import hooks - - -LOG = logging.getLogger(__name__) - - -class CustomTransactionalHook(hooks.TransactionHook): - def __init__(self, session): - self.session = session - - def start(): - pass - - def commit(): - self.session.commit() - - def rollback(): - self.session.rollback() - - def clear(): - # not all GET controllers doesn't write to db - self.session.commit() - - self.session.remove() - - super(CustomTransactionalHook, self).__init__(start, - start, - commit, - rollback, - clear) - - def before(self, state): - super(CustomTransactionalHook, self).before(state) - state.request.session = self.session - - def on_error(self, state, exc): - super(CustomTransactionalHook, self).on_error(state, exc) - LOG.exception('Pecan state %r', state) - - -class AddTokenHook(hooks.PecanHook): - - def before(self, state): - # (dshulyak) just utility to get token - state.request.token = state.request.headers.get('X-Auth-Token', None) diff --git a/fuel_plugin/ostf_adapter/wsgi/root.py b/fuel_plugin/ostf_adapter/wsgi/root.py deleted file mode 100644 index dbc9f495..00000000 --- a/fuel_plugin/ostf_adapter/wsgi/root.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
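[Editor's note] The removed hooks module wires the shared session and the auth token into each request via pecan hooks. The token part is small enough to show in isolation; a sketch that mirrors the removed AddTokenHook:

    # Sketch: a pecan before-hook that stashes the X-Auth-Token header on the
    # request object, as the removed AddTokenHook did.
    from pecan import hooks


    class AddTokenHook(hooks.PecanHook):

        def before(self, state):
            state.request.token = state.request.headers.get('X-Auth-Token')


    # Passed to pecan.make_app(..., hooks=[AddTokenHook()]) alongside the
    # transactional hook in the removed setup_app().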
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from fuel_plugin.ostf_adapter.wsgi import controllers -from pecan import expose - - -class V1Controller(object): - # TODO(???) Rewrite it with wsme expose - tests = controllers.TestsController() - testsets = controllers.TestsetsController() - testruns = controllers.TestrunsController() - - -class RootController(object): - v1 = V1Controller() - - @expose('json', generic=True) - def index(self): - return {} diff --git a/fuel_plugin/ostf_client/__init__.py b/fuel_plugin/ostf_client/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/ostf_client/client.py b/fuel_plugin/ostf_client/client.py deleted file mode 100644 index ec7147dc..00000000 --- a/fuel_plugin/ostf_client/client.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests -import time - -try: - from oslo.serialization import jsonutils -except ImportError: - from oslo_serialization import jsonutils - - -class TestingAdapterClient(object): - def __init__(self, url): - self.url = url - - def _request(self, method, url, data=None): - headers = {'content-type': 'application/json'} - - ostf_os_access_creds = { - 'ostf_os_username': 'ostf', - 'ostf_os_password': 'ostf', - 'ostf_os_tenant_name': 'ostf' - } - - if data: - for data_el in data: - if 'metadata' in data_el: - data_el['metadata']['ostf_os_access_creds'] = \ - ostf_os_access_creds - else: - data_el['ostf_os_access_creds'] = ostf_os_access_creds - - data = jsonutils.dumps({'objects': data}) - - r = requests.request( - method, - url, - data=data, - headers=headers, - timeout=30.0 - ) - - if 2 != r.status_code / 100: - raise AssertionError( - '{method} "{url}" responded with ' - '"{code}" status code'.format( - method=method.upper(), - url=url, code=r.status_code) - ) - return r - - def testsets(self, cluster_id): - url = ''.join( - [self.url, '/testsets/', str(cluster_id)] - ) - return self._request('GET', url) - - def tests(self, cluster_id): - url = ''.join( - [self.url, '/tests/', str(cluster_id)] - ) - return self._request('GET', url) - - def testruns(self): - url = ''.join( - [self.url, '/testruns/'] - ) - return self._request('GET', url) - - def testruns_last(self, cluster_id): - url = ''.join([self.url, '/testruns/last/', - str(cluster_id)]) - return self._request('GET', url) - - def start_testrun(self, testset, cluster_id): - return self.start_testrun_tests(testset, [], cluster_id) - - def start_testrun_tests(self, testset, tests, cluster_id): - url = ''.join([self.url, '/testruns']) - data = [ - { - 'testset': testset, - 'tests': tests, - 'metadata': 
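[Editor's note] A note on the client's _request() helper above: the check `2 != r.status_code / 100` relies on Python 2 integer division; under Python 3 it would reject 2xx responses other than 200. A portable sketch of the same check, with a placeholder URL:

    # Sketch: portable 2xx check for a requests response. URL is a placeholder.
    import requests

    resp = requests.get('http://127.0.0.1:8989/v1/testruns/', timeout=30.0)
    if resp.status_code // 100 != 2:
        raise AssertionError(
            'GET "{url}" responded with "{code}" status code'.format(
                url=resp.url, code=resp.status_code))
    # Or simply let requests raise on 4xx/5xx:
    resp.raise_for_status()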
{'cluster_id': str(cluster_id)} - } - ] - return self._request('POST', url, data) - - def start_multiple_testruns(self, testsets, cluster_id): - url = ''.join([self.url, '/testruns']) - data = [ - { - 'testset': testset, - 'tests': [], - 'metadata': {'cluster_id': str(cluster_id)} - } - for testset in testsets - ] - return self._request('POST', url, data) - - def stop_testrun(self, testrun_id): - url = ''.join([self.url, '/testruns']) - data = [ - { - "id": testrun_id, - "status": "stopped" - } - ] - return self._request("PUT", url, data) - - def stop_testrun_last(self, testset, cluster_id): - latest = self.testruns_last(cluster_id).json() - testrun_id = [ - item['id'] for item in latest - if item['testset'] == testset - ][0] - return self.stop_testrun(testrun_id) - - def restart_tests(self, tests, testrun_id): - url = ''.join([self.url, '/testruns']) - data = [ - { - 'id': str(testrun_id), - 'tests': tests, - 'status': 'restarted' - } - ] - return self._request('PUT', url, data) - - def restart_tests_last(self, testset, tests, cluster_id): - latest = self.testruns_last(cluster_id).json() - testrun_id = [ - item['id'] for item in latest - if item['testset'] == testset - ][0] - return self.restart_tests(tests, testrun_id) - - def _with_timeout(self, action, testset, cluster_id, - timeout, polling=5, polling_hook=None): - start_time = time.time() - decoded_json = action().json() - - if decoded_json == [{}]: - self.stop_testrun_last(testset, cluster_id) - time.sleep(1) - action() - - while time.time() - start_time <= timeout: - time.sleep(polling) - - current_response = self.testruns_last(cluster_id) - if polling_hook: - polling_hook(current_response) - current_status, current_tests = \ - [(item['status'], item['tests']) for item - in current_response.json() if item['testset'] == testset][0] - - if current_status == 'finished': - break - else: - stopped_response = self.stop_testrun_last(testset, cluster_id) - if polling_hook: - polling_hook(stopped_response) - stopped_response = self.testruns_last(cluster_id) - stopped_status = [ - item['status'] for item in stopped_response.json() - if item['testset'] == testset - ][0] - - msg = '{0} is still in {1} state. 
Now the state is {2}'.format( - testset, current_status, stopped_status) - msg_tests = '\n'.join( - [ - '{0} -> {1}, {2}'.format( - item['id'], item['status'], item['taken'] - ) - for item in current_tests - ] - ) - - raise AssertionError('\n'.join([msg, msg_tests])) - return current_response - - def run_with_timeout(self, testset, tests, cluster_id, timeout, polling=5, - polling_hook=None): - action = lambda: self.start_testrun_tests(testset, tests, cluster_id) - return self._with_timeout(action, testset, cluster_id, timeout, - polling, polling_hook) - - def run_testset_with_timeout(self, testset, cluster_id, timeout, - polling=5, polling_hook=None): - return self.run_with_timeout(testset, [], cluster_id, timeout, - polling, polling_hook) - - def restart_with_timeout(self, testset, tests, cluster_id, timeout): - action = lambda: self.restart_tests_last(testset, tests, cluster_id) - return self._with_timeout(action, testset, cluster_id, timeout) diff --git a/fuel_plugin/testing/__init__.py b/fuel_plugin/testing/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/fixture/__init__.py b/fuel_plugin/testing/fixture/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/fixture/dummy_tests/__init__.py b/fuel_plugin/testing/fixture/dummy_tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/fixture/dummy_tests/config_test.py b/fuel_plugin/testing/fixture/dummy_tests/config_test.py deleted file mode 100644 index 1ee60f7f..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/config_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -try: - from oslo.config import cfg -except ImportError: - from oslo_config import cfg - -import unittest - -opts = [ - cfg.StrOpt('quantum', default='fake') -] - - -class Config(unittest.TestCase): - def test_config(self): - cfg.CONF diff --git a/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/__init__.py b/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_first_test.py b/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_first_test.py deleted file mode 100644 index 5c5318a5..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_first_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
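[Editor's note] The client's _with_timeout() above is a poll-until-finished loop with a deadline. Reduced to its essentials it looks like the sketch below; the check() callable and the timings are illustrative placeholders.

    # Sketch: generic poll-with-timeout helper in the spirit of the removed
    # TestingAdapterClient._with_timeout(). check() is a placeholder.
    import time


    def wait_for(check, timeout, polling=5):
        # Call check() every `polling` seconds until it returns something
        # truthy or `timeout` seconds elapse; raise on timeout.
        start = time.time()
        while time.time() - start <= timeout:
            result = check()
            if result:
                return result
            time.sleep(polling)
        raise AssertionError('condition not met within %s seconds' % timeout)


    # Example (placeholder): wait until a test run reports 'finished'.
    # wait_for(lambda: get_last_status() == 'finished', timeout=300)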
See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "gemini_first", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/" - "dummy_tests/dependent_testsets/gemini_first.py"), - "description": "Inersects with gemini_second testset", - "deployment_tags": ["dependent_tests"], - "test_runs_ordering_priority": 10, - "exclusive_testsets": ["gemini"] -} - -import time -import unittest2 - - -class TestGeminiFirst(unittest2.TestCase): - def test_fake_long_succes_gf(self): - time.sleep(30) - self.assertTrue(True) - - def test_fake_quick_success_gf(self): - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_second_test.py b/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_second_test.py deleted file mode 100644 index 4058f308..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/dependent_testsets/gemini_second_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "gemini_second", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/" - "dummy_tests/dependent_testsets/gemini_second.py"), - "description": "Intersects with gemini_first testset", - "deployment_tags": ["dependent_tests"], - "test_runs_ordering_priority": 11, - "exclusive_testsets": ["gemini"] -} - -import time -import unittest2 - - -class TestGeminiSecond(unittest2.TestCase): - def test_fake_long_succes_gs(self): - time.sleep(30) - self.assertTrue(True) - - def test_fake_quick_success_gs(self): - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/__init__.py b/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/alternative_depl_tags_test.py b/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/alternative_depl_tags_test.py deleted file mode 100644 index eec292f8..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/alternative_depl_tags_test.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -__profile__ = { - "id": "alternative_depl_tags_test", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/dummy_tests/" - "deployment_types_tests/alternative_depl_tags_test.py"), - "description": "Fake testset to test alternative deployment tags", - "deployment_tags": ["alternative | alternative_test"], - "test_runs_ordering_priority": 5, - "exclusive_testsets": [] -} - -import unittest - - -class AlternativeDeplTagsTests(unittest.TestCase): - - def test_simple_fake_test(self): - """fake empty test - This is fake empty test with - example of description of alternative - deployment tags - Deployment tags: one_tag| another_tag, other_tag - """ - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/ha_deployment_test.py b/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/ha_deployment_test.py deleted file mode 100644 index 9a8daabe..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/ha_deployment_test.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "ha_deployment_test", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/deployment_types_tests/" - "ha_deployment_test.py"), - "description": "Fake tests for HA deployment", - "deployment_tags": ["Ha"], - "test_runs_ordering_priority": 3, - "exclusive_testsets": [] -} - -import unittest - - -class HATest(unittest.TestCase): - - def test_ha_rhel_depl(self): - """fake empty test - This is fake tests for ha - rhel deployment - Duration: 0sec - Deployment tags: Ha, Rhel - """ - self.assertTrue(True) - - def test_ha_rhel_quantum_depl(self): - """fake empty test - This is a fake test for - ha rhel with quantum - Duration: 0sec - Deployment tags: ha, rhel, quantum - """ - self.assertTrue(True) - - def test_ha_ubuntu_depl(self): - """fake empty test - This is fake test for ha - ubuntu deployment - Deployment tags: ha, ubuntu - """ - self.assertTrue(True) - - def test_ha_ubuntu_novanet_depl(self): - """fake empty test - This is empty test for ha - ubuntu with nova-network - deployment - Deployment tags: ha, ubuntu, nova_network - """ - self.assertTrue(True) - - def test_ha_depl(self): - """fake empty test - This is empty test for any - ha deployment - """ - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/multinode_deployment_test.py b/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/multinode_deployment_test.py deleted file mode 100644 index 01e90cf0..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/deployment_types_tests/multinode_deployment_test.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "multinode_deployment_test", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/deployment_types_tests/" - "multinode_deployment.py"), - "description": "Fake tests for multinode deployment on ubuntu", - "deployment_tags": ["multinode", "ubuntu"], - "test_runs_ordering_priority": 4, - "exclusive_testsets": [] -} - -import unittest - - -class MultinodeTest(unittest.TestCase): - - def test_multi_novanet_depl(self): - """fake empty test - This is fake empty test - for multinode on ubuntu with - nova-network deployment - Duration: 0sec - Deployment tags: multinode, ubuntu, nova_network - """ - self.assertTrue(True) - - def test_multi_quantum_depl(self): - """fake empty test - This is fake empty test - for multinode on ubuntu with - quatum deployment - Duration: 0sec - Deployment tags: multinode, ubuntu, quantum - """ - self.assertTrue(True) - - def test_multi_depl(self): - """fake empty test - This is fake empty test - for mutlinode on ubuntu - deployment - Duration: 1sec - """ - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/general_test.py b/fuel_plugin/testing/fixture/dummy_tests/general_test.py deleted file mode 100644 index ee4bf374..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/general_test.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "general_test", - "driver": "nose", - "test_path": "fuel_plugin/tests/functional/dummy_tests/general_test.py", - "description": "General fake tests", - "deployment_tags": [], - "test_runs_ordering_priority": 1, - "exclusive_testsets": [] -} - -import httplib -import time -import unittest2 - - -class Dummy_test(unittest2.TestCase): - """Class docstring is required? 
- """ - - def test_fast_pass(self): - """fast pass test - This is a simple always pass test - Duration: 1sec - """ - self.assertTrue(True) - - def test_long_pass(self): - """Will sleep 5 sec - This is a simple test - it will run for 5 sec - Duration: 5sec - """ - time.sleep(5) - self.assertTrue(True) - - def test_fast_fail(self): - """Fast fail - """ - self.assertTrue(False, msg='Something goes wroooong') - - def test_fast_error(self): - """And fast error - """ - conn = httplib.HTTPSConnection('random.random/random') - conn.request("GET", "/random.aspx") - - def test_fail_with_step(self): - """Fast fail with step - """ - self.fail('Step 3 Failed: Fake fail message') - - def test_skip(self): - """Skip - """ - msg = 'The reason to skip goes here' - self.skipTest(msg) - - def test_skip_directly(self): - """Skip with exception - """ - msg = 'Nothing to see here' - raise unittest2.SkipTest(msg) diff --git a/fuel_plugin/testing/fixture/dummy_tests/stopped_test.py b/fuel_plugin/testing/fixture/dummy_tests/stopped_test.py deleted file mode 100644 index 76fb004e..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/stopped_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "stopped_test", - "driver": "nose", - "test_path": "fuel_plugin/tests/functional/dummy_tests/stopped_test.py", - "description": "Long running 25 secs fake tests", - "deployment_tags": [], - "test_runs_ordering_priority": 2, - "exclusive_testsets": [] -} - -import time -import unittest - - -class dummy_tests_stopped(unittest.TestCase): - - def test_really_long(self): - """This is long running tests - Duration: 25sec - """ - time.sleep(25) - self.assertTrue(True) - - def test_one_no_so_long(self): - """What i am doing here? You ask me???? - """ - time.sleep(5) - self.assertFalse(1 == 2) - - def test_not_long_at_all(self): - """You know.. for testing - Duration: 1sec - """ - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/test_environment_variables.py b/fuel_plugin/testing/fixture/dummy_tests/test_environment_variables.py deleted file mode 100644 index d301dde5..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/test_environment_variables.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -__profile__ = { - "id": "environment_variables", - "driver": "nose", - "test_path": ("fuel_plugin/tests/functional/dummy_tests/" - "test_environment_variables.py"), - "description": ("Test for presence of env variables inside of" - " testrun subprocess"), - "deployment_tags": [], - "test_runs_ordering_priority": 12, - "exclusive_testsets": [] -} - -import os -import unittest2 - - -class TestEnvVariables(unittest2.TestCase): - def test_os_credentials_env_variables(self): - self.assertTrue(os.environ.get('OSTF_OS_USERNAME')) - self.assertTrue(os.environ.get('OSTF_OS_PASSWORD')) - self.assertTrue(os.environ.get('OSTF_OS_TENANT_NAME')) diff --git a/fuel_plugin/testing/fixture/dummy_tests/test_versioning.py b/fuel_plugin/testing/fixture/dummy_tests/test_versioning.py deleted file mode 100644 index cec459bb..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/test_versioning.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -__profile__ = { - "id": "test_versioning", - "driver": "nose", - "test_path": "fuel_plugin/tests/functional/dummy_tests/test_versioning.py", - "description": "Test suite that contains fake tests for versioning check", - "deployment_tags": ["releases_comparison"], - "test_runs_ordering_priority": 13, - "exclusive_testsets": [], - "available_since_release": "2015.2-6.0", -} - -import unittest2 - - -class TestVersioning(unittest2.TestCase): - def test_simple_fake_first(self): - """This is simple fake test - for versioning checking. - It should be discovered for - releases == of >= 2015.2-6.0 - Available since release: 2015.2-6.0 - Deployment tags: releases_comparison - """ - self.assertTrue(True) - - def test_simple_fake_second(self): - """This is simple fake test - for versioning checking. - It should be discovered for - releases == of >= 2015.2-6.1 - Available since release: 2015.2-6.1 - Deployment tags: releases_comparison - """ - self.assertTrue(True) - - def test_simple_fake_alphabetic(self): - """This is simple fake test - for versioning checking. - It should be discovered for - releases == of >= liberty-8.0 - Available since release: liberty-8.0 - Deployment tags: releases_comparison - """ - self.assertTrue(True) diff --git a/fuel_plugin/testing/fixture/dummy_tests/test_with_error.py b/fuel_plugin/testing/fixture/dummy_tests/test_with_error.py deleted file mode 100644 index de969bd5..00000000 --- a/fuel_plugin/testing/fixture/dummy_tests/test_with_error.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -__profile__ = { - "id": "test_with_error", - "driver": "nose", - "test_path": "fuel_plugin/tests/functional/dummy_tests/test_with_error.py", - "description": "Test that introduces error while setting up", - "deployment_tags": ['test_error'], - "test_runs_ordering_priority": 6, - "exclusive_testsets": [] -} - -import unittest - - -class FakeTests(unittest.TestCase): - - def test_successfully_passed(self): - """imitation of work - """ - self.assertTrue(True) - - -class WithErrorTest(unittest.TestCase): - """This is supoused to introduce errorness behaviour - in means that it have exception raised in setUp method for - testing purposes. - """ - @classmethod - def setUpClass(cls): - raise Exception('Unhandled exception in setUpClass') - - def setUp(self): - raise Exception('Error in setUp method') - - def test_supposed_to_be_success(self): - """test in errorness class - """ - self.assertTrue(True) - - def test_supposed_to_be_fail(self): - self.assertFalse(False) diff --git a/fuel_plugin/testing/tests/__init__.py b/fuel_plugin/testing/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/tests/base.py b/fuel_plugin/testing/tests/base.py deleted file mode 100644 index e91aac39..00000000 --- a/fuel_plugin/testing/tests/base.py +++ /dev/null @@ -1,498 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import requests_mock -from sqlalchemy import create_engine -from sqlalchemy import event -from sqlalchemy.orm import sessionmaker, scoped_session -import unittest2 -import webtest - -from fuel_plugin.ostf_adapter import config -from fuel_plugin.ostf_adapter import mixins -from fuel_plugin.ostf_adapter.nose_plugin import nose_discovery -from fuel_plugin.ostf_adapter.storage import models -from fuel_plugin.ostf_adapter.wsgi import app - - -TEST_PATH = 'fuel_plugin/testing/fixture/dummy_tests' - - -CLUSTERS = { - 1: { - 'cluster_meta': { - 'release_id': 1, - 'mode': 'ha' - }, - 'release_data': { - 'operating_system': 'rhel', - 'version': '2015.2-1.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 2: { - 'cluster_meta': { - 'release_id': 2, - 'mode': 'multinode', - }, - 'release_data': { - 'operating_system': 'ubuntu', - 'version': '2015.2-1.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 3: { - 'cluster_meta': { - 'release_id': 3, - 'mode': 'ha' - }, - 'release_data': { - 'operating_system': 'rhel', - 'version': '2015.2-1.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': { - 'murano': { - 'value': True - }, - 'sahara': { - 'value': False - } - }, - 'common': {} - } - } - }, - 4: { - 'cluster_meta': { - 'release_id': 4, - 'mode': 'test_error' - }, - 'release_data': { - 'operating_system': 'none', - 'version': '2015.2-1.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 5: { - 'cluster_meta': { - 'release_id': 5, - 'mode': 'dependent_tests' - }, - 'release_data': { - 'operating_system': 'none', - 'version': '2015.2-1.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 6: { - 'cluster_meta': { - 'release_id': 6, - 'mode': 'releases_comparison' - }, - 'release_data': { - 'operating_system': '', - 'version': '2015.2-6.0', - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 7: { - 'cluster_meta': { - 'release_id': 7, - 'mode': 'ha' - }, - 'release_data': { - 'operating_system': 'rhel', - 'version': '2015.2-1.0', - }, - 'cluster_node': [ - { - "hostname": "node-1", - 'id': "1", - 'roles': "compute" - }, - ], - 'node_interfaces': [ - { - 'interface_properties': { - 'sriov': { - 'enabled': 'true' - } - } - - } - ], - 'cluster_attributes': { - 'editable': { - 'additional_components': { - 'murano': { - 'value': True - }, - 'sahara': { - 'value': False - } - }, - 'common': {} - } - } - }, - 8: { - 'cluster_meta': { - 'release_id': 8, - 'mode': 'ha', - 'net_provider': 'neutron' - }, - 'release_data': { - 'operating_system': 'rhel', - 'version': '2015.2-1.0', - }, - 'cluster_node': [ - { - "hostname": "node-1", - 'id': "1", - 'roles': "compute" - }, - { - "hostname": "node-2", - 'id': "2", - 'roles': "compute" - }, - ], - 'node-1_interfaces': [ - { - 'interface_properties': { - 'dpdk': { - 'enabled': 'true' - } - } - - }, - ], - 'node-2_interfaces': [ - { - 'interface_properties': { - 'dpdk': { - 'available': 'false' - } - } - - }, - ], - 'cluster_attributes': { - 'editable': { - 'additional_components': {}, - 'common': {} - } - } - }, - 9: { - 'cluster_meta': { - 'release_id': 9, - 'mode': 'multinode' - }, - 'release_data': { - 
'operating_system': 'ubuntu', - 'version': '2016.1-9.0' - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'detach-murano': { - 'metadata': { - 'enabled': True, - 'versions': [ - { - 'murano_glance_artifacts': { - "value": True - } - } - ] - } - }, - 'additional_components': {}, - 'common': {} - } - } - }, - 10: { - 'cluster_meta': { - 'release_id': 10, - 'mode': 'multinode' - }, - 'release_data': { - 'operating_system': 'ubuntu', - 'version': '2016.1-9.0' - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'detach-murano': { - 'metadata': { - 'enabled': True, - 'versions': [ - { - 'murano_glance_artifacts': { - "value": False - } - } - ] - } - }, - 'additional_components': {}, - 'common': {} - } - } - }, - 11: { - 'cluster_meta': { - 'release_id': 11, - 'mode': 'multinode' - }, - 'release_data': { - 'operating_system': 'ubuntu', - 'version': '2016.1-9.0' - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': { - 'murano': { - 'value': True - } - }, - 'murano_settings': { - 'murano_glance_artifacts_plugin': { - 'value': True - } - }, - 'common': {} - } - } - }, - 12: { - 'cluster_meta': { - 'release_id': 12, - 'mode': 'multinode' - }, - 'release_data': { - 'operating_system': 'ubuntu', - 'version': '2016.1-9.0' - }, - 'cluster_node': { - }, - 'cluster_attributes': { - 'editable': { - 'additional_components': { - 'murano': { - 'value': True - } - }, - 'murano_settings': { - 'murano_glance_artifacts_plugin': { - 'value': False - } - }, - 'common': {} - } - } - }, -} - - -class BaseUnitTest(unittest2.TestCase): - """Base class for all unit tests.""" - - -class BaseIntegrationTest(BaseUnitTest): - """Base class for all integration tests.""" - - @classmethod - def setUpClass(cls): - config.init_config([]) - # db connection - cls.dbpath = config.cfg.CONF.adapter.dbpath - cls.engine = create_engine(cls.dbpath) - - # mock http requests - cls.requests_mock = requests_mock.Mocker() - cls.requests_mock.start() - - @classmethod - def tearDownClass(cls): - # stop https requests mocking - cls.requests_mock.stop() - - def setUp(self): - self.connection = self.engine.connect() - self.trans = self.connection.begin() - self.session = scoped_session(sessionmaker()) - self.session.configure(bind=self.connection) - - # supprot tests with rollbacks - # start the session in a SAVEPOINT... 
- self.session.begin_nested() - - # # then each time that SAVEPOINT ends, reopen it - @event.listens_for(self.session, "after_transaction_end") - def restart_savepoint(session, transaction): - if transaction.nested and not transaction._parent.nested: - session.begin_nested() - - def discovery(self): - """Discover dummy tests used for testsing.""" - mixins.TEST_REPOSITORY = [] - nose_discovery.discovery(path=TEST_PATH, session=self.session) - mixins.cache_test_repository(self.session) - self.session.flush() - - def tearDown(self): - # rollback changes to database - # made by tests - self.trans.rollback() - self.session.close() - self.connection.close() - - def mock_api_for_cluster(self, cluster_id): - """Mock requests to Nailgun to mimic behavior of - Nailgun's API - """ - cluster = CLUSTERS[cluster_id] - release_id = cluster['cluster_meta']['release_id'] - - self.requests_mock.register_uri( - 'GET', - '/api/clusters/{0}'.format(cluster_id), - json=cluster['cluster_meta']) - - self.requests_mock.register_uri( - 'GET', - '/api/releases/{0}'.format(release_id), - json=cluster['release_data']) - - self.requests_mock.register_uri( - 'GET', - '/api/nodes?cluster_id={0}'.format(cluster_id), - json=cluster['cluster_node']) - - self.requests_mock.register_uri( - 'GET', - '/api/clusters/{0}/attributes'.format(cluster_id), - json=cluster['cluster_attributes']) - - -class BaseWSGITest(BaseIntegrationTest): - - def setUp(self): - super(BaseWSGITest, self).setUp() - self.ext_id = 'fuel_plugin.testing.fixture.dummy_tests.' - self.expected = { - 'cluster': { - 'id': 1, - 'deployment_tags': set(['ha', 'rhel', 'nova_network', - 'public_on_all_nodes', - 'enable_without_ceph']) - }, - 'test_sets': ['general_test', - 'stopped_test', 'ha_deployment_test', - 'environment_variables'], - 'tests': [self.ext_id + test for test in [ - ('deployment_types_tests.ha_deployment_test.' - 'HATest.test_ha_depl'), - ('deployment_types_tests.ha_deployment_test.' - 'HATest.test_ha_rhel_depl'), - 'general_test.Dummy_test.test_fast_pass', - 'general_test.Dummy_test.test_long_pass', - 'general_test.Dummy_test.test_fast_fail', - 'general_test.Dummy_test.test_fast_error', - 'general_test.Dummy_test.test_fail_with_step', - 'general_test.Dummy_test.test_skip', - 'general_test.Dummy_test.test_skip_directly', - 'stopped_test.dummy_tests_stopped.test_really_long', - 'stopped_test.dummy_tests_stopped.test_one_no_so_long', - 'stopped_test.dummy_tests_stopped.test_not_long_at_all', - ('test_environment_variables.TestEnvVariables.' 
- 'test_os_credentials_env_variables') - ]] - } - - self.discovery() - - self.app = webtest.TestApp(app.setup_app(session=self.session)) - - def is_background_working(self): - is_working = True - - cluster_state = self.session.query(models.ClusterState)\ - .filter_by(id=self.expected['cluster']['id'])\ - .one() - is_working = is_working and set(cluster_state.deployment_tags) == \ - self.expected['cluster']['deployment_tags'] - - cluster_testing_patterns = self.session\ - .query(models.ClusterTestingPattern)\ - .filter_by(cluster_id=self.expected['cluster']['id'])\ - .all() - - for testing_pattern in cluster_testing_patterns: - is_working = is_working and \ - (testing_pattern.test_set_id in self.expected['test_sets']) - - is_working = is_working and set(testing_pattern.tests)\ - .issubset(set(self.expected['tests'])) - - return is_working diff --git a/fuel_plugin/testing/tests/functional/__init__.py b/fuel_plugin/testing/tests/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/tests/functional/base.py b/fuel_plugin/testing/tests/functional/base.py deleted file mode 100644 index b1f09de6..00000000 --- a/fuel_plugin/testing/tests/functional/base.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
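The removed BaseIntegrationTest above never talks to a live Nailgun: its mock_api_for_cluster() registers canned JSON for the cluster, release, node and attribute endpoints via requests_mock, so any plain requests call made by the adapter during a test is answered from the CLUSTERS fixture. A minimal standalone sketch of that mechanism follows; the host name and the trimmed fixture dict are illustrative assumptions, not taken from the removed files.

# Sketch only: shows how a requests_mock registration intercepts an HTTP GET.
# requests_mock matches on path alone when the registered URI has no host.
import requests
import requests_mock

CLUSTER_META = {'release_id': 1, 'mode': 'ha'}  # trimmed-down example payload

with requests_mock.Mocker() as m:
    m.register_uri('GET', '/api/clusters/1', json=CLUSTER_META)

    # Code under test that issues an ordinary requests call now receives the
    # canned JSON instead of reaching a real Nailgun API.
    resp = requests.get('http://nailgun.example:8000/api/clusters/1')
    assert resp.json() == CLUSTER_META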
- -import functools -import unittest - -from fuel_plugin.ostf_client import client - - -class EmptyResponseError(Exception): - pass - - -class Response(object): - """This is testing_adapter response object.""" - test_name_mapping = {} - - def __init__(self, response): - self.is_empty = False - if isinstance(response, list): - self._parse_json(response) - self.request = None - else: - self._parse_json(response.json()) - self.request = '{0} {1} \n with {2}'\ - .format( - response.request.method, - response.request.url, - response.request.body - ) - - def __getattr__(self, item): - if item in self.test_sets: - return self.test_sets.get(item) - - def __str__(self): - if self.is_empty: - return "Empty" - return self.test_sets.__str__() - - def _parse_json(self, json): - if json == [{}]: - self.is_empty = True - return - else: - self.is_empty = False - - self.test_sets = {} - self._tests = {} - - for testset in json: - self.test_sets[testset.pop('testset')] = testset - - -class AdapterClientProxy(object): - - def __init__(self, url): - self.client = client.TestingAdapterClient(url) - - def __getattr__(self, item): - if item in client.TestingAdapterClient.__dict__: - call = getattr(self.client, item) - return self._decorate_call(call) - - def _decorate_call(self, call): - @functools.wraps(call) - def inner(*args, **kwargs): - r = call(*args, **kwargs) - return Response(r) - return inner - - -class SubsetException(Exception): - pass - - -class BaseAdapterTest(unittest.TestCase): - def compare(self, response, comparable): - if response.is_empty: - msg = '{0} is empty'.format(response.request) - raise AssertionError(msg) - if not isinstance(comparable, Response): - comparable = Response(comparable) - - for test_set in comparable.test_sets.keys(): - test_set_data = comparable.test_sets[test_set] - tests = test_set_data['tests'] - diff = [] - - for item in test_set_data: - if item == 'tests': - continue - if response.test_sets[test_set][item] != test_set_data[item]: - msg = 'Actual "{0}" != expected "{1}" in {2}.{3}'.format( - response.test_sets[test_set][item], - test_set_data[item], - test_set, - item - ) - diff.append(msg) - raise AssertionError(msg) - - tests = dict([(test['id'], test) for test in tests]) - response_tests = dict( - [ - (test['id'], test) for test in - response.test_sets[test_set]['tests'] - ] - ) - - for test_id, test_data in tests.iteritems(): - for data_key, data_value in test_data.iteritems(): - if not response_tests[test_id][data_key] == data_value: - msg = ('Actual "{4}" != expected data value ' - '"{3}" with key "{2}" for test with id' - ' "{1}" of testset "{0}"') - msg = msg.format( - test_set, - test_id, - data_key, - data_value, - response_tests[test_id][data_key] - ) - raise AssertionError(msg) - - @staticmethod - def init_client(url): - ac = AdapterClientProxy(url) - return ac diff --git a/fuel_plugin/testing/tests/functional/tests.py b/fuel_plugin/testing/tests/functional/tests.py deleted file mode 100644 index 98308048..00000000 --- a/fuel_plugin/testing/tests/functional/tests.py +++ /dev/null @@ -1,673 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from sqlalchemy import create_engine -import time - -from fuel_plugin.ostf_client import client -from fuel_plugin.testing.tests.functional import base - - -class AdapterTests(base.BaseAdapterTest): - - @classmethod - def setUpClass(cls): - - url = 'http://0.0.0.0:8777/v1' - - cls.mapping = { - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'): 'fast_pass', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error'): 'fast_error', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail'): 'fast_fail', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_long_pass'): 'long_pass', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fail_with_step'): 'fail_step', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip'): 'skip', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip_directly'): 'skip_directly', - ('fuel_plugin.testing.fixture.dummy_tests.stopped_test.' - 'dummy_tests_stopped.test_really_long'): 'really_long', - ('fuel_plugin.testing.fixture.dummy_tests.stopped_test.' - 'dummy_tests_stopped.test_not_long_at_all'): 'not_long', - ('fuel_plugin.testing.fixture.dummy_tests.stopped_test.' - 'dummy_tests_stopped.test_one_no_so_long'): 'so_long', - ('fuel_plugin.testing.fixture.dummy_tests.deployment_types_tests.' - 'ha_deployment_test.HATest.test_ha_depl'): 'ha_depl', - ('fuel_plugin.testing.fixture.dummy_tests.deployment_types_tests.' - 'ha_deployment_test.HATest.test_ha_rhel_depl'): 'ha_rhel_depl', - ('fuel_plugin.testing.fixture.dummy_tests.' - 'test_environment_variables.TestEnvVariables.' 
- 'test_os_credentials_env_variables'): 'test_env_vars' - } - cls.testsets = { - "ha_deployment_test": [], - "general_test": [ - 'fast_pass', - 'fast_error', - 'fast_fail', - 'long_pass', - 'skip', - 'skip_directly' - ], - "stopped_test": [ - 'really_long', - 'not_long', - 'so_long' - ] - } - - cls.adapter = client.TestingAdapterClient(url) - cls.client = cls.init_client(url) - - @classmethod - def tearDownClass(cls): - lock_path = '/tmp/ostf_locks' - if os.path.exists(lock_path): - for f in os.listdir(lock_path): - f_path = os.path.join(lock_path, f) - if os.path.isfile(f_path): - os.remove(f_path) - - def tearDown(self): - eng = create_engine( - 'postgresql+psycopg2://ostf:ostf@localhost/ostf' - ) - - eng.execute('delete from cluster_testing_pattern;') - eng.execute('delete from cluster_state;') - - def test_list_testsets(self): - """Verify that self.testsets are in json response - """ - cluster_id = 1 - - json = self.adapter.testsets(cluster_id).json() - response_testsets = [item['id'] for item in json] - for testset in self.testsets: - msg = '"{test}" not in "{response}"'.format( - test=testset, - response=response_testsets - ) - self.assertTrue(testset in response_testsets, msg) - - def test_list_tests(self): - """Verify that self.tests are in json response - """ - cluster_id = 1 - json = self.adapter.tests(cluster_id).json() - response_tests = [item['id'] for item in json] - for test in self.mapping.keys(): - msg = '"{test}" not in "{response}"'.format( - test=test.capitalize(), - response=response_tests - ) - self.assertTrue(test in response_tests, msg) - - def test_run_testset(self): - """Verify that test status changes in time from running to success - """ - testsets = ["general_test", "stopped_test"] - cluster_id = 1 - - # make sure we have data about test_sets in db - self.adapter.testsets(cluster_id) - for testset in testsets: - self.client.start_testrun(testset, cluster_id) - - time.sleep(5) - - resp = self.client.testruns_last(cluster_id) - - assertions = base.Response( - [ - { - 'testset': 'general_test', - 'status': 'running', - 'tests': [], - 'meta': None, - 'cluster_id': 1, - }, - { - 'testset': 'stopped_test', - 'status': 'running', - 'tests': [], - 'meta': None, - 'cluster_id': 1, - } - ] - ) - - self.compare(resp, assertions) - time.sleep(30) - - resp = self.client.testruns_last(cluster_id) - - assertions.general_test['status'] = 'finished' - assertions.stopped_test['status'] = 'finished' - - self.compare(resp, assertions) - - def test_stop_testset(self): - """Verify that long running testrun can be stopped - """ - testset = "stopped_test" - cluster_id = 1 - - # make sure we have all needed data in db - # for this test case - self.adapter.testsets(cluster_id) - - self.client.start_testrun(testset, cluster_id) - time.sleep(20) - - resp = self.client.testruns_last(cluster_id) - - assertions = base.Response([ - { - 'testset': 'stopped_test', - 'status': 'running', - 'tests': [], - 'meta': None, - 'cluster_id': 1 - } - ]) - - self.compare(resp, assertions) - - self.client.stop_testrun_last(testset, cluster_id) - time.sleep(5) - resp = self.client.testruns_last(cluster_id) - - assertions.stopped_test['status'] = 'finished' - self.compare(resp, assertions) - - def test_cant_start_while_running(self): - """Verify that you can't start new testrun - for the same cluster_id while previous run - is running - """ - testsets = [ - "stopped_test", - "general_test" - ] - cluster_id = 1 - - self.adapter.testsets(cluster_id) - - for testset in testsets: - 
self.client.start_testrun(testset, cluster_id) - self.client.testruns_last(cluster_id) - - for testset in testsets: - resp = self.client.start_testrun(testset, cluster_id) - - msg = ( - "Response {0} is not empty when you try to start testrun" - " with testset and cluster_id that are already running" - ).format(resp) - - self.assertTrue(resp.is_empty, msg) - - def test_start_many_runs(self): - """Verify that you can start more than one - testruns in a row with different cluster_id - """ - testset = "general_test" - cluster_id = 1 - - self.adapter.testsets(cluster_id) - - for cluster_id in range(1, 2): - resp = self.client.start_testrun(testset, cluster_id) - msg = '{0} was empty'.format(resp.request) - self.assertFalse(resp.is_empty, msg) - - """TODO: Rewrite assertions to verity that all - 5 testruns ended with appropriate status - """ - - def test_run_single_test(self): - """Verify that you can run individual tests from given testset.""" - testset = "general_test" - tests = [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error') - ] - cluster_id = 1 - - # make sure that we have all needed data in db - self.adapter.testsets(cluster_id) - - resp = self.client.start_testrun_tests(testset, tests, cluster_id) - - assertions = base.Response([ - { - 'testset': 'general_test', - 'status': 'running', - 'tests': [ - { - 'status': 'disabled', - 'name': 'Fast fail with step', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fail_with_step'), - }, - { - 'status': 'wait_running', - 'name': 'And fast error', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error'), - }, - { - 'status': 'disabled', - 'name': 'Fast fail', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail'), - }, - { - 'status': 'wait_running', - 'name': 'fast pass test', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - }, - { - 'status': 'disabled', - 'name': 'Will sleep 5 sec', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_long_pass'), - }, - { - 'status': 'disabled', - 'name': 'Skip', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip'), - }, - { - 'status': 'disabled', - 'name': 'Skip with exception', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip_directly'), - } - ], - 'cluster_id': 1, - } - ]) - - self.compare(resp, assertions) - - time.sleep(3) - - resp = self.client.testruns_last(cluster_id) - - assertions.general_test['status'] = 'finished' - for test in assertions.general_test['tests']: - if test['name'] == 'And fast error': - test['status'] = 'error' - elif test['name'] == 'fast pass test': - test['status'] = 'success' - - self.compare(resp, assertions) - - def test_single_test_restart(self): - """Verify that you restart individual tests for given testrun.""" - testset = "general_test" - tests = [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - ('fuel_plugin.testing.fixture.dummy_tests.general_test.' 
- 'Dummy_test.test_fast_fail') - ] - cluster_id = 1 - - # make sure we have all needed data in db - self.adapter.testsets(cluster_id) - - self.client.run_testset_with_timeout(testset, cluster_id, 10) - - resp = self.client.restart_tests_last(testset, tests, cluster_id) - - assertions = base.Response([ - { - 'testset': 'general_test', - 'status': 'running', - 'tests': [ - { - 'status': 'failure', - 'name': 'Fast fail with step', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fail_with_step'), - }, - { - 'status': 'error', - 'name': 'And fast error', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error'), - }, - { - 'status': 'wait_running', - 'name': 'Fast fail', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail'), - }, - { - 'status': 'wait_running', - 'name': 'fast pass test', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - }, - { - 'status': 'success', - 'name': 'Will sleep 5 sec', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_long_pass'), - }, - { - 'status': 'skipped', - 'name': 'Skip', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip'), - }, - { - 'status': 'skipped', - 'name': 'Skip with exception', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip_directly'), - } - ], - 'cluster_id': 1, - } - ]) - - self.compare(resp, assertions) - time.sleep(10) - - resp = self.client.testruns_last(cluster_id) - - assertions.general_test['status'] = 'finished' - for test in assertions.general_test['tests']: - if test['name'] == 'Fast fail': - test['status'] = 'failure' - elif test['name'] == 'fast pass test': - test['status'] = 'success' - - self.compare(resp, assertions) - - def test_restart_combinations(self): - """Verify that you can restart both tests that - ran and did not run during single test start - """ - testset = "general_test" - tests = [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail') - ] - disabled_test = [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error') - ] - cluster_id = 1 - - # make sure we have all needed data in db - self.adapter.testsets(cluster_id) - - self.client.run_with_timeout(testset, tests, cluster_id, 70) - self.client.restart_with_timeout(testset, tests, cluster_id, 10) - - resp = self.client.restart_tests_last(testset, disabled_test, - cluster_id) - - assertions = base.Response([ - { - 'testset': 'general_test', - 'status': 'running', - 'tests': [ - { - 'status': 'disabled', - 'name': 'Fast fail with step', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fail_with_step'), - }, - { - 'status': 'wait_running', - 'name': 'And fast error', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_error'), - }, - { - 'status': 'failure', - 'name': 'Fast fail', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail'), - }, - { - 'status': 'success', - 'name': 'fast pass test', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - }, - { - 'status': 'disabled', - 'name': 'Will sleep 5 sec', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' 
- 'general_test.Dummy_test.test_long_pass'), - }, - { - 'status': 'disabled', - 'name': 'Skip', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip'), - }, - { - 'status': 'disabled', - 'name': 'Skip with exception', - 'id': ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_skip_directly'), - } - ], - 'cluster_id': 1, - } - ]) - self.compare(resp, assertions) - time.sleep(5) - - resp = self.client.testruns_last(cluster_id) - - assertions.general_test['status'] = 'finished' - for test in assertions.general_test['tests']: - if test['name'] == 'And fast error': - test['status'] = 'error' - self.compare(resp, assertions) - - def test_cant_restart_during_run(self): - testset = 'general_test' - tests = [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_fail'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'general_test.Dummy_test.test_fast_pass') - ] - cluster_id = 1 - - # make sure that we have all needen data in db - self.adapter.testsets(cluster_id) - - self.client.start_testrun(testset, cluster_id) - time.sleep(2) - - resp = self.client.restart_tests_last(testset, tests, cluster_id) - msg = ('Response was not empty after trying' - ' to restart running testset:\n {0}').format(resp.request) - self.assertTrue(resp.is_empty, msg) - - def test_nose_adapter_error_while_running_tests(self): - testset = 'test_with_error' - cluster_id = 4 - - # make sure we have all needed data in db - self.adapter.testsets(cluster_id) - - self.client.start_testrun(testset, cluster_id) - time.sleep(5) - - resp = self.client.testruns_last(cluster_id) - - assertions = base.Response([ - { - 'testset': 'test_with_error', - 'status': 'finished', - 'cluster_id': 4, - 'tests': [ - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.test_with_error.WithErrorTest.' - 'test_supposed_to_be_fail' - ), - 'status': 'error' - }, - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.test_with_error.WithErrorTest.' - 'test_supposed_to_be_success' - ), - 'status': 'error' - } - ] - - } - ]) - - self.compare(resp, assertions) - - def test_dependent_testsets(self): - testsets = ['gemini_first', 'gemini_second'] - cluster_id = 5 - - # make sure we have all needed data in db - self.adapter.testsets(cluster_id) - - self.client.start_multiple_testruns(testsets, cluster_id) - time.sleep(5) - - resp = self.client.testruns() - - assertions = base.Response([ - { - 'testset': 'gemini_first', - 'status': 'running', - 'tests': [ - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.dependent_testsets.' - 'gemini_first_test.TestGeminiFirst.' - 'test_fake_long_succes_gf' - ), - 'status': 'running' - }, - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.dependent_testsets.' - 'gemini_first_test.TestGeminiFirst.' - 'test_fake_quick_success_gf' - ), - 'status': 'wait_running' - } - ] - }, - { - 'testset': 'gemini_second', - 'status': 'running', - 'tests': [ - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.dependent_testsets.' - 'gemini_second_test.TestGeminiSecond.' - 'test_fake_long_succes_gs' - ), - 'status': 'wait_running' - }, - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.dependent_testsets.' - 'gemini_second_test.TestGeminiSecond.' 
- 'test_fake_quick_success_gs' - ), - 'status': 'wait_running' - } - ] - } - ]) - - self.compare(resp, assertions) - - def test_env_variables_are_set(self): - assertions = base.Response([ - { - 'testset': 'environment_variables', - 'status': 'finished', - 'tests': [ - { - 'id': ( - 'fuel_plugin.testing.fixture.' - 'dummy_tests.test_environment_variables.' - 'TestEnvVariables.' - 'test_os_credentials_env_variables' - ), - 'status': 'success' - }, - ] - }, - ]) - - def check_testrun_res(): - resp = self.client.testruns() - self.compare(resp, assertions) - - cluster_id = 1 - testset = 'environment_variables' - tests = [ - ('fuel_plugin.testing.fixture.' - 'dummy_tests.test_environment_variables.' - 'TestEnvVariables.' - 'test_os_credentials_env_variables') - ] - - # make sure we have all needed data in db - self.adapter.testsets(cluster_id) - - self.adapter.start_testrun(testset, cluster_id) - time.sleep(5) - - check_testrun_res() - - self.client.restart_tests_last(testset, tests, cluster_id) - time.sleep(5) - - check_testrun_res() diff --git a/fuel_plugin/testing/tests/integration/__init__.py b/fuel_plugin/testing/tests/integration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/tests/integration/test_models_methods.py b/fuel_plugin/testing/tests/integration/test_models_methods.py deleted file mode 100644 index 4ef4934a..00000000 --- a/fuel_plugin/testing/tests/integration/test_models_methods.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import mock - -from fuel_plugin.testing.tests import base - -from fuel_plugin.ostf_adapter import mixins - -from fuel_plugin.ostf_adapter.storage import models - - -class TestModelTestMethods(base.BaseIntegrationTest): - - test_set_id = 'general_test' - cluster_id = 1 - - def setUp(self): - super(TestModelTestMethods, self).setUp() - - self.discovery() - - self.mock_api_for_cluster(self.cluster_id) - - mixins.discovery_check(self.session, self.cluster_id) - self.session.flush() - - self.test_obj = self.session.query(models.Test)\ - .filter_by(test_set_id=self.test_set_id)\ - .first() - - self.test_run = models.TestRun.add_test_run( - self.session, - self.test_obj.test_set_id, - self.cluster_id, - status='running', - tests=[self.test_obj.name] - ) - self.session.flush() - - @property - def test_to_check(self): - return self.session.query(models.Test)\ - .filter_by(test_run_id=self.test_run.id)\ - .filter_by(name=self.test_obj.name)\ - .first() - - def check_model_obj_attrs(self, obj, attrs): - for attr_name, attr_val in attrs.items(): - self.assertEqual(attr_val, getattr(obj, attr_name)) - - def test_add_result(self): - expected_data = { - 'message': 'test_message', - 'status': 'error', - 'time_taken': 10.4 - } - - models.Test.add_result(self.session, - self.test_run.id, - self.test_obj.name, - expected_data) - - self.check_model_obj_attrs(self.test_to_check, expected_data) - - def test_update_running_tests_default_status(self): - models.Test.update_running_tests(self.session, - self.test_run.id) - - self.assertEqual(self.test_to_check.status, 'stopped') - - def test_update_running_tests_with_status(self): - expected_status = 'success' - - models.Test.update_running_tests(self.session, - self.test_run.id, - status=expected_status) - - self.assertEqual(self.test_to_check.status, expected_status) - - def test_update_only_running_tests(self): - # the method should update only running tests - expected_status = 'error' - models.Test.add_result(self.session, self.test_run.id, - self.test_obj.name, - {'status': expected_status}) - - models.Test.update_running_tests(self.session, self.test_run.id) - - # check that status of test is not updated to 'stopped' - self.assertEqual(self.test_to_check.status, expected_status) - - def test_update_test_run_tests_default_status(self): - models.Test.add_result(self.session, self.test_run.id, - self.test_obj.name, - {'time_taken': 10.4}) - - models.Test.update_test_run_tests(self.session, self.test_run.id, - [self.test_obj.name]) - - expected_attrs = { - 'status': 'wait_running', - 'time_taken': None - } - - self.check_model_obj_attrs(self.test_to_check, expected_attrs) - - def test_properly_copied_test(self): - new_test = self.test_obj.copy_test(self.test_run, predefined_tests=[]) - - copied_attrs_list = [ - 'name', - 'title', - 'description', - 'duration', - 'message', - 'traceback', - 'step', - 'time_taken', - 'meta', - 'deployment_tags', - 'available_since_release', - 'test_set_id', - ] - attrs = {} - for attr_name in copied_attrs_list: - attrs[attr_name] = getattr(self.test_obj, attr_name) - - self.check_model_obj_attrs(new_test, attrs) - - self.assertEqual(new_test.test_run_id, self.test_run.id) - self.assertEqual(new_test.status, 'wait_running') - - def test_copy_test_with_predefined_list(self): - predefined_tests_names = ['some_other_test'] - new_test = self.test_obj.copy_test(self.test_run, - predefined_tests_names) - - self.assertEqual(new_test.status, 'disabled') - - -class TestModelTestSetMethods(base.BaseIntegrationTest): - - 
test_set_id = 'general_test' - - def setUp(self): - super(TestModelTestSetMethods, self).setUp() - self.discovery() - - def test_get_test_set(self): - self.assertIsNotNone( - models.TestSet.get_test_set( - self.session, - self.test_set_id - ) - ) - self.assertIsNone( - models.TestSet.get_test_set( - self.session, - 'fake_test' - ) - ) - - def test_frontend_property(self): - test_set = self.session.query(models.TestSet)\ - .filter_by(id=self.test_set_id)\ - .first() - expected = {'id': test_set.id, 'name': test_set.description} - self.assertEqual(expected, test_set.frontend) - - -class TestModelTestRunMethods(base.BaseIntegrationTest): - - test_set_id = 'general_test' - cluster_id = 1 - - def setUp(self): - super(TestModelTestRunMethods, self).setUp() - self.discovery() - - self.mock_api_for_cluster(self.cluster_id) - mixins.discovery_check(self.session, self.cluster_id) - self.session.flush() - - def check_enabled(self, expected_test_names, test_run_tests): - enabled_tests = [ - test.name for test in test_run_tests - if test.status == 'wait_running' - ] - - self.assertItemsEqual(expected_test_names, enabled_tests) - - def test_add_test_run(self): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - for attr in ('test_set_id', 'cluster_id'): - self.assertEqual(getattr(self, attr), getattr(test_run, attr)) - - # default status for newly created test_run is 'running' - self.assertEqual(test_run.status, 'running') - - unassigned_tests = self.session.query(models.Test)\ - .filter_by(test_set_id=self.test_set_id)\ - .filter_by(test_run_id=None) - - test_names_from_test_set = [ - test.name for test in unassigned_tests - ] - test_names_from_test_run = [ - test.name for test in test_run.tests - ] - self.assertItemsEqual(test_names_from_test_run, - test_names_from_test_set) - - def test_add_test_run_non_default_status(self): - expected_status = 'finished' - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id, status=expected_status - ) - self.assertEqual(test_run.status, expected_status) - - def test_add_test_run_with_predefined_tests(self): - expected_test_names = [ - test.name for test in - self.session.query(models.Test) - .filter_by(test_set_id=self.test_set_id) - .filter_by(test_run_id=None) - ][:3] - - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id, tests=expected_test_names - ) - self.check_enabled(expected_test_names, test_run.tests) - - def test_add_test_run_tests_from_another_test_set_in_predefined(self): - expected_test_names = [ - test.name for test in - self.session.query(models.Test) - .filter_by(test_set_id=self.test_set_id) - .filter_by(test_run_id=None) - ][:3] - - additional_test = self.session.query(models.Test)\ - .filter_by(test_set_id='stopped_test')\ - .first() - - tests = expected_test_names + [additional_test] - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id, - tests=tests - ) - - self.assertNotIn( - additional_test.name, - [test.name for test in test_run.tests] - ) - self.check_enabled(expected_test_names, test_run.tests) - - def test_update_testrun_not_finished_status(self): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - expected_status = 'stopped' - test_run.update(expected_status) - - self.assertEqual(test_run.status, expected_status) - self.assertIsNone(test_run.ended_at) - - @mock.patch('fuel_plugin.ostf_adapter.storage.models.datetime') - 
def test_update_testrun_with_finished_status(self, mock_dt): - expected_status = 'finished' - expected_date = datetime.datetime.utcnow() - mock_dt.datetime.utcnow.return_value = expected_date - - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - test_run.update(expected_status) - - self.assertEqual(test_run.status, expected_status) - self.assertEqual(test_run.ended_at, expected_date) - - def test_get_last_test_run(self): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - last_test_run = models.TestRun.get_last_test_run( - self.session, self.test_set_id, self.cluster_id) - - self.assertEqual(test_run, last_test_run) - - @mock.patch('fuel_plugin.ostf_adapter.storage.models.nose_plugin') - def test_start_testrun(self, nose_plugin_mock): - test_set = self.session.query(models.TestSet)\ - .filter_by(id=self.test_set_id).one() - - kwargs = { - 'session': self.session, - 'test_set': test_set, - 'metadata': {'cluster_id': self.cluster_id}, - 'dbpath': 'fake_db_path', - 'token': 'fake_token', - 'tests': None, - } - - plugin_inst_mock = mock.Mock() - nose_plugin_mock.get_plugin = mock.Mock( - return_value=plugin_inst_mock - ) - - with mock.patch.object( - models.TestRun, 'is_last_running', - new=mock.Mock(return_value=True)) as is_last_run_mock: - - frontend = models.TestRun.start( - **kwargs - ) - - added_test_run = self.session.query(models.TestRun)\ - .first() - self.assertEqual(frontend, added_test_run.frontend) - - nose_plugin_mock.get_plugin.called_once_with(test_set.driver) - is_last_run_mock.called_once_with( - self.session, - test_set.id, - self.cluster_id - ) - plugin_inst_mock.run.called_once_with( - added_test_run, test_set, kwargs['dbpath'], None, kwargs['token'] - ) - - @mock.patch('fuel_plugin.ostf_adapter.storage.models.nose_plugin') - def test_start_test_run_already_running(self, nose_plugin_mock): - test_set = self.session.query(models.TestSet)\ - .filter_by(id=self.test_set_id).one() - - kwargs = { - 'session': self.session, - 'test_set': test_set, - 'metadata': {'cluster_id': self.cluster_id}, - 'dbpath': 'fake_db_path', - 'token': 'fake_token', - 'tests': None, - } - - with mock.patch.object( - models.TestRun, 'is_last_running', - new=mock.Mock(return_value=False)): - - frontend = models.TestRun.start( - **kwargs - ) - - added_test_run = self.session.query(models.TestRun)\ - .first() - self.assertIsNone(added_test_run) - - self.assertEqual(frontend, {}) - - @mock.patch('fuel_plugin.ostf_adapter.storage.models.nose_plugin') - def test_restart_test_run(self, nose_plugin_mock): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - kwargs = { - 'session': self.session, - 'ostf_os_access_creds': [], - 'dbpath': 'fake_db_path', - 'token': 'fake_token', - 'tests': 'fake_tests' - } - - plugin_inst_mock = mock.Mock() - nose_plugin_mock.get_plugin = mock.Mock( - return_value=plugin_inst_mock - ) - - with mock.patch.object( - models.TestRun, 'is_last_running', - new=mock.Mock(return_value=True)) as is_last_run_mock: - - with mock.patch.object( - models.Test, 'update_test_run_tests') as update_tests_mock: - frontend = test_run.restart(**kwargs) - - self.assertEqual(test_run.frontend, frontend) - self.assertEqual(test_run.status, 'running') - - is_last_run_mock.called_once_with( - self.session, - test_run.test_set_id, - test_run.cluster_id - ) - nose_plugin_mock.get_plugin.assert_called_once_with( - test_run.test_set.driver - ) - 
update_tests_mock.assert_called_once_with( - self.session, test_run.id, kwargs['tests'] - ) - plugin_inst_mock.run.assert_called_once_with( - test_run, test_run.test_set, kwargs['dbpath'], - kwargs['ostf_os_access_creds'], kwargs['tests'], - token=kwargs['token'] - ) - - def test_run_restart_is_running(self): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - kwargs = { - 'session': self.session, - 'ostf_os_access_creds': [], - 'dbpath': 'fake_db_path', - 'token': 'fake_token', - 'tests': 'fake_tests' - } - - with mock.patch.object( - models.TestRun, 'is_last_running', - new=mock.Mock(return_value=False)) as is_last_run_mock: - frontend = test_run.restart(**kwargs) - - is_last_run_mock.called_once_with( - self.session, - test_run.test_set_id, - test_run.cluster_id - ) - self.assertEqual(frontend, {}) - - @mock.patch('fuel_plugin.ostf_adapter.storage.models.nose_plugin') - def test_stop_test_run(self, nose_plugin_mock): - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - - plugin_inst_mock = mock.Mock() - kill_mock = mock.Mock(return_value=True) - plugin_inst_mock.kill = kill_mock - nose_plugin_mock.get_plugin = mock.Mock( - return_value=plugin_inst_mock - ) - - with mock.patch.object( - models.Test, 'update_running_tests') as update_tests_mock: - frontend = test_run.stop(self.session) - - self.assertEqual(frontend, test_run.frontend) - - nose_plugin_mock.get_plugin.assert_called_once_with( - test_run.test_set.driver - ) - kill_mock.assert_called_once_with(test_run) - update_tests_mock.assert_called_once_with( - self.session, test_run.id, status='stopped' - ) - - def test_is_last_running(self): - is_last_running = models.TestRun.is_last_running( - self.session, self.test_set_id, self.cluster_id - ) - self.assertTrue(is_last_running) - - test_run = models.TestRun.add_test_run( - self.session, self.test_set_id, - self.cluster_id - ) - is_last_running = models.TestRun.is_last_running( - self.session, self.test_set_id, self.cluster_id - ) - self.assertFalse(is_last_running) - - test_run.status = 'finished' - is_last_running = models.TestRun.is_last_running( - self.session, self.test_set_id, self.cluster_id - ) - self.assertTrue(is_last_running) diff --git a/fuel_plugin/testing/tests/integration/test_wsgi_controllers.py b/fuel_plugin/testing/tests/integration/test_wsgi_controllers.py deleted file mode 100644 index 9bb436fa..00000000 --- a/fuel_plugin/testing/tests/integration/test_wsgi_controllers.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
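The model-layer tests above (test_models_methods.py) pin down TestRun.is_last_running and the nose_plugin driver with mock.patch / mock.patch.object so that both the "previous run still going" and the "free to start" branches can be exercised without a database or a test runner. A loose, self-contained sketch of that pattern; FakeTestRun is a hypothetical stand-in, not the removed models.TestRun.

# Sketch only: patch a classmethod to force each branch of the caller.
from unittest import mock  # the removed tests used the external 'mock' package


class FakeTestRun(object):
    """Hypothetical stand-in for the removed models.TestRun."""

    @classmethod
    def is_last_running(cls, session, test_set_id, cluster_id):
        raise RuntimeError('would normally query the database')

    @classmethod
    def start(cls, session, test_set_id, cluster_id):
        # Loosely mirrors the guard exercised by
        # test_start_test_run_already_running above: refuse to start a new
        # run while the previous one is still running.
        if not cls.is_last_running(session, test_set_id, cluster_id):
            return {}
        return {'testset': test_set_id, 'status': 'running'}


with mock.patch.object(FakeTestRun, 'is_last_running', return_value=False):
    assert FakeTestRun.start(None, 'general_test', 1) == {}

with mock.patch.object(FakeTestRun, 'is_last_running', return_value=True):
    assert FakeTestRun.start(None, 'general_test', 1)['status'] == 'running'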
- -import mock - -from fuel_plugin.ostf_adapter.storage import models -from fuel_plugin.testing.tests import base - - -class TestTestsController(base.BaseWSGITest): - - def test_get(self): - cluster_id = self.expected['cluster']['id'] - self.mock_api_for_cluster(cluster_id) - resp = self.app.get( - '/v1/tests/{0}'.format(cluster_id) - ) - resp_tests = [test['id'] for test in resp.json] - - self.assertTrue(self.is_background_working) - - self.assertItemsEqual( - resp_tests, - self.expected['tests'] - ) - - -class TestTestSetsController(base.BaseWSGITest): - - def test_get(self): - self.expected['test_set_description'] = [ - 'General fake tests', - 'Long running 25 secs fake tests', - 'Fake tests for HA deployment', - 'Test for presence of env variables inside of testrun subprocess' - ] - - cluster_id = self.expected['cluster']['id'] - self.mock_api_for_cluster(cluster_id) - - resp = self.app.get( - '/v1/testsets/{0}'.format(cluster_id) - ) - resp_testsets_ids = [testset['id'] for testset in resp.json] - - self.assertTrue(self.is_background_working) - - self.assertItemsEqual( - resp_testsets_ids, - self.expected['test_sets'] - ) - - self.assertItemsEqual( - [testset['name'] for testset in resp.json], - self.expected['test_set_description'] - ) - - test_sets_order = ( - 'general_test', - 'stopped_test', - 'ha_deployment_test', - 'environment_variables', - ) - self.assertSequenceEqual(resp_testsets_ids, test_sets_order) - - -class TestTestRunsController(base.BaseWSGITest): - - def setUp(self): - super(TestTestRunsController, self).setUp() - self.plugin_mock = mock.Mock() - self.plugin_mock.kill.return_value = True - - self.nose_plugin_patcher = mock.patch( - 'fuel_plugin.ostf_adapter.storage.models.nose_plugin.get_plugin', - lambda *args: self.plugin_mock - ) - self.nose_plugin_patcher.start() - - self.cluster_id = self.expected['cluster']['id'] - self.mock_api_for_cluster(self.cluster_id) - - def tearDown(self): - super(TestTestRunsController, self).tearDown() - self.nose_plugin_patcher.stop() - - def test_post(self): - self.expected['testrun_post'] = { - 'testset': 'ha_deployment_test', - 'status': 'running', - 'cluster_id': 1, - 'tests': { - 'names': [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'deployment_types_tests.ha_deployment_test.' - 'HATest.test_ha_depl'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'deployment_types_tests.ha_deployment_test.' 
- 'HATest.test_ha_rhel_depl') - ] - } - } - - resp = self.app.post_json('/v1/testruns/', ( - { - 'testset': 'ha_deployment_test', - 'metadata': {'cluster_id': self.cluster_id} - }, - )) - - resp_testrun = resp.json[0] - - for key in self.expected['testrun_post']: - if key == 'tests': - self.assertItemsEqual( - self.expected['testrun_post'][key]['names'], - [test['id'] for test in resp_testrun[key]] - ) - else: - self.assertEqual( - self.expected['testrun_post'][key], - resp_testrun[key] - ) - - self.session.query(models.TestRun)\ - .filter_by(test_set_id=self.expected['testrun_post']['testset'])\ - .filter_by(cluster_id=self.expected['testrun_post']['cluster_id'])\ - .one() - - testrun_tests = self.session.query(models.Test)\ - .filter(models.Test.test_run_id != (None))\ - .all() - - tests_names = [ - test.name for test in testrun_tests - ] - self.assertItemsEqual( - tests_names, - self.expected['testrun_post']['tests']['names'] - ) - - def test_put_stopped(self): - resp = self.app.post_json('/v1/testruns/', ( - { - 'testset': 'ha_deployment_test', - 'metadata': {'cluster_id': self.cluster_id} - }, - )) - resp_testrun = resp.json[0] - - self.session.query(models.Test)\ - .filter_by(test_run_id=resp_testrun['id'])\ - .update({'status': 'running'}) - - # flush data which test is depend on into db - self.session.commit() - - self.expected['testrun_put'] = { - 'id': resp_testrun['id'], - 'testset': 'ha_deployment_test', - 'cluster_id': 1, - 'tests': { - 'names': [ - ('fuel_plugin.testing.fixture.dummy_tests.' - 'deployment_types_tests.ha_deployment_test.' - 'HATest.test_ha_depl'), - ('fuel_plugin.testing.fixture.dummy_tests.' - 'deployment_types_tests.ha_deployment_test.' - 'HATest.test_ha_rhel_depl') - ] - } - } - - resp = self.app.put_json('/v1/testruns/', ( - { - 'status': 'stopped', - 'id': resp_testrun['id'] - }, - )) - resp_testrun = resp.json[0] - - for key in self.expected['testrun_put'].keys(): - if key == 'tests': - self.assertItemsEqual( - self.expected['testrun_put'][key]['names'], - [test['id'] for test in resp_testrun[key]] - ) - else: - self.assertEqual( - self.expected['testrun_put'][key], resp_testrun[key] - ) - - testrun_tests = self.session.query(models.Test)\ - .filter_by(test_run_id=self.expected['testrun_put']['id'])\ - .all() - - tests_names = [ - test.name for test in testrun_tests - ] - self.assertItemsEqual( - tests_names, - self.expected['testrun_put']['tests']['names'] - ) - - self.assertTrue( - all( - [test.status == 'stopped' for test in testrun_tests] - ) - ) - - -class TestClusterRedeployment(base.BaseWSGITest): - - @mock.patch('fuel_plugin.ostf_adapter.mixins._get_cluster_attrs') - def test_cluster_redeployment_with_different_tags(self, - m_get_cluster_attrs): - m_get_cluster_attrs.return_value = { - 'deployment_tags': set(['multinode', 'centos']), - 'release_version': '2015.2-1.0' - } - - cluster_id = self.expected['cluster']['id'] - self.app.get('/v1/testsets/{0}'.format(cluster_id)) - - self.expected = { - 'cluster': { - 'id': 1, - 'deployment_tags': set(['multinode', 'ubuntu', 'nova_network']) - }, - 'test_sets': ['general_test', - 'stopped_test', 'multinode_deployment_test', - 'environment_variables'], - 'tests': [self.ext_id + test for test in [ - ('deployment_types_tests.multinode_deployment_test.' - 'MultinodeTest.test_multi_novanet_depl'), - ('deployment_types_tests.multinode_deployment_test.' 
-                 'MultinodeTest.test_multi_depl'),
-                'general_test.Dummy_test.test_fast_pass',
-                'general_test.Dummy_test.test_long_pass',
-                'general_test.Dummy_test.test_fast_fail',
-                'general_test.Dummy_test.test_fast_error',
-                'general_test.Dummy_test.test_fail_with_step',
-                'general_test.Dummy_test.test_skip',
-                'general_test.Dummy_test.test_skip_directly',
-                'stopped_test.dummy_tests_stopped.test_really_long',
-                'stopped_test.dummy_tests_stopped.test_one_no_so_long',
-                'stopped_test.dummy_tests_stopped.test_not_long_at_all',
-                ('test_environment_variables.TestEnvVariables.'
-                 'test_os_credentials_env_variables')
-            ]]
-        }
-
-        # patch _get_cluster_attrs in order to emulate
-        # redeployment of the cluster
-        m_get_cluster_attrs.return_value = {
-            'deployment_tags': set(['multinode', 'ubuntu', 'nova_network']),
-            'release_version': '2015.2-1.0'
-        }
-
-        self.app.get('/v1/testsets/{0}'.format(cluster_id))
-
-        self.assertTrue(self.is_background_working)
-
-
-class TestVersioning(base.BaseWSGITest):
-    def test_discover_tests_with_versions(self):
-        cluster_id = 6
-        self.mock_api_for_cluster(cluster_id)
-        self.app.get('/v1/testsets/{0}'.format(cluster_id))
-
-        self.expected = {
-            'cluster': {
-                'id': 6,
-                'deployment_tags': set(['releases_comparison'])
-            },
-            'test_sets': ['general_test', 'stopped_test', 'test_versioning',
-                          'environment_variables'],
-            'tests': [self.ext_id + test for test in [
-                'general_test.Dummy_test.test_fast_pass',
-                'general_test.Dummy_test.test_long_pass',
-                'general_test.Dummy_test.test_fast_fail',
-                'general_test.Dummy_test.test_fast_error',
-                'general_test.Dummy_test.test_fail_with_step',
-                'general_test.Dummy_test.test_skip',
-                'general_test.Dummy_test.test_skip_directly',
-                'stopped_test.dummy_tests_stopped.test_really_long',
-                'stopped_test.dummy_tests_stopped.test_one_no_so_long',
-                'stopped_test.dummy_tests_stopped.test_not_long_at_all',
-                ('test_environment_variables.TestEnvVariables.'
-                 'test_os_credentials_env_variables'),
-                'test_versioning.TestVersioning.test_simple_fake_first',
-            ]]
-        }
-
-        self.assertTrue(self.is_background_working)
diff --git a/fuel_plugin/testing/tests/integration/test_wsgi_interface.py b/fuel_plugin/testing/tests/integration/test_wsgi_interface.py
deleted file mode 100644
index ac47c641..00000000
--- a/fuel_plugin/testing/tests/integration/test_wsgi_interface.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2013 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -import mock - -from fuel_plugin.ostf_adapter.storage import models -from fuel_plugin.testing.tests import base - - -class WsgiInterfaceTest(base.BaseWSGITest): - - def test_get_all_tests(self): - cluster_id = 1 - self.mock_api_for_cluster(cluster_id) - self.app.get('/v1/tests/{0}'.format(cluster_id)) - - def test_get_all_testsets(self): - cluster_id = 1 - self.mock_api_for_cluster(cluster_id) - self.app.get('/v1/testsets/{0}'.format(cluster_id)) - - def test_get_one_testruns(self): - self.app.get('/v1/testruns/1') - - def test_get_all_testruns(self): - self.app.get('/v1/testruns') - - @mock.patch.object(models.TestRun, 'start') - def test_post_testruns(self, mstart): - self.mock_api_for_cluster(3) - self.mock_api_for_cluster(4) - - testruns = [ - { - 'testset': 'general_test', - 'metadata': {'cluster_id': 3} - }, - { - 'testset': 'general_test', - 'metadata': {'cluster_id': 4} - } - ] - - mstart.return_value = {} - self.app.post_json('/v1/testruns', testruns) - - def test_put_testruns(self): - testruns = [ - { - 'id': 2, - 'metadata': {'cluster_id': 3}, - 'status': 'non_exist' - }, - { - 'id': 1, - 'metadata': {'cluster_id': 4}, - 'status': 'non_exist' - } - ] - - self.app.put_json('/v1/testruns', testruns) - - def test_get_last_testruns(self): - cluster_id = 1 - self.app.get('/v1/testruns/last/{0}'.format(cluster_id)) diff --git a/fuel_plugin/testing/tests/unit/__init__.py b/fuel_plugin/testing/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/fuel_plugin/testing/tests/unit/test_nose_discovery.py b/fuel_plugin/testing/tests/unit/test_nose_discovery.py deleted file mode 100644 index fd1186f9..00000000 --- a/fuel_plugin/testing/tests/unit/test_nose_discovery.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from mock import Mock -from nose import case - -from fuel_plugin.ostf_adapter.nose_plugin import nose_discovery -from fuel_plugin.ostf_adapter.nose_plugin import nose_utils -from fuel_plugin.ostf_adapter.storage import models -from fuel_plugin.testing.tests import base - - -TEST_PATH = 'fuel_plugin/testing/fixture/dummy_tests' - - -class TransactionBeginMock: - def __init__(inst, subtransactions): - pass - - def __enter__(self): - pass - - def __exit__(self, type, value, traceback): - pass - - -class TestNoseDiscovery(base.BaseUnitTest): - - @classmethod - def setUpClass(cls): - session_mock = Mock() - session_mock.begin = TransactionBeginMock - - nose_discovery.discovery( - path=TEST_PATH, - session=session_mock - ) - - cls.test_sets = [ - el[0][0] for el in session_mock.merge.call_args_list - if isinstance(el[0][0], models.TestSet) - ] - - cls.tests = [ - el[0][0] for el in session_mock.merge.call_args_list - if isinstance(el[0][0], models.Test) - ] - - def _find_needed_test(self, test_name): - return next(t for t in self.tests if t.name == test_name) - - def _find_needed_test_set(self, test_set_id): - return next(t for t in self.test_sets if t.id == test_set_id) - - def test_compare_release_versions(self): - def cmp_version(first, second): - if nose_utils._compare_release_versions(first, second): - return 1 - else: - return -1 - - expected = [ - '2014.2-6.0', - '2014.2.2-6.1', - '2015.1.0-7.0', - 'liberty-8.0' - ] - - releases = expected[:] - random.shuffle(releases) - self.assertEqual(expected, - sorted(releases, - cmp=cmp_version)) - - def test_discovery(self): - expected = { - 'test_sets_count': 10, - 'tests_count': 30 - } - - self.assertTrue( - all( - [len(self.test_sets) == expected['test_sets_count'], - len(self.tests) == expected['tests_count']] - ) - ) - - unique_test_sets = list( - set([testset.id for testset in self.test_sets]) - ) - unique_tests = list(set([test.name for test in self.tests])) - - self.assertTrue( - all( - [len(unique_test_sets) == len(self.test_sets), - len(unique_tests) == len(self.tests)] - ) - ) - - def test_get_proper_description(self): - expected = { - 'title': 'fake empty test', - 'name': ('fuel_plugin.testing.fixture.' - 'dummy_tests.deployment_types_tests.' - 'ha_deployment_test.HATest.test_ha_rhel_depl'), - 'duration': '0sec', - 'test_set_id': 'ha_deployment_test', - 'deployment_tags': ['ha', 'rhel'] - - } - test = [t for t in self.tests if t.name == expected['name']][0] - - self.assertTrue( - all( - [ - expected[key] == getattr(test, key) - for key in expected.keys() - ] - ) - ) - - def test_discovery_tests_with_alternative_depl_tags(self): - expected = { - 'testset': { - 'id': 'alternative_depl_tags_test', - 'deployment_tags': ['alternative | alternative_test'] - }, - 'test': { - 'name': ('fuel_plugin.testing.fixture.dummy_tests.' - 'deployment_types_tests.alternative_depl_tags_test.' 
- 'AlternativeDeplTagsTests.test_simple_fake_test'), - 'deployment_tags': ['one_tag| another_tag', 'other_tag'] - } - } - - needed_testset = self._find_needed_test_set(expected['testset']['id']) - - needed_test = self._find_needed_test(expected['test']['name']) - - self.assertEqual( - needed_testset.deployment_tags, - expected['testset']['deployment_tags'] - ) - - self.assertEqual( - needed_test.deployment_tags, - expected['test']['deployment_tags'] - ) - - def test_if_test_belongs_to_test_set(self): - test_set_id = 'ha' - pass_checks = ( - 'test_ha_sth', - 'test-ha-ha', - 'test.ha.sahara', - 'test.ha.sth', - ) - fail_checks = ( - 'test_sahara', - 'test.nonha.sth', - 'test.nonha.sahara', - ) - - for test_id in pass_checks: - self.assertTrue( - nose_discovery.DiscoveryPlugin.test_belongs_to_testset( - test_id, test_set_id) - ) - - for test_id in fail_checks: - self.assertFalse( - nose_discovery.DiscoveryPlugin.test_belongs_to_testset( - test_id, test_set_id) - ) - - def test_release_version_attribute(self): - for test_entity in (self.tests, self.test_sets): - self.assertTrue( - all( - [hasattr(t, 'available_since_release') - for t in test_entity] - ) - ) - - expected = { - 'test_set': { - 'id': 'test_versioning', - 'available_since_release': '2015.2-6.0', - }, - 'tests': [ - {'name': ('fuel_plugin.testing.fixture.dummy_tests.' - 'test_versioning.TestVersioning.' - 'test_simple_fake_first'), - 'available_since_release': '2015.2-6.0', }, - {'name': ('fuel_plugin.testing.fixture.dummy_tests.' - 'test_versioning.TestVersioning.' - 'test_simple_fake_second'), - 'available_since_release': '2015.2-6.1', }, - {'name': ('fuel_plugin.testing.fixture.dummy_tests.' - 'test_versioning.TestVersioning.' - 'test_simple_fake_alphabetic'), - 'available_since_release': 'liberty-8.0', } - ] - } - - needed_test_set = self._find_needed_test_set( - expected['test_set']['id'] - ) - self.assertEqual(needed_test_set.available_since_release, - expected['test_set']['available_since_release']) - - for test in expected['tests']: - needed_test = self._find_needed_test(test['name']) - self.assertEqual(needed_test.available_since_release, - test['available_since_release']) - - def test_description_parsing(self): - test_obj = Mock(spec=case.Test(Mock())) - - test_obj.test._testMethodDoc = """ - Dummy Test - Available since release: 2014.2-6.1 - Duration: 180 s. - Scenario: - 1. Step 1 - Deployment tags: Dummy Tags - """ - - data = nose_utils.get_description(test_obj) - expected = { - 'duration': '180 s.', - 'title': '', - 'deployment_tags': ['dummy tags'], - 'available_since_release': '2014.2-6.1' - } - - for key in expected: - self.assertEqual(data[key], expected[key]) - - self.assertNotIn('Duration', data['description']) diff --git a/fuel_plugin/testing/tests/unit/test_requirements.py b/fuel_plugin/testing/tests/unit/test_requirements.py deleted file mode 100644 index 638c6030..00000000 --- a/fuel_plugin/testing/tests/unit/test_requirements.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import pkg_resources - - -def test_requirements(): - pkg_resources.require('fuel-ostf') diff --git a/fuel_plugin/testing/tests/unit/test_results_logger.py b/fuel_plugin/testing/tests/unit/test_results_logger.py deleted file mode 100644 index 4d759dcb..00000000 --- a/fuel_plugin/testing/tests/unit/test_results_logger.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from fuel_plugin.ostf_adapter import logger -from fuel_plugin.testing.tests import base - - -@mock.patch.object(logger.ResultsLogger, '_init_file_logger') -class TestResultsLogger(base.BaseUnitTest): - - def get_logger(self, **kwargs): - options = { - 'testset': 'testset', - 'cluster_id': 1, - } - options.update(kwargs) - return logger.ResultsLogger(**options) - - def test_filename(self, m_init_logger): - logger = self.get_logger(testset='testset_name', - cluster_id=99) - expected = "cluster_99_testset_name.log" - - self.assertEqual(logger.filename, expected) - - def test_log_format_on_success(self, m_init_logger): - logger = self.get_logger() - logger._logger = mock.Mock() - - logger.log_results( - test_id='tests.successful.test', test_name='Successful test', - status='SUCCESS', message='', traceback='') - - expected = 'SUCCESS Successful test (tests.successful.test) ' - logger._logger.info.assert_called_once_with(expected) - - def test_log_format_on_fail(self, m_init_logger): - logger = self.get_logger() - logger._logger = mock.Mock() - - logger.log_results( - test_id='tests.failing.test', test_name='Failing test', - status='FAIL', message='Message after fail', traceback='TRACEBACK') - - expected = ('FAIL Failing test (tests.failing.test) ' - 'Message after fail TRACEBACK') - logger._logger.info.assert_called_once_with(expected) - - def test_log_format_on_error(self, m_init_logger): - logger = self.get_logger() - logger._logger = mock.Mock() - - logger.log_results( - test_id='tests.error.test', test_name='Error test', - status='ERROR', message='Message after error', - traceback="TRACEBACK") - - expected = ('ERROR Error test (tests.error.test) ' - 'Message after error TRACEBACK') - logger._logger.info.assert_called_once_with(expected) diff --git a/fuel_plugin/testing/tests/unit/test_support_utilities.py b/fuel_plugin/testing/tests/unit/test_support_utilities.py deleted file mode 100644 index 697197d9..00000000 --- a/fuel_plugin/testing/tests/unit/test_support_utilities.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests_mock - -from fuel_plugin.ostf_adapter import config -from fuel_plugin.ostf_adapter import mixins -from fuel_plugin.testing.tests import base - - -class TestDeplTagsGetter(base.BaseUnitTest): - - def setUp(self): - config.init_config([]) - - def test_get_cluster_depl_tags(self): - expected = { - 'cluster_id': 3, - 'attrs': { - 'deployment_tags': set( - ['ha', 'rhel', 'additional_components', - 'murano', 'nova_network', 'public_on_all_nodes', - 'enable_without_ceph', 'computes_without_dpdk']), - 'release_version': '2015.2-1.0' - } - } - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/3', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/3/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/3', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=3', - json=cluster['cluster_node']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - def test_sriov_deployment_tag(self): - expected = { - 'cluster_id': 7, - 'attrs': { - 'deployment_tags': set( - ['ha', 'rhel', 'additional_components', - 'murano', 'nova_network', 'public_on_all_nodes', - 'enable_without_ceph', 'sriov', - 'computes_without_dpdk']), - 'release_version': '2015.2-1.0' - } - } - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/7', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/7/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/7', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=7', - json=cluster['cluster_node']) - m.register_uri('GET', '/api/nodes/1/interfaces', - json=cluster['node_interfaces']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - def test_dpdk_deployment_tag(self): - expected = { - 'cluster_id': 8, - 'attrs': { - 'deployment_tags': set( - ['computes_with_dpdk', 'neutron', 'enable_without_ceph', - 'ha', 'public_on_all_nodes', 'rhel', - 'computes_without_dpdk']), - 'release_version': '2015.2-1.0' - } - } - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/8', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/8/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/8', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=8', - json=cluster['cluster_node']) - m.register_uri('GET', '/api/nodes/1/interfaces', - json=cluster['node-1_interfaces']) - m.register_uri('GET', '/api/nodes/2/interfaces', - json=cluster['node-2_interfaces']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - -class TestDeplMuranoTags(base.BaseUnitTest): - - def setUp(self): - config.init_config([]) - - self.expected = { - 'attrs': { - 'deployment_tags': set( - ['multinode', 
'ubuntu', 'additional_components', - 'nova_network', 'public_on_all_nodes', - 'enable_without_ceph', 'computes_without_dpdk']), - 'release_version': '2016.1-9.0' - } - } - - def test_get_murano_plugin_tags_with_artifacts(self): - expected = self.expected - expected['cluster_id'] = 9 - expected['attrs']['deployment_tags'].add('murano_plugin') - expected['attrs']['deployment_tags'].add('murano_use_glare') - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/9', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/9/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/9', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=9', - json=cluster['cluster_node']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - def test_get_murano_plugin_tags_without_artifacts(self): - expected = self.expected - expected['cluster_id'] = 10 - expected['attrs']['deployment_tags'].add('murano_plugin') - expected['attrs']['deployment_tags'].add('murano_without_glare') - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/10', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/10/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/10', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=10', - json=cluster['cluster_node']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - def test_get_murano_tags_with_artifacts(self): - expected = self.expected - expected['cluster_id'] = 11 - expected['attrs']['deployment_tags'].add('murano') - expected['attrs']['deployment_tags'].add('murano_use_glare') - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/11', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/11/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/11', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=11', - json=cluster['cluster_node']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) - - def test_get_murano_tags_without_artifacts(self): - expected = self.expected - expected['cluster_id'] = 12 - expected['attrs']['deployment_tags'].add('murano') - expected['attrs']['deployment_tags'].add('murano_without_glare') - - with requests_mock.Mocker() as m: - cluster = base.CLUSTERS[expected['cluster_id']] - m.register_uri('GET', '/api/clusters/12', - json=cluster['cluster_meta']) - m.register_uri('GET', '/api/clusters/12/attributes', - json=cluster['cluster_attributes']) - m.register_uri('GET', '/api/releases/12', - json=cluster['release_data']) - m.register_uri('GET', '/api/nodes?cluster_id=12', - json=cluster['cluster_node']) - res = mixins._get_cluster_attrs(expected['cluster_id']) - - self.assertEqual(res, expected['attrs']) diff --git a/ostf.service b/ostf.service deleted file mode 100644 index e28610c4..00000000 --- a/ostf.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Name=OSTF daemon -ConditionPathExists=/etc/ostf/ostf.conf - -[Service] -ExecStart=/usr/bin/ostf-server - -[Install] -WantedBy=multi-user.target diff --git a/pylintrc b/pylintrc deleted file mode 100644 index 
3f8e00b3..00000000 --- a/pylintrc +++ /dev/null @@ -1,236 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Profiled execution. -profile=no - -# Add to the black list. It should be a base name, not a -# path. You may set this option multiple times. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - - -[MESSAGES CONTROL] - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifier separated by comma (,) or put this option -# multiple time. -disable=F0401,R0201,W0311,C0111 - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html -output-format=parseable - -# Include message's id in output -include-ids=yes - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (R0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (R0004). -comment=no - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=120 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching names used for dummy variables (i.e. not used). -dummy-variables-rgx=_|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -zope=no - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. 
-generated-members=REQUEST,acl_users,aq_parent - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[BASIC] - -# Required attributes for module, separated by a comma -required-attributes= - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=app,uwsgi,e,i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression which should only match functions or classes name which do -# not require a docstring -no-docstring-rgx=__.*__|[Tt]est.* - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=20 - -# Maximum number of return / yield for function / method body -max-returns=10 - -# Maximum number of branch for function / method body -max-branchs=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=10 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=0 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,string,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[CLASSES] - -# List of interface methods to ignore, separated by a comma. This is used for -# instance to not check methods defines in Zope's Interface base class. 
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index a739d571..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-aodhclient>=0.5.0 # Apache-2.0
-python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1 # Apache-2.0
-python-ceilometerclient>=2.5.0 # Apache-2.0
-python-keystoneclient>=2.0.0,!=2.1.0 # Apache-2.0
-python-muranoclient>=0.8.2 # Apache-2.0
-python-novaclient>=7.0 # Apache-2.0
-python-neutronclient>=5.1.0 # Apache-2.0
-python-heatclient>=1.4.0 # Apache-2.0
-python-glanceclient>=2.3.0,!=2.4.0 # Apache-2.0
-python-saharaclient>=0.18.0 # Apache-2.0
-
-paramiko>=2.0 # LGPLv2.1+
-pbr>=1.6 # Apache-2.0
-requests>=2.10.0 # Apache-2.0
-unittest2 # BSD
-PyYAML>=3.1.0 # MIT
-testresources>=0.2.4 # Apache-2.0/BSD
-nose # LGPL
-SQLAlchemy>=1.0.10,<1.1.0 # MIT
-alembic>=0.8.4 # MIT
-gevent
-keystonemiddleware>=4.0.0,!=4.1.0,!=4.5.0 # Apache-2.0
-oslo.config>=3.14.0 # Apache-2.0
-oslo.serialization>=1.10.0 # Apache-2.0
-oslo.utils>=3.16.0 # Apache-2.0
-pecan>=1.0.0,!=1.0.2,!=1.0.3,!=1.0.4 # BSD
-psycopg2>=2.5 # LGPL/ZPL
-stevedore>=1.16.0 # Apache-2.0
diff --git a/run_tests.sh b/run_tests.sh
deleted file mode 100755
index 8d26e11a..00000000
--- a/run_tests.sh
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -eu
-
-function usage {
-    echo "Usage: $0 [OPTION]..."
-    echo "Run fuel-ostf test suite(s)"
-    echo ""
-    echo "  -p, --flake8          Run FLAKE8 checks"
-    echo "  -P, --no-flake8       Don't run FLAKE8 checks"
-    echo "  -u, --unit            Run unit tests"
-    echo "  -U, --no-unit         Don't run unit tests"
-    echo "  -i, --integration     Run integration tests"
-    echo "  -I, --no-integration  Don't run integration tests"
-    echo "  -t, --tests           Run the given test files"
-    echo "  -h, --help            Print this usage message"
-    echo "  -c, --with-cover      Run tests with coverage"
-    echo ""
-    echo "Note: with no options specified, the script will try to run all available"
-    echo "      tests with all available checks."
-    exit
-}
-
-function process_options {
-    for arg in $@; do
-        case "$arg" in
-            -h|--help) usage;;
-            -p|--flake8) flake8_checks=1;;
-            -P|--no-flake8) no_flake8_checks=1;;
-            -u|--unit) unit_tests=1;;
-            -U|--no-unit) no_unit_tests=1;;
-            -i|--integration) integration_tests=1;;
-            -I|--no-integration) no_integration_tests=1;;
-            -t|--tests) certain_tests=1;;
-            -c|--with-cover) coverage=1;;
-            -*) testropts="$testropts $arg";;
-            *) testrargs="$testrargs $arg"
-        esac
-    done
-}
-
-# settings
-ROOT=$(dirname `readlink -f $0`)
-
-# test options
-testrargs=
-testropts="--with-timer --timer-warning=10 --timer-ok=2 --timer-top-n=10"
-
-# customizable options
-ARTIFACTS=${ARTIFACTS:-`pwd`/test_run}
-INTEGRATION_XUNIT=${INTEGRATION_XUNIT:-"$ROOT/integration.xml"}
-OSTF_SERVER_PORT=${OSRF_SERVER_PORT:-8777}
-UNIT_XUNIT=${UNIT_XUNIT:-"$ROOT/unittests.xml"}
-
-mkdir -p $ARTIFACTS
-
-# disabled/enabled flags that are set from the CLI.
-# used for manipulating run logic.
-flake8_checks=0
-no_flake8_checks=0
-unit_tests=0
-no_unit_tests=0
-integration_tests=0
-no_integration_tests=0
-certain_tests=0
-coverage=0
-
-function run_tests {
-    run_cleanup
-
-    # This variable collects all failed tests. It'll be printed at
-    # the end of this function as a small summary for the user.
-    local errors=""
-
-    # If tests were specified on the command line, then run only those tests
-    if [[ $certain_tests -eq 1 ]]; then
-        local result=0
-
-        for testfile in $testrargs; do
-            local testfile=`readlink -f $testfile`
-            local tf=`echo $testfile | cut -d':' -f1`
-
-            if [ ! -e $tf ]; then
-                echo "ERROR: File or directory $tf not found"
-                exit 1
-            fi
-
-            guess_test_run $testfile || result=1
-        done
-
-        exit $result
-    fi
-
-    # Enable all tests if none were specified, skipping all explicitly disabled tests.
-    if [[ $flake8_checks -eq 0 && \
-          $integration_tests -eq 0 && \
-          $unit_tests -eq 0 ]]; then
-
-        if [[ $no_flake8_checks -ne 1 ]]; then flake8_checks=1; fi
-        if [[ $no_unit_tests -ne 1 ]]; then unit_tests=1; fi
-        if [[ $no_integration_tests -ne 1 ]]; then integration_tests=1; fi
-    fi
-
-    # Run all enabled tests
-    if [[ $flake8_checks -eq 1 ]]; then
-        run_flake8 || errors+=" flake8_checks"
-    fi
-
-    if [[ $unit_tests -eq 1 ]]; then
-        run_unit_tests || errors+=" unit_tests"
-    fi
-
-    if [[ $integration_tests -eq 1 ]]; then
-        run_integration_tests || errors+=" integration_tests"
-    fi
-
-    # print failed tests
-    if [[ -n "$errors" ]]; then
-        echo Failed tests: $errors
-        exit 1
-    fi
-
-    exit 0
-}
-
-
-function guess_test_run {
-    local errors=""
-
-    if [[ $1 == *integration* ]]; then
-        run_integration_tests $1 || errors=$1
-    else
-        run_unit_tests $1 || errors=$1
-    fi
-
-    if [[ -n "${errors}" ]]; then
-        echo "ERROR: ${errors}"
-        return 1
-    fi
-}
-
-
-# Remove temporary files. No need to run manually, since it's
-# called automatically in the `run_tests` function.
-function run_cleanup {
-    find .
-type f -name "*.pyc" -delete - rm -f *.log - rm -f *.pid -} - - -function run_flake8 { - echo "Starting flake8 checks" - local result=0 - tox -e pep8 || result=1 - - return $result -} - - -function run_unit_tests { - echo "Starting unit tests" - if [[ $coverage -eq 1 ]]; then - testropts="$testropts --with-coverage --cover-package fuel_plugin" - fi - - local TESTS="$ROOT/fuel_plugin/testing/tests/unit" - local options="-vv $testropts --xunit-file $UNIT_XUNIT" - local result=0 - - if [[ $# -ne 0 ]]; then - TESTS=$@ - fi - - # run tests - tox -epy26 -- $options $TESTS || result=1 - - return $result -} - - -function create_ostf_conf { - local config_path=$1 - local artifacts_path=$2 - local SERVER_PORT=${3:-$OSTF_SERVER_PORT} - cat > $config_path < /dev/null -} - - -function cleardb { - local SERVER_SETTINGS=$1 - local RUN_CLEARDB="\ - ostf-server \ - --debug - --clear-db - --config-file $SERVER_SETTINGS" - - tox -evenv -- $RUN_CLEARDB > /dev/null -} - - -function run_integration_tests { - echo "Starting integration tests" - if [[ $coverage -eq 1 ]]; then - testropts="$testropts --with-coverage --cover-package fuel_plugin" - fi - - local TESTS="$ROOT/fuel_plugin/testing/tests/integration" - local options="-vv $testropts --xunit-file $INTEGRATION_XUNIT" - local result=0 - local artifacts=$ARTIFACTS/integration - local config=$artifacts/ostf.conf - mkdir -p $artifacts - - if [[ $# -ne 0 ]]; then - TESTS=$@ - fi - - create_ostf_conf $config $artifacts - - cleardb $config - syncdb $config - - # run tests - tox -epy26 -- $options $TESTS || result=1 - - return $result -} - - -# parse command line arguments and run the tests -process_options $@ -run_tests diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index b4b315a8..00000000 --- a/setup.cfg +++ /dev/null @@ -1,39 +0,0 @@ -[metadata] -name = fuel-ostf -version = 10.0.0 -summary = Library for cloud computing testing -author = Mirantis Inc. -author-email = product@mirantis.com -home-page = https://launchpad.net/fuel -license = Apache License, Version 2.0 -classifier = - Development Status :: 3 - Alpha - Environment :: OpenStack - Intended Audience :: Information Technology, - Intended Audience :: System Administrator', - License :: OSI Approved :: Apache Software License, - Operating System :: POSIX :: Linux, - Programming Language :: Python - Programming Language :: Python :: 2.7 - -[global] -setup-hooks = - pbr.hooks.setup_hook - fuel_health.hooks.setup_hook - -[files] -packages = - fuel_plugin - fuel_health - -[entry_points] -plugins= - nose = fuel_plugin.ostf_adapter.nose_plugin.nose_adapter:NoseDriver -console_scripts = - ostf-server = fuel_plugin.ostf_adapter.server:main - -[compile_catalog] -domain = fuel-ostf - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py deleted file mode 100644 index 76b2e9b7..00000000 --- a/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2013 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True, -) diff --git a/specs/fuel-ostf.spec b/specs/fuel-ostf.spec deleted file mode 100644 index b15b2727..00000000 --- a/specs/fuel-ostf.spec +++ /dev/null @@ -1,140 +0,0 @@ -%define name fuel-ostf -%define service_name ostf -%{!?version: %define version 10.0.0} -%{!?release: %define release 1} - -Summary: cloud computing testing -Name: %{name} -Version: %{version} -Release: %{release} -Source0: %{name}-%{version}.tar.gz -License: Apache -Group: Development/Libraries -BuildRoot: %{_tmppath}/%{name}-%{version}-buildroot -Prefix: %{_prefix} -BuildRequires: python-setuptools -BuildRequires: python-pbr >= 1.6 -BuildArch: noarch - -# fuel_health_reqs -Requires: python-aodhclient >= 0.1.0 -Requires: python-amqplib >= 1.0.2 -Requires: python-anyjson >= 0.3.3 -Requires: python-oslo-config >= 1.1.1 -Requires: python-ceilometerclient >= 1.0.9 -Requires: python-cinderclient >= 1.0.6 -Requires: python-ironicclient >= 0.3.3 -Requires: python-keystoneclient >= 0.11 -Requires: python-kombu >= 1:3.0.16 -Requires: python-novaclient >= 1:2.15.0 -Requires: python-heatclient >= 0.2.5 -Requires: python-muranoclient >= 0.2.11 -Requires: python-neutronclient >= 2.3.6 -Requires: python-saharaclient >= 0.6 -Requires: python-swiftclient >= 2.3.1 -Requires: python-glanceclient >= 0.14.1 -Requires: python-paramiko >= 1.10.1 -Requires: python-pbr >= 1.6 -Requires: python-requests >= 1.1 -Requires: python-unittest2 >= 0.5.1 -Requires: PyYAML >= 3.10 -Requires: python-testresources >= 0.2.7 -%if 0%{?rhel} >= 5 && 0%{?rhel} < 7 -Requires: python-argparse >= 1.2.1 -%endif - -# fuel_ostf_reqs -Requires: python-keystonemiddleware >= 1.2.0 -Requires: python-nose >= 1.3.0 -Requires: python-sqlalchemy >= 0.7.8 -Requires: python-alembic >= 0.5.0 -Requires: python-gevent >= 0.13.8 -Requires: python-pecan >= 0.3.0 -Requires: python-psycopg2 >= 2.5.1 -Requires: python-stevedore >= 0.10 -Requires: python-oslo-serialization >= 1.0.0 - - -%if 0%{?fedora} > 16 || 0%{?rhel} > 6 -Requires(post): systemd-units -Requires(preun): systemd-units -Requires(postun): systemd-units -BuildRequires: systemd-units -%endif - -# test_requires -#mock >= 1.0.1 -#pep8 >= 1.4.6 -#py >= 1.4.15 -#Requires: python-six >= 1.4.1 -#tox >= 1.5.0 - -#Requires: python-mako >= 0.8.1 -#Requires: python-markupsafe >= 0.18 -#Requires: python-webob >= 1.2.3 -#Requires: python-webtest >= 2.0.6 -#Requires: python-argparse >= 1.2.1 -#Requires: python-beautifulsoup4 >= 4.2.1 -#Requires: python-cliff >= 1.4 -#Requires: python-cmd2 >= 0.6.5.1 -#Requires: python-d2to1 >= 0.2.10 -#Requires: python-distribute >= 0.7.3 -#Requires: python-extras >= 0.0.3 -#Requires: python-greenlet >= 0.4.1 -#Requires: python-httplib2 >= 0.8 -#Requires: python-iso8601 >= 0.1.4 -#Requires: python-jsonpatch >= 1.1 -#Requires: python-jsonpointer >= 1.0 -#Requires: python-jsonschema >= 2.0.0 -#Requires: python-logutils >= 0.3.3 -#Requires: python-netaddr >= 0.7.10 -#Requires: python-ordereddict >= 1.1 -#Requires: python-pbr >= 0.5.21 -#Requires: python-prettytable >= 0.7.2 -#Requires: python-psycogreen >= 1.0 -#Requires: python-pyopenssl >= 0.13 -#Requires: python-crypto >= 2.6 -#Requires: pyparsing >= 1.5.6 -#Requires: python-mimeparse >= 0.1.4 -#Requires: 
python-setuptools-git >= 1.0
-#Requires: python-simplegeneric >= 0.8.1
-#Requires: python-simplejson >= 3.3.0
-#Requires: python-testtools >= 0.9.32
-#Requires: python-waitress >= 0.8.5
-#Requires: python-warlock >= 1.0.1
-#Requires: python-wsgiref >= 0.1.2
-
-
-%description
-fuel-ostf-tests
-
-%prep
-%setup -cn %{name}-%{version}
-
-%build
-cd %{_builddir}/%{name}-%{version} && python setup.py build
-
-%install
- cd %{_builddir}/%{name}-%{version} && python setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=%{_builddir}/%{name}-%{version}/INSTALLED_FILES
-
-%if %{defined _unitdir}
-install -D -m644 %{_builddir}/%{name}-%{version}/%{service_name}.service %{buildroot}/%{_unitdir}/%{service_name}.service
-%endif
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files -f %{_builddir}/%{name}-%{version}/INSTALLED_FILES
-%defattr(-,root,root)
-%if %{defined _unitdir}
-/%{_unitdir}/%{service_name}.service
-
-%post
-%systemd_post %{service_name}.service
-
-%preun
-%systemd_preun %{service_name}.service
-
-%postun
-%systemd_postun_with_restart %{service_name}.service
-%endif
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index e4773bc9..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
--r requirements.txt
-WebTest>=2.0.17
-mock==1.0.1
-nose-timer>=0.4.3
-requests-mock>=0.5.1
-tox>=1.7.1
-coverage==3.6
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
deleted file mode 100755
index 07a07854..00000000
--- a/tools/test-setup.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash -xe
-
-# This script will be run by OpenStack CI before unit tests are run,
-# it sets up the test system as needed.
-# Developers should setup their test systems in a similar way.
-
-# This setup needs to be run as a user that can run sudo.
-
-# The root password for the MySQL database; pass it in via
-# MYSQL_ROOT_PW.
-DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}
-
-# This user and its password are used by the tests, if you change it,
-# your tests might fail.
-DB_USER=openstack_citest
-DB_PW=openstack_citest
-
-sudo -H mysqladmin -u root password $DB_ROOT_PW
-
-# It's best practice to remove anonymous users from the database. If
-# an anonymous user exists, then it matches first for connections and
-# other connections from that host will not work.
-sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
-    DELETE FROM mysql.user WHERE User='';
-    FLUSH PRIVILEGES;
-    GRANT ALL PRIVILEGES ON *.*
-    TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"
-
-# Now create our database.
-mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
-    SET default_storage_engine=MYISAM;
-    DROP DATABASE IF EXISTS openstack_citest;
-    CREATE DATABASE openstack_citest CHARACTER SET utf8;"
-
-# Same for PostgreSQL
-# The root password for the PostgreSQL database; pass it in via
-# POSTGRES_ROOT_PW.
-DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} - -# Setup user -root_roles=$(sudo -H -u postgres psql -t -c " - SELECT 'HERE' from pg_roles where rolname='$DB_USER'") -if [[ ${root_roles} == *HERE ]];then - sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -else - sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -fi - -# Store password for tests -cat << EOF > $HOME/.pgpass -*:*:*:$DB_USER:$DB_PW -EOF -chmod 0600 $HOME/.pgpass - -# Now create our database -psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" -createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 558d1ddf..00000000 --- a/tox.ini +++ /dev/null @@ -1,52 +0,0 @@ -# Tox (http://tox.testrun.org/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. - -[tox] -minversion = 1.6 -envlist = py27,pep8 -skipsdist = True - -[testenv] -usedevelop = True -whitelist_externals = bash -setenv = VIRTUAL_ENV={envdir} - CUSTOM_OSTF_CONFIG={toxinidir}/etc/tools/ostf_ci.conf - OSTF_LOGS={toxinidir}/ostf - OSTF_DB=openstack_citest - OSTF_DB_USER=openstack_citest - OSTF_DB_PW=openstack_citest - OSTF_DB_ROOT=postgres - OSTF_DB_ROOTPW=insecure_slave - OSTF_DB_ROOTPGPASS={toxinidir}/pgpass -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY CUSTOM_OSTF_CONFIG -deps = -r{toxinidir}/test-requirements.txt -commands = - /bin/bash "{toxinidir}/etc/tools/prepare_settings.sh" - /bin/bash "{toxinidir}/etc/tools/prepare_database.sh" - ostf-server --config-file {toxinidir}/etc/tools/ostf_ci.conf --after-initialization-environment-hook - nosetests {posargs:fuel_plugin/testing/tests/unit} - nosetests {posargs:fuel_plugin/testing/tests/integration} - -[testenv:cover] -setenv = NOSE_WITH_COVERAGE=1 - -[testenv:venv] -deps = -r{toxinidir}/requirements.txt -commands = {posargs:} - -[testenv:pep8] -deps = hacking==0.7 -usedevelop = False -commands = - flake8 {posargs} - -[flake8] -exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,__init__.py,docs -show-pep8 = True -show-source = True -count = True - -[hacking] -import_exceptions = testtools.matchers