From 5288ade765669c177fdef7119a68a8a3f39dad22 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sat, 9 Sep 2023 12:26:36 -0700 Subject: [PATCH] Retire compute-hyperv: remove repo content The Winstackers project has been retired - https://review.opendev.org/c/openstack/governance/+/886880 This commit removes the content of the compute-hyperv deliverable of that project. Depends-On: https://review.opendev.org/c/openstack/project-config/+/894408/1 Change-Id: I06e90d64c0211f87d3b1347667b27e5c81f85dac --- .coveragerc | 7 - .gitignore | 57 - .mailmap | 3 - .stestr.conf | 3 - .zuul.yaml | 19 - CONTRIBUTING.rst | 19 - HACKING.rst | 4 - LICENSE | 176 -- MANIFEST.in | 6 - README.rst | 24 +- babel.cfg | 2 - compute_hyperv/__init__.py | 15 - compute_hyperv/hacking/__init__.py | 0 compute_hyperv/hacking/checks.py | 521 ---- compute_hyperv/i18n.py | 36 - compute_hyperv/nova/README.rst | 44 - compute_hyperv/nova/__init__.py | 0 compute_hyperv/nova/block_device_manager.py | 362 --- compute_hyperv/nova/cluster/__init__.py | 0 compute_hyperv/nova/cluster/clusterops.py | 305 -- compute_hyperv/nova/cluster/driver.py | 103 - .../nova/cluster/livemigrationops.py | 125 - compute_hyperv/nova/cluster/volumeops.py | 50 - compute_hyperv/nova/conf.py | 90 - compute_hyperv/nova/constants.py | 117 - compute_hyperv/nova/coordination.py | 160 -- compute_hyperv/nova/driver.py | 479 ---- compute_hyperv/nova/eventhandler.py | 92 - compute_hyperv/nova/hostops.py | 386 --- compute_hyperv/nova/imagecache.py | 291 -- compute_hyperv/nova/livemigrationops.py | 158 -- compute_hyperv/nova/migrationops.py | 471 ---- compute_hyperv/nova/pathutils.py | 333 --- compute_hyperv/nova/pdk.py | 79 - compute_hyperv/nova/rdpconsoleops.py | 41 - compute_hyperv/nova/serialconsolehandler.py | 173 -- compute_hyperv/nova/serialconsoleops.py | 140 - compute_hyperv/nova/serialproxy.py | 129 - compute_hyperv/nova/snapshotops.py | 141 - compute_hyperv/nova/utils/__init__.py | 0 compute_hyperv/nova/utils/placement.py | 128 - compute_hyperv/nova/vif.py | 124 - compute_hyperv/nova/vmops.py | 1500 ---------- compute_hyperv/nova/volumeops.py | 780 ------ compute_hyperv/tests/__init__.py | 0 compute_hyperv/tests/fake_instance.py | 85 - compute_hyperv/tests/test.py | 161 -- compute_hyperv/tests/unit/__init__.py | 0 compute_hyperv/tests/unit/cluster/__init__.py | 0 .../tests/unit/cluster/test_clusterops.py | 443 --- .../tests/unit/cluster/test_driver.py | 204 -- .../unit/cluster/test_livemigrationops.py | 203 -- .../tests/unit/cluster/test_volumeops.py | 50 - compute_hyperv/tests/unit/test_base.py | 52 - .../tests/unit/test_block_device_manager.py | 631 ----- .../tests/unit/test_coordination.py | 116 - compute_hyperv/tests/unit/test_driver.py | 672 ----- .../tests/unit/test_eventhandler.py | 92 - compute_hyperv/tests/unit/test_hostops.py | 436 --- compute_hyperv/tests/unit/test_imagecache.py | 329 --- .../tests/unit/test_livemigrationops.py | 248 -- .../tests/unit/test_migrationops.py | 647 ----- compute_hyperv/tests/unit/test_pathutils.py | 491 ---- compute_hyperv/tests/unit/test_pdk.py | 131 - .../tests/unit/test_rdpconsoleops.py | 45 - .../tests/unit/test_serialconsolehandler.py | 263 -- .../tests/unit/test_serialconsoleops.py | 146 - compute_hyperv/tests/unit/test_serialproxy.py | 130 - compute_hyperv/tests/unit/test_snapshotops.py | 147 - compute_hyperv/tests/unit/test_vif.py | 126 - compute_hyperv/tests/unit/test_vmops.py | 2491 ----------------- compute_hyperv/tests/unit/test_volumeops.py | 1208 -------- compute_hyperv/tests/unit/utils/__init__.py | 0 
.../tests/unit/utils/test_placement.py | 207 -- doc/requirements.txt | 11 - doc/source/conf.py | 86 - doc/source/configuration/config.rst | 16 - doc/source/configuration/index.rst | 234 -- doc/source/configuration/sample_config.rst | 22 - doc/source/contributing.rst | 92 - doc/source/index.rst | 33 - doc/source/install/index.rst | 33 - doc/source/install/install.rst | 63 - doc/source/install/next-steps.rst | 91 - doc/source/install/prerequisites.rst | 340 --- doc/source/install/verify.rst | 13 - doc/source/readme.rst | 1 - doc/source/troubleshooting/index.rst | 83 - doc/source/usage/index.rst | 491 ---- etc/compute-hyperv-config-generator.conf | 10 - nova/__init__.py | 26 - nova/virt/__init__.py | 15 - nova/virt/compute_hyperv/__init__.py | 0 nova/virt/compute_hyperv/cluster/__init__.py | 0 nova/virt/compute_hyperv/cluster/driver.py | 22 - nova/virt/compute_hyperv/driver.py | 24 - openstack-common.conf | 6 - releasenotes/notes/.placeholder | 0 ...er-distributed-locks-5f12252af6b3913b.yaml | 11 - .../drop-ovs-support-616ec2952580c93d.yaml | 6 - .../notes/drop-py-2-7-5cd36052d5c2e594.yaml | 6 - ...2-support-deprecated-02a956e3926351d6.yaml | 6 - .../instance-evacuate-2c46e63e3a6ae9c4.yaml | 4 - ...online-volume-resize-446d58c9f6f340b6.yaml | 7 - .../notes/rbd-support-9bb0037f69249785.yaml | 5 - ...date-device-metadata-7204fb0e85bad1e3.yaml | 5 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 281 -- releasenotes/source/index.rst | 15 - releasenotes/source/train.rst | 6 - releasenotes/source/unreleased.rst | 5 - releasenotes/source/ussuri.rst | 6 - releasenotes/source/victoria.rst | 6 - releasenotes/source/wallaby.rst | 6 - releasenotes/source/xena.rst | 6 - releasenotes/source/yoga.rst | 6 - releasenotes/source/zed.rst | 6 - requirements.txt | 19 - setup.cfg | 50 - setup.py | 20 - test-requirements.txt | 16 - tools/tox_install.sh | 56 - tox.ini | 117 - 124 files changed, 9 insertions(+), 19821 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .mailmap delete mode 100644 .stestr.conf delete mode 100644 .zuul.yaml delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE delete mode 100644 MANIFEST.in delete mode 100644 babel.cfg delete mode 100644 compute_hyperv/__init__.py delete mode 100644 compute_hyperv/hacking/__init__.py delete mode 100644 compute_hyperv/hacking/checks.py delete mode 100644 compute_hyperv/i18n.py delete mode 100644 compute_hyperv/nova/README.rst delete mode 100644 compute_hyperv/nova/__init__.py delete mode 100644 compute_hyperv/nova/block_device_manager.py delete mode 100644 compute_hyperv/nova/cluster/__init__.py delete mode 100644 compute_hyperv/nova/cluster/clusterops.py delete mode 100644 compute_hyperv/nova/cluster/driver.py delete mode 100644 compute_hyperv/nova/cluster/livemigrationops.py delete mode 100644 compute_hyperv/nova/cluster/volumeops.py delete mode 100644 compute_hyperv/nova/conf.py delete mode 100644 compute_hyperv/nova/constants.py delete mode 100644 compute_hyperv/nova/coordination.py delete mode 100644 compute_hyperv/nova/driver.py delete mode 100644 compute_hyperv/nova/eventhandler.py delete mode 100644 compute_hyperv/nova/hostops.py delete mode 100644 compute_hyperv/nova/imagecache.py delete mode 100644 compute_hyperv/nova/livemigrationops.py delete mode 100644 compute_hyperv/nova/migrationops.py delete mode 100644 compute_hyperv/nova/pathutils.py delete mode 100644 compute_hyperv/nova/pdk.py 
delete mode 100644 compute_hyperv/nova/rdpconsoleops.py delete mode 100644 compute_hyperv/nova/serialconsolehandler.py delete mode 100644 compute_hyperv/nova/serialconsoleops.py delete mode 100644 compute_hyperv/nova/serialproxy.py delete mode 100644 compute_hyperv/nova/snapshotops.py delete mode 100644 compute_hyperv/nova/utils/__init__.py delete mode 100644 compute_hyperv/nova/utils/placement.py delete mode 100644 compute_hyperv/nova/vif.py delete mode 100644 compute_hyperv/nova/vmops.py delete mode 100644 compute_hyperv/nova/volumeops.py delete mode 100644 compute_hyperv/tests/__init__.py delete mode 100644 compute_hyperv/tests/fake_instance.py delete mode 100644 compute_hyperv/tests/test.py delete mode 100644 compute_hyperv/tests/unit/__init__.py delete mode 100644 compute_hyperv/tests/unit/cluster/__init__.py delete mode 100644 compute_hyperv/tests/unit/cluster/test_clusterops.py delete mode 100644 compute_hyperv/tests/unit/cluster/test_driver.py delete mode 100644 compute_hyperv/tests/unit/cluster/test_livemigrationops.py delete mode 100644 compute_hyperv/tests/unit/cluster/test_volumeops.py delete mode 100644 compute_hyperv/tests/unit/test_base.py delete mode 100644 compute_hyperv/tests/unit/test_block_device_manager.py delete mode 100644 compute_hyperv/tests/unit/test_coordination.py delete mode 100644 compute_hyperv/tests/unit/test_driver.py delete mode 100644 compute_hyperv/tests/unit/test_eventhandler.py delete mode 100644 compute_hyperv/tests/unit/test_hostops.py delete mode 100644 compute_hyperv/tests/unit/test_imagecache.py delete mode 100644 compute_hyperv/tests/unit/test_livemigrationops.py delete mode 100644 compute_hyperv/tests/unit/test_migrationops.py delete mode 100644 compute_hyperv/tests/unit/test_pathutils.py delete mode 100644 compute_hyperv/tests/unit/test_pdk.py delete mode 100644 compute_hyperv/tests/unit/test_rdpconsoleops.py delete mode 100644 compute_hyperv/tests/unit/test_serialconsolehandler.py delete mode 100644 compute_hyperv/tests/unit/test_serialconsoleops.py delete mode 100644 compute_hyperv/tests/unit/test_serialproxy.py delete mode 100644 compute_hyperv/tests/unit/test_snapshotops.py delete mode 100644 compute_hyperv/tests/unit/test_vif.py delete mode 100644 compute_hyperv/tests/unit/test_vmops.py delete mode 100644 compute_hyperv/tests/unit/test_volumeops.py delete mode 100644 compute_hyperv/tests/unit/utils/__init__.py delete mode 100644 compute_hyperv/tests/unit/utils/test_placement.py delete mode 100644 doc/requirements.txt delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/configuration/config.rst delete mode 100644 doc/source/configuration/index.rst delete mode 100644 doc/source/configuration/sample_config.rst delete mode 100644 doc/source/contributing.rst delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/install/install.rst delete mode 100644 doc/source/install/next-steps.rst delete mode 100644 doc/source/install/prerequisites.rst delete mode 100644 doc/source/install/verify.rst delete mode 100644 doc/source/readme.rst delete mode 100644 doc/source/troubleshooting/index.rst delete mode 100644 doc/source/usage/index.rst delete mode 100644 etc/compute-hyperv-config-generator.conf delete mode 100644 nova/__init__.py delete mode 100644 nova/virt/__init__.py delete mode 100644 nova/virt/compute_hyperv/__init__.py delete mode 100644 nova/virt/compute_hyperv/cluster/__init__.py delete mode 100644 nova/virt/compute_hyperv/cluster/driver.py delete mode 100644 
nova/virt/compute_hyperv/driver.py delete mode 100644 openstack-common.conf delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/cluster-distributed-locks-5f12252af6b3913b.yaml delete mode 100644 releasenotes/notes/drop-ovs-support-616ec2952580c93d.yaml delete mode 100644 releasenotes/notes/drop-py-2-7-5cd36052d5c2e594.yaml delete mode 100644 releasenotes/notes/hyper-v-server-2012-support-deprecated-02a956e3926351d6.yaml delete mode 100644 releasenotes/notes/instance-evacuate-2c46e63e3a6ae9c4.yaml delete mode 100644 releasenotes/notes/online-volume-resize-446d58c9f6f340b6.yaml delete mode 100644 releasenotes/notes/rbd-support-9bb0037f69249785.yaml delete mode 100644 releasenotes/notes/update-device-metadata-7204fb0e85bad1e3.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/train.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 releasenotes/source/ussuri.rst delete mode 100644 releasenotes/source/victoria.rst delete mode 100644 releasenotes/source/wallaby.rst delete mode 100644 releasenotes/source/xena.rst delete mode 100644 releasenotes/source/yoga.rst delete mode 100644 releasenotes/source/zed.rst delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100755 tools/tox_install.sh delete mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 34e72289..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = compute_hyperv -omit = compute_hyperv/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 9e9ab0f4..00000000 --- a/.gitignore +++ /dev/null @@ -1,57 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -.stestr/ -.venv - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/_static/compute-hyperv.conf.sample -etc/compute-hyperv.conf.sample - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? - -# Files created by releasenotes build -releasenotes/build diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe..00000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index cef1311f..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./compute_hyperv/tests/unit -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index a6e4abe0..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,19 +0,0 @@ -- job: - name: compute-hyperv-openstack-tox-docs - parent: openstack-tox-docs - description: | - Run tox docs job with the nova dependency. 
- required-projects: - - openstack/nova - -- project: - templates: - - build-release-notes-jobs-python3 - - check-requirements - - openstack-python3-jobs - check: - jobs: - - compute-hyperv-openstack-tox-docs - gate: - jobs: - - compute-hyperv-openstack-tox-docs diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 2098f48c..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/compute-hyperv - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Launchpad: - - https://bugs.launchpad.net/compute-hyperv - -For more specific information about contributing to this repository, see the -compute-hyperv contributor guide: - - https://docs.openstack.org/compute-hyperv/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 5f08aff5..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -compute-hyperv Style Commandments -=============================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c978a52d..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc diff --git a/README.rst b/README.rst index d8b7bbad..e85d62c0 100644 --- a/README.rst +++ b/README.rst @@ -1,16 +1,10 @@ -=============================== -compute-hyperv -=============================== +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -Hyper-V Nova Driver - -* Free software: Apache license -* Documentation: https://docs.openstack.org/nova/pike/admin/configuration/hypervisors.html -* Source: http://git.openstack.org/cgit/openstack/compute-hyperv -* Bugs: http://bugs.launchpad.net/compute-hyperv -* Release Notes: https://docs.openstack.org/releasenotes/compute-hyperv - -Features --------- - -* TODO +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/compute_hyperv/__init__.py b/compute_hyperv/__init__.py deleted file mode 100644 index 42067924..00000000 --- a/compute_hyperv/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -__import__('pkg_resources').declare_namespace(__name__) diff --git a/compute_hyperv/hacking/__init__.py b/compute_hyperv/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/hacking/checks.py b/compute_hyperv/hacking/checks.py deleted file mode 100644 index dd80b19c..00000000 --- a/compute_hyperv/hacking/checks.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright (c) 2012, Cloudscaling -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -import re - -from hacking import core - -""" -Guidelines for writing new hacking checks - - - Use only for Nova specific tests. OpenStack general tests - should be submitted to the common 'hacking' module. - - Pick numbers in the range N3xx. Find the current test with - the highest allocated number and then pick the next value. - - Keep the test method code in the source file ordered based - on the N3xx value. - - List the new rule in the top level HACKING.rst file - - Add test cases for each new rule to nova/tests/unit/test_hacking.py - -""" - -UNDERSCORE_IMPORT_FILES = [] - -session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]") -cfg_re = re.compile(r".*\scfg\.") -vi_header_re = re.compile(r"^#\s+vim?:.+") -virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/") -virt_import_re = re.compile( - r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)") -virt_config_re = re.compile( - r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)") -author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor:"), - re.compile("^\.\.\s+moduleauthor::")) -asse_trueinst_re = re.compile( - r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " - r"(\w|\.|\'|\"|\[|\])+\)\)") -asse_equal_type_re = re.compile( - r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " - "(\w|\.|\'|\"|\[|\])+\)") -asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\(" - r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") -asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\(" - r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") -asse_equal_end_with_none_re = re.compile( - r"assertEqual\(.*?,\s+None\)$") -asse_equal_start_with_none_re = re.compile( - r"assertEqual\(None,") -# NOTE(snikitin): Next two regexes weren't united to one for more readability. -# asse_true_false_with_in_or_not_in regex checks -# assertTrue/False(A in B) cases where B argument has no spaces -# asse_true_false_with_in_or_not_in_spaces regex checks cases -# where B argument has spaces and starts/ends with [, ', ". -# For example: [1, 2, 3], "some string", 'another string'. -# We have to separate these regexes to escape a false positives -# results. B argument should have spaces only if it starts -# with [, ", '. Otherwise checking of string -# "assertFalse(A in B and C in D)" will be false positives. -# In this case B argument is "B and C in D". -asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\(" - r"(\w|[][.'\"])+( not)? 
in (\w|[][.'\",])+(, .*)?\)") -asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)" - r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" - r"[\[|'|\"](, .*)?\)") -asse_raises_regexp = re.compile(r"assertRaisesRegexp\(") -conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w") -translated_log = re.compile( - r"(.)*LOG\.(audit|debug|error|info|critical|exception|warning)" - "\(\s*_\(\s*('|\")") -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") -string_translation = re.compile(r"[^_]*_\(\s*('|\")") -underscore_import_check = re.compile(r"(.)*import _(.)*") -import_translation_for_log_or_exception = re.compile( - r"(.)*(from\snova.i18n\simport)\s_") -# We need this for cases where they have created their own _ function. -custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") -api_version_re = re.compile(r"@.*api_version") -dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") -decorator_re = re.compile(r"@.*") - -# TODO(dims): When other oslo libraries switch over non-namespace'd -# imports, we need to add them to the regexp below. -oslo_namespace_imports = re.compile(r"from[\s]*oslo[.]" - r"(concurrency|config|db|i18n|messaging|" - r"middleware|serialization|utils|vmware)") -oslo_namespace_imports_2 = re.compile(r"from[\s]*oslo[\s]*import[\s]*" - r"(concurrency|config|db|i18n|messaging|" - r"middleware|serialization|utils|vmware)") -oslo_namespace_imports_3 = re.compile(r"import[\s]*oslo\." - r"(concurrency|config|db|i18n|messaging|" - r"middleware|serialization|utils|vmware)") - - -class BaseASTChecker(ast.NodeVisitor): - """Provides a simple framework for writing AST-based checks. - - Subclasses should implement visit_* methods like any other AST visitor - implementation. When they detect an error for a particular node the - method should call ``self.add_error(offending_node)``. Details about - where in the code the error occurred will be pulled from the node - object. - - Subclasses should also provide a class variable named CHECK_DESC to - be used for the human readable error message. - - """ - - def __init__(self, tree, filename): - """This object is created automatically by pep8. - - :param tree: an AST tree - :param filename: name of the file being analyzed - (ignored by our checks) - """ - self._tree = tree - self._errors = [] - - def run(self): - """Called automatically by pep8.""" - self.visit(self._tree) - return self._errors - - def add_error(self, node, message=None): - """Add an error caused by a node to the list of errors for pep8.""" - message = message or self.CHECK_DESC - error = (node.lineno, node.col_offset, message, self.__class__) - self._errors.append(error) - - def _check_call_names(self, call_node, names): - if isinstance(call_node, ast.Call): - if isinstance(call_node.func, ast.Name): - if call_node.func.id in names: - return True - return False - - -@core.flake8ext -def import_no_db_in_virt(logical_line, filename): - """Check for db calls from nova/virt - - As of grizzly-2 all the database calls have been removed from - nova/virt, and we want to keep it that way. 
- - N307 - """ - if "nova/virt" in filename and not filename.endswith("fake.py"): - if logical_line.startswith("from nova import db"): - yield (0, "N307: nova.db import not allowed in nova/virt/*") - - -@core.flake8ext -def no_db_session_in_public_api(logical_line, filename): - if "db/api.py" in filename: - if session_check.match(logical_line): - yield (0, "N309: public db api methods may not accept session") - - -@core.flake8ext -def use_timeutils_utcnow(logical_line, filename): - # tools are OK to use the standard datetime module - if "/tools/" in filename: - return - - msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()" - - datetime_funcs = ['now', 'utcnow'] - for f in datetime_funcs: - pos = logical_line.find('datetime.%s' % f) - if pos != -1: - yield (pos, msg % f) - - -def _get_virt_name(regex, data): - m = regex.match(data) - if m is None: - return None - driver = m.group(1) - # Ignore things we mis-detect as virt drivers in the regex - if driver in ["test_virt_drivers", "driver", "firewall", - "disk", "api", "imagecache", "cpu", "hardware"]: - return None - # TODO(berrange): remove once bugs 1261826 and 126182 are - # fixed, or baremetal driver is removed, which is first. - if driver == "baremetal": - return None - return driver - - -@core.flake8ext -def import_no_virt_driver_import_deps(physical_line, filename): - """Check virt drivers' modules aren't imported by other drivers - - Modules under each virt driver's directory are - considered private to that virt driver. Other drivers - in Nova must not access those drivers. Any code that - is to be shared should be refactored into a common - module - - N311 - """ - thisdriver = _get_virt_name(virt_file_re, filename) - thatdriver = _get_virt_name(virt_import_re, physical_line) - if (thatdriver is not None and - thisdriver is not None and - thisdriver != thatdriver): - return (0, "N311: importing code from other virt drivers forbidden") - - -@core.flake8ext -def import_no_virt_driver_config_deps(physical_line, filename): - """Check virt drivers' config vars aren't used by other drivers - - Modules under each virt driver's directory are - considered private to that virt driver. Other drivers - in Nova must not use their config vars. 
Any config vars - that are to be shared should be moved into a common module - - N312 - """ - thisdriver = _get_virt_name(virt_file_re, filename) - thatdriver = _get_virt_name(virt_config_re, physical_line) - if (thatdriver is not None and - thisdriver is not None and - thisdriver != thatdriver): - return (0, "N312: using config vars from other virt drivers forbidden") - - -@core.flake8ext -def capital_cfg_help(logical_line, tokens): - msg = "N313: capitalize help string" - - if cfg_re.match(logical_line): - for t in range(len(tokens)): - if tokens[t][1] == "help": - txt = tokens[t + 2][1] - if len(txt) > 1 and txt[1].islower(): - yield(0, msg) - - -@core.flake8ext -def assert_true_instance(logical_line): - """Check for assertTrue(isinstance(a, b)) sentences - - N316 - """ - if asse_trueinst_re.match(logical_line): - yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed") - - -@core.flake8ext -def assert_equal_type(logical_line): - """Check for assertEqual(type(A), B) sentences - - N317 - """ - if asse_equal_type_re.match(logical_line): - yield (0, "N317: assertEqual(type(A), B) sentences not allowed") - - -@core.flake8ext -def assert_equal_none(logical_line): - """Check for assertEqual(A, None) or assertEqual(None, A) sentences - - N318 - """ - res = (asse_equal_start_with_none_re.search(logical_line) or - asse_equal_end_with_none_re.search(logical_line)) - if res: - yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) " - "sentences not allowed") - - -@core.flake8ext -def no_translate_logs(logical_line): - """Check for 'LOG.*(_(' - - Starting with the Pike series, OpenStack no longer supports log - translation. We shouldn't translate logs. - - - This check assumes that 'LOG' is a logger. - - Use filename so we can start enforcing this in specific folders - instead of needing to do so all at once. - - C312 - """ - if translated_log.match(logical_line): - yield(0, "C312: Log messages should not be translated!") - - -@core.flake8ext -def no_import_translation_in_tests(logical_line, filename): - """Check for 'from nova.i18n import _' - N337 - """ - if 'nova/tests/' in filename: - res = import_translation_for_log_or_exception.match(logical_line) - if res: - yield(0, "N337 Don't import translation in tests") - - -@core.flake8ext -def no_setting_conf_directly_in_tests(logical_line, filename): - """Check for setting CONF.* attributes directly in tests - - The value can leak out of tests affecting how subsequent tests run. - Using self.flags(option=value) is the preferred method to temporarily - set config options in tests. - - N320 - """ - if 'nova/tests/' in filename: - res = conf_attribute_set_re.match(logical_line) - if res: - yield (0, "N320: Setting CONF.* attributes directly in tests is " - "forbidden. Use self.flags(option=value) instead") - - -@core.flake8ext -def no_mutable_default_args(logical_line): - msg = "N322: Method's default argument shouldn't be mutable!" - if mutable_default_args.match(logical_line): - yield (0, msg) - - -@core.flake8ext -def check_explicit_underscore_import(logical_line, filename): - """Check for explicit import of the _ function - - We need to ensure that any files that are using the _() function - to translate logs are explicitly importing the _ function. We - can't trust unit test to catch whether the import has been - added so we need to check for it here. - """ - - # Build a list of the files that have _ imported. No further - # checking needed once it is found. 
- if filename in UNDERSCORE_IMPORT_FILES: - pass - elif (underscore_import_check.match(logical_line) or - custom_underscore_check.match(logical_line)): - UNDERSCORE_IMPORT_FILES.append(filename) - elif string_translation.match(logical_line): - yield(0, "N323: Found use of _() without explicit import of _ !") - - -@core.flake8ext -def use_jsonutils(logical_line, filename): - # the code below that path is not meant to be executed from neutron - # tree where jsonutils module is present, so don't enforce its usage - # for this subdirectory - if "plugins/xenserver" in filename: - return - - # tools are OK to use the standard json module - if "/tools/" in filename: - return - - msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s" - - if "json." in logical_line: - json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] - for f in json_funcs: - pos = logical_line.find('json.%s' % f) - if pos != -1: - yield (pos, msg % {'fun': f[:-1]}) - - -@core.flake8ext -def check_api_version_decorator(logical_line, previous_logical, blank_before, - filename): - msg = ("N332: the api_version decorator must be the first decorator" - " on a method.") - if blank_before == 0 and re.match(api_version_re, logical_line) \ - and re.match(decorator_re, previous_logical): - yield(0, msg) - - -class CheckForStrUnicodeExc(BaseASTChecker): - """Checks for the use of str() or unicode() on an exception. - - This currently only handles the case where str() or unicode() - is used in the scope of an exception handler. If the exception - is passed into a function, returned from an assertRaises, or - used on an exception created in the same scope, this does not - catch it. - """ - - name = "check_for_str_unicode_exc" - version = "1.0" - CHECK_DESC = ('N325 str() and unicode() cannot be used on an ' - 'exception. Remove or use six.text_type()') - - def __init__(self, tree, filename): - super(CheckForStrUnicodeExc, self).__init__(tree, filename) - self.name = [] - self.already_checked = [] - - def visit_TryExcept(self, node): - for handler in node.handlers: - if handler.name: - self.name.append(handler.name.id) - super(CheckForStrUnicodeExc, self).generic_visit(node) - self.name = self.name[:-1] - else: - super(CheckForStrUnicodeExc, self).generic_visit(node) - - def visit_Call(self, node): - if self._check_call_names(node, ['str', 'unicode']): - if node not in self.already_checked: - self.already_checked.append(node) - if isinstance(node.args[0], ast.Name): - if node.args[0].id in self.name: - self.add_error(node.args[0]) - super(CheckForStrUnicodeExc, self).generic_visit(node) - - -class CheckForTransAdd(BaseASTChecker): - """Checks for the use of concatenation on a translated string. - - Translations should not be concatenated with other strings, but - should instead include the string being added to the translated - string to give the translators the most information. - """ - - name = "check_for_trans_add" - version = "1.0" - CHECK_DESC = ('N326 Translated messages cannot be concatenated. 
' - 'String should be included in translated message.') - - TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC'] - - def visit_BinOp(self, node): - if isinstance(node.op, ast.Add): - if self._check_call_names(node.left, self.TRANS_FUNC): - self.add_error(node.left) - elif self._check_call_names(node.right, self.TRANS_FUNC): - self.add_error(node.right) - super(CheckForTransAdd, self).generic_visit(node) - - -@core.flake8ext -def check_oslo_namespace_imports(logical_line, blank_before, filename): - if re.match(oslo_namespace_imports, logical_line): - msg = ("N333: '%s' must be used instead of '%s'.") % ( - logical_line.replace('oslo.', 'oslo_'), - logical_line) - yield(0, msg) - match = re.match(oslo_namespace_imports_2, logical_line) - if match: - msg = ("N333: 'module %s should not be imported " - "from oslo namespace.") % match.group(1) - yield(0, msg) - match = re.match(oslo_namespace_imports_3, logical_line) - if match: - msg = ("N333: 'module %s should not be imported " - "from oslo namespace.") % match.group(1) - yield(0, msg) - - -@core.flake8ext -def assert_true_or_false_with_in(logical_line): - """Check for assertTrue/False(A in B), assertTrue/False(A not in B), - assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) - sentences. - - N334 - """ - res = (asse_true_false_with_in_or_not_in.search(logical_line) or - asse_true_false_with_in_or_not_in_spaces.search(logical_line)) - if res: - yield (0, "N334: Use assertIn/NotIn(A, B) rather than " - "assertTrue/False(A in/not in B) when checking collection " - "contents.") - - -@core.flake8ext -def assert_raises_regexp(logical_line): - """Check for usage of deprecated assertRaisesRegexp - - N335 - """ - res = asse_raises_regexp.search(logical_line) - if res: - yield (0, "N335: assertRaisesRegex must be used instead " - "of assertRaisesRegexp") - - -@core.flake8ext -def dict_constructor_with_list_copy(logical_line): - msg = ("N336: Must use a dict comprehension instead of a dict constructor" - " with a sequence of key-value pairs." - ) - if dict_constructor_with_list_copy_re.match(logical_line): - yield (0, msg) - - -@core.flake8ext -def assert_equal_in(logical_line): - """Check for assertEqual(A in B, True), assertEqual(True, A in B), - assertEqual(A in B, False) or assertEqual(False, A in B) sentences - - N338 - """ - res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or - asse_equal_in_end_with_true_or_false_re.search(logical_line)) - if res: - yield (0, "N338: Use assertIn/NotIn(A, B) rather than " - "assertEqual(A in B, True/False) when checking collection " - "contents.") diff --git a/compute_hyperv/i18n.py b/compute_hyperv/i18n.py deleted file mode 100644 index 31733168..00000000 --- a/compute_hyperv/i18n.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . 
- -""" - -import oslo_i18n - -DOMAIN = 'nova' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def translate(value, user_locale): - return oslo_i18n.translate(value, user_locale) - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/compute_hyperv/nova/README.rst b/compute_hyperv/nova/README.rst deleted file mode 100644 index c0609f31..00000000 --- a/compute_hyperv/nova/README.rst +++ /dev/null @@ -1,44 +0,0 @@ -Hyper-V Volumes Management -============================================= - -To enable the volume features, the first thing that needs to be done is to -enable the iSCSI service on the Windows compute nodes and set it to start -automatically. - -sc config msiscsi start= auto -net start msiscsi - -In Windows Server 2012, it's important to execute the following commands to -prevent having the volumes being online by default: - -diskpart -san policy=OfflineAll -exit - -How to check if your iSCSI configuration is working properly: - -On your OpenStack controller: - -1. Create a volume with e.g. "nova volume-create 1" and note the generated -volume id - -On Windows: - -2. iscsicli QAddTargetPortal -3. iscsicli ListTargets - -The output should contain the iqn related to your volume: -iqn.2010-10.org.openstack:volume- - -How to test Boot from volume in Hyper-V from the OpenStack dashboard: - -1. Fist of all create a volume -2. Get the volume ID of the created volume -3. Upload and untar to the Cloud controller the next VHD image: -http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz -4. sudo dd if=/path/to/vhdfileofstep3 -of=/dev/nova-volumes/volume-XXXXX <- Related to the ID of step 2 -5. Launch an instance from any image (this is not important because we are -just booting from a volume) from the dashboard, and don't forget to select -boot from volume and select the volume created in step2. Important: Device -name must be "vda". diff --git a/compute_hyperv/nova/__init__.py b/compute_hyperv/nova/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/nova/block_device_manager.py b/compute_hyperv/nova/block_device_manager.py deleted file mode 100644 index 99531f76..00000000 --- a/compute_hyperv/nova/block_device_manager.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (c) 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Handling of block device information and mapping - -Module contains helper methods for dealing with block device information -""" - -import os - -from nova import block_device -from nova import exception -from nova import objects -from nova.virt import block_device as driver_block_device -from nova.virt import configdrive -from nova.virt import driver -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from compute_hyperv.i18n import _ -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import volumeops - -LOG = logging.getLogger(__name__) - - -class BlockDeviceInfoManager(object): - - _VALID_BUS = {constants.VM_GEN_1: (constants.CTRL_TYPE_IDE, - constants.CTRL_TYPE_SCSI), - constants.VM_GEN_2: (constants.CTRL_TYPE_SCSI,)} - - _DEFAULT_BUS = constants.CTRL_TYPE_SCSI - - _TYPE_FOR_DISK_FORMAT = {'vhd': constants.DISK, - 'vhdx': constants.DISK, - 'iso': constants.DVD} - - _DEFAULT_ROOT_DEVICE = '/dev/sda' - - def __init__(self): - self._volops = volumeops.VolumeOps() - self._pathutils = pathutils.PathUtils() - - self._vmutils = utilsfactory.get_vmutils() - - @staticmethod - def _get_device_bus(ctrl_type, ctrl_addr, ctrl_slot): - """Determines the device bus and it's hypervisor assigned address. - """ - if ctrl_type == constants.CTRL_TYPE_SCSI: - address = ':'.join(map(str, [0, 0, ctrl_addr, ctrl_slot])) - return objects.SCSIDeviceBus(address=address) - elif ctrl_type == constants.CTRL_TYPE_IDE: - address = ':'.join(map(str, [ctrl_addr, ctrl_slot])) - return objects.IDEDeviceBus(address=address) - - def _get_vol_bdm_attachment_info(self, bdm): - drv_vol_bdm = driver_block_device.convert_volume(bdm) - if not drv_vol_bdm: - return - - connection_info = drv_vol_bdm['connection_info'] - if not connection_info: - LOG.warning("Missing connection info for volume %s.", - bdm.volume_id) - return - - attachment_info = self._volops.get_disk_attachment_info( - connection_info) - attachment_info['serial'] = connection_info['serial'] - return attachment_info - - def _get_eph_bdm_attachment_info(self, instance, bdm): - # When attaching ephemeral disks, we're setting this field so that - # we can map them with bdm objects. - connection_info = self.get_bdm_connection_info(bdm) - eph_filename = connection_info.get("eph_filename") - if not eph_filename: - LOG.warning("Missing ephemeral disk filename in " - "BDM connection info. 
BDM: %s", bdm) - return - - eph_path = os.path.join( - self._pathutils.get_instance_dir(instance.name), eph_filename) - if not os.path.exists(eph_path): - LOG.warning("Could not find ephemeral disk %s.", eph_path) - return - - return self._vmutils.get_disk_attachment_info(eph_path, - is_physical=False) - - def _get_disk_metadata(self, instance, bdm): - attachment_info = None - if bdm.is_volume: - attachment_info = self._get_vol_bdm_attachment_info(bdm) - elif block_device.new_format_is_ephemeral(bdm): - attachment_info = self._get_eph_bdm_attachment_info( - instance, bdm) - - if not attachment_info: - LOG.debug("No attachment info retrieved for bdm %s.", bdm) - return - - tags = [bdm.tag] if bdm.tag else [] - bus = self._get_device_bus( - attachment_info['controller_type'], - attachment_info['controller_addr'], - attachment_info['controller_slot']) - serial = attachment_info.get('serial') - - return objects.DiskMetadata(bus=bus, - tags=tags, - serial=serial) - - def get_bdm_metadata(self, context, instance): - """Builds a metadata object for instance devices, that maps the user - provided tag to the hypervisor assigned device address. - """ - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - bdm_metadata = [] - for bdm in bdms: - try: - device_metadata = self._get_disk_metadata(instance, bdm) - if device_metadata: - bdm_metadata.append(device_metadata) - except (exception.DiskNotFound, os_win_exc.DiskNotFound): - LOG.debug("Could not find disk attachment while " - "updating device metadata. It may have been " - "detached. BDM: %s", bdm) - - return bdm_metadata - - def set_volume_bdm_connection_info(self, context, instance, - connection_info): - # When attaching volumes to already existing instances, the connection - # info passed to the driver is not saved yet within the BDM table. - # - # Nova sets the volume id within the connection info using the - # 'serial' key. - volume_id = connection_info['serial'] - bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( - context, volume_id, instance.uuid) - bdm.connection_info = jsonutils.dumps(connection_info) - bdm.save() - - @staticmethod - def get_bdm_connection_info(bdm): - # We're using the BDM 'connection_info' field to store ephemeral - # image information so that we can map them. In order to do so, - # we're using this helper. - # The ephemeral bdm object wrapper does not currently expose this - # field. 
- try: - conn_info = jsonutils.loads(bdm.connection_info) - except TypeError: - conn_info = {} - - return conn_info - - @staticmethod - def update_bdm_connection_info(bdm, **kwargs): - conn_info = BlockDeviceInfoManager.get_bdm_connection_info(bdm) - conn_info.update(**kwargs) - bdm.connection_info = jsonutils.dumps(conn_info) - bdm.save() - - def _initialize_controller_slot_counter(self, instance, vm_gen): - # we have 2 IDE controllers, for a total of 4 slots - free_slots_by_device_type = { - constants.CTRL_TYPE_IDE: [ - os_win_const.IDE_CONTROLLER_SLOTS_NUMBER] * 2, - constants.CTRL_TYPE_SCSI: [ - os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER] - } - if configdrive.required_by(instance): - if vm_gen == constants.VM_GEN_1: - # reserve one slot for the config drive on the second - # controller in case of generation 1 virtual machines - free_slots_by_device_type[constants.CTRL_TYPE_IDE][1] -= 1 - return free_slots_by_device_type - - def validate_and_update_bdi(self, instance, image_meta, vm_gen, - block_device_info): - slot_map = self._initialize_controller_slot_counter(instance, vm_gen) - self._check_and_update_root_device(vm_gen, image_meta, - block_device_info, slot_map) - self._check_and_update_ephemerals(vm_gen, block_device_info, slot_map) - self._check_and_update_volumes(vm_gen, block_device_info, slot_map) - - if vm_gen == constants.VM_GEN_2 and configdrive.required_by(instance): - # for Generation 2 VMs, the configdrive is attached to the SCSI - # controller. Check that there is still a slot available for it. - if slot_map[constants.CTRL_TYPE_SCSI][0] == 0: - msg = _("There are no more free slots on controller %s for " - "configdrive.") % constants.CTRL_TYPE_SCSI - raise exception.InvalidBDMFormat(details=msg) - - def _check_and_update_root_device(self, vm_gen, image_meta, - block_device_info, slot_map): - # either booting from volume, or booting from image/iso - root_disk = {} - - root_device = driver.block_device_info_get_root_device( - block_device_info) - root_device = root_device or self._DEFAULT_ROOT_DEVICE - - if self.is_boot_from_volume(block_device_info): - root_volume = self._get_root_device_bdm( - block_device_info, root_device) - root_disk['type'] = constants.VOLUME - root_disk['path'] = None - root_disk['connection_info'] = root_volume['connection_info'] - else: - root_disk['type'] = self._TYPE_FOR_DISK_FORMAT.get( - image_meta['disk_format']) - if root_disk['type'] is None: - raise exception.InvalidImageFormat( - format=image_meta['disk_format']) - root_disk['path'] = None - root_disk['connection_info'] = None - - root_disk['disk_bus'] = (constants.CTRL_TYPE_IDE if - vm_gen == constants.VM_GEN_1 else constants.CTRL_TYPE_SCSI) - (root_disk['drive_addr'], - root_disk['ctrl_disk_addr']) = self._get_available_controller_slot( - root_disk['disk_bus'], slot_map) - root_disk['boot_index'] = 0 - root_disk['mount_device'] = root_device - - block_device_info['root_disk'] = root_disk - - def _get_available_controller_slot(self, controller_type, slot_map): - max_slots = (os_win_const.IDE_CONTROLLER_SLOTS_NUMBER if - controller_type == constants.CTRL_TYPE_IDE else - os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER) - for idx, ctrl in enumerate(slot_map[controller_type]): - if slot_map[controller_type][idx] >= 1: - drive_addr = idx - ctrl_disk_addr = max_slots - slot_map[controller_type][idx] - slot_map[controller_type][idx] -= 1 - return (drive_addr, ctrl_disk_addr) - - msg = _("There are no more free slots on controller %s" - ) % controller_type - raise 
exception.InvalidBDMFormat(details=msg) - - def is_boot_from_volume(self, block_device_info): - if block_device_info: - root_device = block_device_info.get('root_device_name') - if not root_device: - root_device = self._DEFAULT_ROOT_DEVICE - - return block_device.volume_in_mapping(root_device, - block_device_info) - - def _get_root_device_bdm(self, block_device_info, mount_device=None): - for mapping in driver.block_device_info_get_mapping(block_device_info): - if mapping['mount_device'] == mount_device: - return mapping - - def _check_and_update_ephemerals(self, vm_gen, block_device_info, - slot_map): - ephemerals = driver.block_device_info_get_ephemerals(block_device_info) - for eph in ephemerals: - self._check_and_update_bdm(slot_map, vm_gen, eph) - - def _check_and_update_volumes(self, vm_gen, block_device_info, slot_map): - volumes = driver.block_device_info_get_mapping(block_device_info) - root_device_name = block_device_info['root_disk']['mount_device'] - root_bdm = self._get_root_device_bdm(block_device_info, - root_device_name) - if root_bdm: - volumes.remove(root_bdm) - for vol in volumes: - self._check_and_update_bdm(slot_map, vm_gen, vol) - - def _check_and_update_bdm(self, slot_map, vm_gen, bdm): - disk_bus = bdm.get('disk_bus') - if not disk_bus: - bdm['disk_bus'] = self._DEFAULT_BUS - elif disk_bus not in self._VALID_BUS[vm_gen]: - msg = _("Hyper-V does not support bus type %(disk_bus)s " - "for generation %(vm_gen)s instances." - ) % {'disk_bus': disk_bus, - 'vm_gen': vm_gen} - raise exception.InvalidDiskInfo(reason=msg) - - device_type = bdm.get('device_type') - if not device_type: - bdm['device_type'] = 'disk' - elif device_type != 'disk': - msg = _("Hyper-V does not support disk type %s for ephemerals " - "or volumes.") % device_type - raise exception.InvalidDiskInfo(reason=msg) - - (bdm['drive_addr'], - bdm['ctrl_disk_addr']) = self._get_available_controller_slot( - bdm['disk_bus'], slot_map) - - # make sure that boot_index is set. 
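The slot accounting completed above hands out (controller index, slot index) pairs by decrementing per-controller free-slot counters; the slot address is recovered from how many slots were already consumed. A standalone sketch, assuming 2 IDE controllers with 2 slots each and one 64-slot SCSI controller (mirroring the os-win constants the removed code references):

IDE_SLOTS, SCSI_SLOTS = 2, 64

# free slots per controller, keyed by bus type
slot_map = {'IDE': [IDE_SLOTS] * 2, 'SCSI': [SCSI_SLOTS]}


def get_available_controller_slot(controller_type, slot_map):
    max_slots = IDE_SLOTS if controller_type == 'IDE' else SCSI_SLOTS
    for idx, free in enumerate(slot_map[controller_type]):
        if free >= 1:
            drive_addr = idx                   # which controller
            ctrl_disk_addr = max_slots - free  # next slot on it
            slot_map[controller_type][idx] -= 1
            return drive_addr, ctrl_disk_addr
    raise ValueError('no free slots on controller %s' % controller_type)


# The first IDE controller fills up before the second one is used.
assert get_available_controller_slot('IDE', slot_map) == (0, 0)
assert get_available_controller_slot('IDE', slot_map) == (0, 1)
assert get_available_controller_slot('IDE', slot_map) == (1, 0)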
- bdm['boot_index'] = bdm.get('boot_index') - - def _sort_by_boot_order(self, bd_list): - # we sort the block devices by boot_index leaving the ones that don't - # have a specified boot_index at the end - bd_list.sort(key=lambda x: (x['boot_index'] is None, x['boot_index'])) - - def get_boot_order(self, vm_gen, block_device_info): - if vm_gen == constants.VM_GEN_1: - return self._get_boot_order_gen1(block_device_info) - else: - return self._get_boot_order_gen2(block_device_info) - - def _get_boot_order_gen1(self, block_device_info): - if block_device_info['root_disk']['type'] == 'iso': - return [os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - else: - return [os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - def _get_boot_order_gen2(self, block_device_info): - devices = [block_device_info['root_disk']] - devices += driver.block_device_info_get_ephemerals( - block_device_info) - devices += driver.block_device_info_get_mapping(block_device_info) - - self._sort_by_boot_order(devices) - - boot_order = [] - for dev in devices: - if dev.get('connection_info'): - dev_path = self._volops.get_disk_resource_path( - dev['connection_info']) - boot_order.append(dev_path) - else: - boot_order.append(dev['path']) - - return boot_order diff --git a/compute_hyperv/nova/cluster/__init__.py b/compute_hyperv/nova/cluster/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/nova/cluster/clusterops.py b/compute_hyperv/nova/cluster/clusterops.py deleted file mode 100644 index 444377fb..00000000 --- a/compute_hyperv/nova/cluster/clusterops.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
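The _sort_by_boot_order helper removed above leans on Python tuple ordering to push devices without a boot_index to the end while sorting the rest ascending; the boolean sorts first, so None is never compared against an int. A quick illustration with made-up devices:

devices = [
    {'name': 'eph0', 'boot_index': None},
    {'name': 'vol1', 'boot_index': 1},
    {'name': 'root', 'boot_index': 0},
]

# (False, 0) < (False, 1) < (True, None): untagged devices sort last.
devices.sort(key=lambda d: (d['boot_index'] is None, d['boot_index']))

assert [d['name'] for d in devices] == ['root', 'vol1', 'eph0']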
- -"""Management class for Cluster VM operations.""" - -import functools -import time - -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import vm_states -from nova import context -from nova.network import neutron -from nova import objects -from nova import utils -from nova.virt import block_device -from nova.virt import event as virtevent -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging - -import compute_hyperv.nova.conf -from compute_hyperv.nova import coordination -from compute_hyperv.nova import hostops -from compute_hyperv.nova import serialconsoleops -from compute_hyperv.nova.utils import placement as placement_utils -from compute_hyperv.nova import vmops - -LOG = logging.getLogger(__name__) -CONF = compute_hyperv.nova.conf.CONF - - -class ClusterOps(object): - - def __init__(self): - self._clustutils = utilsfactory.get_clusterutils() - self._vmutils = utilsfactory.get_vmutils() - self._clustutils.check_cluster_state() - self._instance_map = {} - - self._this_node = hostops.HostOps.get_hostname() - - self._context = context.get_admin_context() - self._network_api = neutron.API() - self._vmops = vmops.VMOps() - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._placement = placement_utils.PlacementUtils() - - def get_instance_host(self, instance): - return self._clustutils.get_vm_host(instance.name) - - def add_to_cluster(self, instance): - try: - self._clustutils.add_vm_to_cluster( - instance.name, CONF.hyperv.max_failover_count, - CONF.hyperv.failover_period, CONF.hyperv.auto_failback) - self._instance_map[instance.name] = instance.uuid - except os_win_exc.HyperVClusterException: - LOG.exception('Adding instance to cluster failed.', - instance=instance) - - def remove_from_cluster(self, instance): - try: - if self._clustutils.vm_exists(instance.name): - self._clustutils.delete(instance.name) - self._instance_map.pop(instance.name, None) - except os_win_exc.HyperVClusterException: - LOG.exception('Removing instance from cluster failed.', - instance=instance) - - def post_migration(self, instance): - # update instance cache - self._instance_map[instance.name] = instance.uuid - - def start_failover_listener_daemon(self): - """Start the daemon failover listener.""" - - listener = self._clustutils.get_vm_owner_change_listener_v2() - cbk = functools.partial(utils.spawn_n, self._failover_migrate) - - utils.spawn_n(listener, cbk) - - def reclaim_failovered_instances(self): - # NOTE(claudiub): some instances might have failovered while the - # nova-compute service was down. Those instances will have to be - # reclaimed by this node. - expected_attrs = ['id', 'uuid', 'name', 'host'] - host_instance_uuids = self._vmops.list_instance_uuids() - nova_instances = self._get_nova_instances(expected_attrs, - host_instance_uuids) - - # filter out instances that are known to be on this host. 
- nova_instances = [instance for instance in nova_instances if - self._this_node.upper() != instance.host.upper()] - instance_names = [instance.name for instance in nova_instances] - - LOG.warning("Handling failovers that occurred while Nova was not " - "running: %s", instance_names) - for instance in nova_instances: - utils.spawn_n(self._failover_migrate, - instance.name, - self._this_node) - - @coordination.synchronized('failover-{instance_name}') - def _failover_migrate(self, instance_name, new_host): - """This method will check if the generated event is a legitimate - failover to this node. If it is, it will proceed to prepare the - failovered VM if necessary and update the owner of the compute vm in - nova and ports in neutron. - """ - instance = self._get_instance_by_name(instance_name) - if not instance: - # Some instances on the hypervisor may not be tracked by nova - LOG.debug('Instance %s does not exist in nova. Skipping.', - instance_name) - return - - old_host = instance.host - LOG.info('Checking instance failover %(instance)s to %(new_host)s ' - 'from host %(old_host)s.', - {'instance': instance_name, - 'new_host': new_host, - 'old_host': old_host}) - - # While the cluster group is in "pending" state, it may not even be - # registered in Hyper-V, so there's not much we can do. We'll have to - # wait for it to be handled by the Failover Cluster service. - self._wait_for_pending_instance(instance_name) - - current_host = self._clustutils.get_vm_host(instance_name) - instance_moved_again = current_host.upper() != new_host.upper() - if instance_moved_again: - LOG.warning("While processing instance %(instance)s failover to " - "%(host)s, it has moved to %(current_host)s.", - dict(host=new_host, - current_host=current_host, - instance=instance_name)) - new_host = current_host - - host_changed = old_host.upper() != new_host.upper() - migrated_here = new_host.upper() == self._this_node.upper() - migrated_from_here = old_host.upper() == self._this_node.upper() - - if instance.task_state == task_states.MIGRATING: - LOG.debug('Instance %s is being migrated by Nova. This ' - 'will not be treated as a failover.', - instance_name) - return - - if not host_changed: - LOG.warning("The source node is the same as the destination " - "node: %(host)s. The instance %(instance)s may have " - "bounced between hosts due to a failure.", - dict(host=old_host, instance=instance_name)) - - nw_info = self._network_api.get_instance_nw_info(self._context, - instance) - if host_changed and migrated_from_here: - LOG.debug('Cleaning up moved instance: %s.', instance_name) - self._vmops.unplug_vifs(instance, nw_info) - return - if not migrated_here: - LOG.debug('Instance %s did not failover to this node.', - instance_name) - return - - LOG.info('Handling instance %(instance)s failover to this host.', - {'instance': instance_name}) - - self._nova_failover_server(instance, new_host) - if host_changed: - self._failover_migrate_networks(instance, old_host) - try: - self._placement.move_compute_node_allocations( - self._context, instance, old_host, new_host, - merge_existing=False) - except Exception: - LOG.exception("Could not update failed over instance '%s' " - "allocations.", instance) - - if CONF.hyperv.recreate_ports_on_failover: - self._vmops.unplug_vifs(instance, nw_info) - self._vmops.plug_vifs(instance, nw_info) - - self._serial_console_ops.start_console_handler(instance_name) - - def _wait_for_pending_instance(self, instance_name): - # TODO(lpetrut): switch to an event listener. 
We'd probably want to - # avoid having one event listener per failed over instance, as there - # can be many of them. - group_state = self._clustutils.get_cluster_group_state_info( - instance_name)['state'] - while group_state == os_win_const.CLUSTER_GROUP_PENDING: - LOG.debug("Waiting for pending instance cluster group: %s", - instance_name) - time.sleep(2) - - group_state = self._clustutils.get_cluster_group_state_info( - instance_name)['state'] - - def _failover_migrate_networks(self, instance, source): - """This is called after a VM failed over to this node. - This will change the owner of the neutron ports to this node. - """ - migration = {'source_compute': source, - 'dest_compute': self._this_node, } - - self._network_api.setup_networks_on_host( - self._context, instance, self._this_node) - self._network_api.migrate_instance_start( - self._context, instance, migration) - self._network_api.setup_networks_on_host( - self._context, instance, self._this_node) - # TODO(lpetrut): provide the actual port provider mappings. - self._network_api.migrate_instance_finish( - self._context, instance, migration, - provider_mappings=None) - self._network_api.setup_networks_on_host( - self._context, instance, self._this_node) - self._network_api.setup_networks_on_host( - self._context, instance, source, teardown=True) - - def _get_instance_by_name(self, instance_name): - # Since from a failover we only get the instance name, - # we need to find its uuid so we can retrieve the instance - # object from nova. We keep a map from the instance name to the - # instance uuid. First we try to get the vm uuid from that map; - # if it's not there, we try to get it from the instance notes, - # this may fail (during a failover for example, the vm will not - # be at the source node anymore) and lastly we try to get the - # vm uuid from the database. - vm_uuid = self._instance_map.get(instance_name) - if not vm_uuid: - try: - vm_uuid = self._vmutils.get_instance_uuid(instance_name) - self._instance_map[instance_name] = vm_uuid - except os_win_exc.HyperVVMNotFoundException: - pass - - if not vm_uuid: - self._update_instance_map() - vm_uuid = self._instance_map.get(instance_name) - - if not vm_uuid: - LOG.debug("Instance %s cannot be found in Nova.", instance_name) - return - - return objects.Instance.get_by_uuid(self._context, vm_uuid) - - def _update_instance_map(self): - for server in self._get_nova_instances(): - self._instance_map[server.name] = server.uuid - - def _get_nova_instances(self, expected_attrs=None, instance_uuids=None): - if not expected_attrs: - expected_attrs = ['id', 'uuid', 'name'] - - filters = {'deleted': False} - if instance_uuids is not None: - filters['uuid'] = instance_uuids - - return objects.InstanceList.get_by_filters( - self._context, filters, expected_attrs=expected_attrs) - - def _get_instance_block_device_mappings(self, instance): - """Transform block devices to the driver block_device format.""" - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - self._context, instance.uuid) - return [block_device.DriverVolumeBlockDevice(bdm) for bdm in bdms] - - def _nova_failover_server(self, instance, new_host): - if instance.vm_state == vm_states.ERROR: - # Sometimes during a failover nova can set the instance state - # to error depending on how much time the failover takes.
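_get_instance_by_name, just above, resolves a Hyper-V VM name to a Nova instance through a three-step cascade: local cache, hypervisor-side notes, then a full rescan of the database. A condensed, hypothetical rendition of that cascade (the two lambdas stand in for os-win and the Nova DB API):

class InstanceResolver(object):
    """Hypothetical condensation of the lookup cascade above."""

    def __init__(self, hypervisor_lookup, nova_listing):
        self._cache = {}                             # name -> uuid
        self._hypervisor_lookup = hypervisor_lookup  # name -> uuid or None
        self._nova_listing = nova_listing            # () -> [(name, uuid)]

    def get_uuid(self, name):
        uuid = self._cache.get(name)
        if not uuid:
            # e.g. the notes field of the Hyper-V VM; may already be gone
            # from this node after a failover
            uuid = self._hypervisor_lookup(name)
            if uuid:
                self._cache[name] = uuid
        if not uuid:
            # last resort: rebuild the whole map from the database
            self._cache = dict(self._nova_listing())
            uuid = self._cache.get(name)
        return uuid


resolver = InstanceResolver(lambda name: None,
                            lambda: [('instance-00000001', 'fake-uuid')])
assert resolver.get_uuid('instance-00000001') == 'fake-uuid'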
- instance.vm_state = vm_states.ACTIVE - if instance.power_state == power_state.NOSTATE: - instance.power_state = power_state.RUNNING - - instance.host = new_host - instance.node = new_host - instance.save(expected_task_state=[None]) - - def instance_state_change_callback(self, event): - if event.transition == virtevent.EVENT_LIFECYCLE_STARTED: - # In some cases, we may not be able to plug the vifs when the - # instances are failed over (e.g. if the instances end up in - # "failed" state, without actually being registered in Hyper-V, - # being brought back online afterwards) - instance = self._get_instance_by_name(event.name) - nw_info = self._network_api.get_instance_nw_info(self._context, - instance) - self._vmops.plug_vifs(instance, nw_info) diff --git a/compute_hyperv/nova/cluster/driver.py b/compute_hyperv/nova/cluster/driver.py deleted file mode 100644 index 433cacd5..00000000 --- a/compute_hyperv/nova/cluster/driver.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A Hyper-V Cluster Nova Compute driver.""" - -from compute_hyperv.nova.cluster import clusterops -from compute_hyperv.nova.cluster import livemigrationops -from compute_hyperv.nova.cluster import volumeops -from compute_hyperv.nova import driver - - -class HyperVClusterDriver(driver.HyperVDriver): - use_coordination = True - - def __init__(self, virtapi): - super(HyperVClusterDriver, self).__init__(virtapi) - - self._clops = clusterops.ClusterOps() - self._livemigrationops = livemigrationops.ClusterLiveMigrationOps() - self._volumeops = volumeops.ClusterVolumeOps() - - self._clops.start_failover_listener_daemon() - self._clops.reclaim_failovered_instances() - - def _set_event_handler_callbacks(self): - super(HyperVClusterDriver, self)._set_event_handler_callbacks() - - self._event_handler.add_callback( - self._clops.instance_state_change_callback) - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, allocations, network_info=None, - block_device_info=None, power_on=True, accel_info=None): - super(HyperVClusterDriver, self).spawn( - context, instance, image_meta, injected_files, admin_password, - allocations, network_info, block_device_info, power_on) - self._clops.add_to_cluster(instance) - - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, destroy_secrets=True): - self._clops.remove_from_cluster(instance) - super(HyperVClusterDriver, self).destroy( - context, instance, network_info, block_device_info, - destroy_disks) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, - timeout=0, retry_interval=0): - self._clops.remove_from_cluster(instance) - return super(HyperVClusterDriver, self).migrate_disk_and_power_off( - context, instance, dest, flavor, network_info, - block_device_info, timeout, retry_interval) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, 
image_meta, resize_instance, - allocations, block_device_info=None, power_on=True): - super(HyperVClusterDriver, self).finish_migration( - context, migration, instance, disk_info, network_info, - image_meta, resize_instance, allocations, - block_device_info, power_on) - self._clops.add_to_cluster(instance) - - def finish_revert_migration(self, context, instance, network_info, - migration, block_device_info=None, - power_on=True): - super(HyperVClusterDriver, self).finish_revert_migration( - context, instance, network_info, migration, - block_device_info, power_on) - self._clops.add_to_cluster(instance) - - def rollback_live_migration_at_destination(self, context, instance, - network_info, - block_device_info, - destroy_disks=True, - migrate_data=None): - if self._livemigrationops.is_instance_clustered(instance.name): - self.unplug_vifs(instance, network_info) - else: - super(HyperVClusterDriver, - self).rollback_live_migration_at_destination( - context, instance, network_info, block_device_info, - destroy_disks, migrate_data) - - def post_live_migration_at_destination(self, context, instance, - network_info, - block_migration=False, - block_device_info=None): - self._clops.post_migration(instance) - super(HyperVClusterDriver, self).post_live_migration_at_destination( - context, instance, network_info, - block_migration, block_device_info) diff --git a/compute_hyperv/nova/cluster/livemigrationops.py b/compute_hyperv/nova/cluster/livemigrationops.py deleted file mode 100644 index a4eb6485..00000000 --- a/compute_hyperv/nova/cluster/livemigrationops.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Management class for cluster live migration VM operations.""" - -from nova.compute import vm_states -from nova import exception -from os_win import constants as os_win_const -from os_win import utilsfactory -from oslo_log import log as logging - -from compute_hyperv.i18n import _ -import compute_hyperv.nova.conf -from compute_hyperv.nova import livemigrationops - -CONF = compute_hyperv.nova.conf.CONF -LOG = logging.getLogger(__name__) - - -class ClusterLiveMigrationOps(livemigrationops.LiveMigrationOps): - def __init__(self): - super(ClusterLiveMigrationOps, self).__init__() - self._clustutils = utilsfactory.get_clusterutils() - - def is_instance_clustered(self, instance_name): - return self._clustutils.vm_exists(instance_name) - - def live_migration(self, context, instance_ref, dest, post_method, - recover_method, block_migration=False, - migrate_data=None): - LOG.debug("live_migration called.", instance=instance_ref) - instance_name = instance_ref.name - clustered = self.is_instance_clustered(instance_name) - node_names = [node.upper() for node in - self._clustutils.get_cluster_node_names()] - - if dest.upper() not in node_names or not clustered: - # destination is not in same cluster or instance not clustered. - # do a normal live migration. 
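The branch that follows routes a request down one of three paths: a classic live migration when the destination is outside the cluster (or the VM is not clustered), a no-op when the VM already sits on the destination after a failover, and a clustered migration otherwise. Summarized as a small decision helper; the host names are made up:

def choose_migration_path(dest, cluster_nodes, is_clustered, current_host):
    """Illustrative routing for live migrating a (possibly) clustered VM."""
    if not is_clustered or dest.upper() not in (
            n.upper() for n in cluster_nodes):
        # leave the cluster (if needed) and fall back to a classic
        # Hyper-V live migration
        return 'normal'
    if current_host.upper() == dest.upper():
        # already failed over to the destination: nothing to do
        return 'noop'
    # same cluster: delegate to the Failover Cluster service
    return 'clustered'


assert choose_migration_path('HV-2', ['HV-1', 'HV-2'], True, 'HV-1') == 'clustered'
assert choose_migration_path('HV-3', ['HV-1', 'HV-2'], True, 'HV-1') == 'normal'
assert choose_migration_path('HV-2', ['HV-1', 'HV-2'], True, 'HV-2') == 'noop'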
- if clustered: - # remove VM from cluster before proceeding to a normal live - # migration. - self._clustutils.delete(instance_name) - super(ClusterLiveMigrationOps, self).live_migration( - context, instance_ref, dest, post_method, recover_method, - block_migration, migrate_data) - return - elif self._clustutils.get_vm_host( - instance_name).upper() == dest.upper(): - # VM is already migrated. Do nothing. - # this can happen when the VM has failed over. - return - - # destination is in the same cluster. - # perform a clustered live migration. - try: - self._clustutils.live_migrate_vm( - instance_name, - dest, - CONF.hyperv.instance_live_migration_timeout) - except Exception: - LOG.exception("Live migration failed. Attempting rollback.", - instance=instance_ref) - # The recover method will update the migration state. - # We won't error out if we manage to recover the instance, - # which would otherwise end up in error state. - self._check_failed_instance_migration( - instance_ref, - expected_state=os_win_const.CLUSTER_GROUP_ONLINE) - - recover_method(context, instance_ref, dest, migrate_data) - return - - LOG.debug("Calling live migration post_method for instance.", - instance=instance_ref) - post_method(context, instance_ref, dest, - block_migration, migrate_data) - - def _check_failed_instance_migration(self, instance, expected_state): - # After a failed migration, we expect the instance to be on the - # source node, having its initial state and no queued - # migrations. Otherwise, we treat it as a critical error and set - # it to 'error' state to avoid inconsistencies. - state_info = self._clustutils.get_cluster_group_state_info( - instance.name) - node_name = self._clustutils.get_node_name() - - if (state_info['owner_node'].lower() != node_name.lower() or - state_info['state'] != expected_state or - state_info['migration_queued']): - instance.vm_state = vm_states.ERROR - instance.save() - - raise exception.InstanceInvalidState( - _("Instance %(instance_name)s reached an inconsistent state " - "after a failed migration attempt. Setting the instance to " - "'error' state. Instance state info: %(state_info)s.") % - dict(instance_name=instance.name, - state_info=state_info)) - - def pre_live_migration(self, context, instance, block_device_info, - network_info): - if self.is_instance_clustered(instance.name): - self._volumeops.connect_volumes(block_device_info) - else: - super(ClusterLiveMigrationOps, self).pre_live_migration( - context, instance, block_device_info, network_info) - - def post_live_migration(self, context, instance, block_device_info, - migrate_data): - if not self.is_instance_clustered(instance.name): - super(ClusterLiveMigrationOps, self).post_live_migration( - context, instance, block_device_info, migrate_data)
diff --git a/compute_hyperv/nova/cluster/volumeops.py b/compute_hyperv/nova/cluster/volumeops.py deleted file mode 100644 index 31d532b2..00000000 --- a/compute_hyperv/nova/cluster/volumeops.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import exception -from oslo_log import log as logging - -from compute_hyperv.nova import constants -from compute_hyperv.nova import volumeops - -LOG = logging.getLogger(__name__) - - -class ClusterVolumeOps(volumeops.VolumeOps): - def _load_volume_drivers(self): - self.volume_drivers = { - constants.STORAGE_PROTOCOL_SMBFS: volumeops.SMBFSVolumeDriver() - } - - def _get_volume_driver(self, connection_info): - driver_type = connection_info.get('driver_volume_type') - if driver_type in [constants.STORAGE_PROTOCOL_ISCSI, - constants.STORAGE_PROTOCOL_FC, - constants.STORAGE_PROTOCOL_RBD]: - err_msg = ( - "The Hyper-V Cluster driver does not currently support " - "passthrough disks (e.g. iSCSI/FC/RBD disks). The reason is " - "that the volumes need to be available on the destination " - "host side during an unexpected instance failover. In order " - "to leverage your storage backend, you may either use the " - "*standard* Nova Hyper-V driver or use the Cinder SMB volume " - "driver (which may imply deploying CSVs on top of LUNs " - "exposed by your storage backend).") - LOG.error(err_msg) - raise exception.VolumeDriverNotFound(driver_type=driver_type) - - return super(ClusterVolumeOps, self)._get_volume_driver( - connection_info)
diff --git a/compute_hyperv/nova/conf.py b/compute_hyperv/nova/conf.py deleted file mode 100644 index 1fe9c762..00000000 --- a/compute_hyperv/nova/conf.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -import nova.conf - -hyperv_opts = [ - cfg.IntOpt('evacuate_task_state_timeout', - default=600, - help='Number of seconds to wait for an instance to be ' - 'evacuated during host maintenance.'), - cfg.IntOpt('cluster_event_check_interval', - deprecated_for_removal=True, - deprecated_since="5.0.1", - default=2), - cfg.BoolOpt('instance_automatic_shutdown', - default=False, - help='Automatically shut down instances when the host is ' - 'shut down. By default, instances will be saved, which ' - 'adds a disk overhead. Changing this option will not ' - 'affect existing instances.'), - cfg.IntOpt('instance_live_migration_timeout', - default=300, - min=0, - help='Number of seconds to wait for an instance to be ' - 'live migrated (only applies to clustered instances ' - 'for the moment).'), - cfg.IntOpt('max_failover_count', - default=1, - min=1, - help="The maximum number of failovers that can occur in the " - "failover_period timeframe per VM. Once a VM's number of " - "failovers reaches this number, the VM will simply end up " - "in a Failed state."), - cfg.IntOpt('failover_period', - default=6, - min=1, - help="The number of hours in which the max_failover_count " - "number of failovers can occur."), - cfg.BoolOpt('recreate_ports_on_failover', - default=True, - help="When enabled, the ports will be recreated for failed " - "over instances. This ensures that we're not left with " - "a stale port."), - cfg.BoolOpt('auto_failback', - default=True, - help="Allow the VM to fail back to its original host once it " - "is available."), - cfg.BoolOpt('force_destroy_instances', - default=False, - help="If this option is enabled, instance destroy requests " - "are executed immediately, regardless of instance " - "pending tasks. In some situations, the destroy " - "operation will fail (e.g. due to file locks), " - "requiring subsequent retries."), - cfg.BoolOpt('move_disks_on_cold_migration', - default=True, - help="Move the instance files to the instance dir configured " - "on the destination host. You may consider disabling " - "this when using multiple CSVs or shares and you wish " - "the source location to be preserved."), -] - -coordination_opts = [ - cfg.StrOpt('backend_url', - default='file:///C:/OpenStack/Lock', - help='The backend URL to use for distributed coordination.'), -] - -CONF = nova.conf.CONF -CONF.register_opts(coordination_opts, 'coordination') -CONF.register_opts(hyperv_opts, 'hyperv') - - -def list_opts(): - return [('coordination', coordination_opts), - ('hyperv', hyperv_opts)]
diff --git a/compute_hyperv/nova/constants.py b/compute_hyperv/nova/constants.py deleted file mode 100644 index 7673121b..00000000 --- a/compute_hyperv/nova/constants.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
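For reference, the options removed with conf.py above can be exercised through a private oslo.config namespace; a minimal sketch re-registering three of them and reading back their defaults (nothing here is part of the original module):

from oslo_config import cfg

opts = [
    cfg.IntOpt('max_failover_count', default=1, min=1),
    cfg.IntOpt('failover_period', default=6, min=1),
    cfg.BoolOpt('auto_failback', default=True),
]

conf = cfg.ConfigOpts()
conf.register_opts(opts, group='hyperv')
conf(args=[])  # no CLI args or config files; defaults apply

# e.g. allow at most 1 failover per 6-hour window, then fail back
print(conf.hyperv.max_failover_count,
      conf.hyperv.failover_period,
      conf.hyperv.auto_failback)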
- -""" -Constants used in ops classes -""" - -from nova.compute import power_state -from nova.objects import fields as obj_fields -from os_win import constants -from oslo_utils import units - - -HYPERV_POWER_STATE = { - constants.HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN, - constants.HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN, - constants.HYPERV_VM_STATE_ENABLED: power_state.RUNNING, - constants.HYPERV_VM_STATE_PAUSED: power_state.PAUSED, - constants.HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED -} - -WMI_WIN32_PROCESSOR_ARCHITECTURE = { - constants.ARCH_I686: obj_fields.Architecture.I686, - constants.ARCH_MIPS: obj_fields.Architecture.MIPS, - constants.ARCH_ALPHA: obj_fields.Architecture.ALPHA, - constants.ARCH_PPC: obj_fields.Architecture.PPC, - constants.ARCH_ARMV7: obj_fields.Architecture.ARMV7, - constants.ARCH_IA64: obj_fields.Architecture.IA64, - constants.ARCH_X86_64: obj_fields.Architecture.X86_64, -} - - -CTRL_TYPE_IDE = "IDE" -CTRL_TYPE_SCSI = "SCSI" - -DISK = "VHD" -DISK_FORMAT = DISK -DVD = "DVD" -DVD_FORMAT = "ISO" -VOLUME = "VOLUME" - -DISK_FORMAT_MAP = { - DISK_FORMAT.lower(): DISK, - DVD_FORMAT.lower(): DVD -} - -BDI_DEVICE_TYPE_TO_DRIVE_TYPE = {'disk': DISK, - 'cdrom': DVD} - -DISK_FORMAT_VHD = "VHD" -DISK_FORMAT_VHDX = "VHDX" - -HOST_POWER_ACTION_SHUTDOWN = "shutdown" -HOST_POWER_ACTION_REBOOT = "reboot" -HOST_POWER_ACTION_STARTUP = "startup" - -IMAGE_PROP_VM_GEN = "hw_machine_type" -FLAVOR_SPEC_SECURE_BOOT = "os:secure_boot" -IMAGE_PROP_VM_GEN_1 = "hyperv-gen1" -IMAGE_PROP_VM_GEN_2 = "hyperv-gen2" - -VM_GEN_1 = 1 -VM_GEN_2 = 2 - -SERIAL_CONSOLE_BUFFER_SIZE = 4 * units.Ki - -IMAGE_PROP_INTERACTIVE_SERIAL_PORT = "interactive_serial_port" -IMAGE_PROP_LOGGING_SERIAL_PORT = "logging_serial_port" - -SERIAL_PORT_TYPE_RO = 'ro' -SERIAL_PORT_TYPE_RW = 'rw' - -SERIAL_PORT_TYPES = { - IMAGE_PROP_LOGGING_SERIAL_PORT: SERIAL_PORT_TYPE_RO, - IMAGE_PROP_INTERACTIVE_SERIAL_PORT: SERIAL_PORT_TYPE_RW -} - -# The default serial console port number used for -# logging and interactive sessions. -DEFAULT_SERIAL_CONSOLE_PORT = 1 - -FLAVOR_ESPEC_REMOTEFX_RES = 'os:resolution' -FLAVOR_ESPEC_REMOTEFX_MONITORS = 'os:monitors' -FLAVOR_ESPEC_REMOTEFX_VRAM = 'os:vram' - -IOPS_BASE_SIZE = 8 * units.Ki - -STORAGE_PROTOCOL_ISCSI = 'iscsi' -STORAGE_PROTOCOL_FC = 'fibre_channel' -STORAGE_PROTOCOL_SMBFS = 'smbfs' -STORAGE_PROTOCOL_RBD = 'rbd' - -MAX_CONSOLE_LOG_FILE_SIZE = units.Mi // 2 - -IMAGE_PROP_SECURE_BOOT = "os_secure_boot" -REQUIRED = "required" -DISABLED = "disabled" -OPTIONAL = "optional" - -IMAGE_PROP_VTPM = "os_vtpm" -IMAGE_PROP_VTPM_SHIELDED = "os_shielded_vm" - -# We have to make sure that such locks are not used outside the driver in -# order to avoid deadlocks. For this reason, we'll use the 'hv-' scope. -SNAPSHOT_LOCK_TEMPLATE = "%(instance_uuid)s-hv-snapshot" diff --git a/compute_hyperv/nova/coordination.py b/compute_hyperv/nova/coordination.py deleted file mode 100644 index 591220ea..00000000 --- a/compute_hyperv/nova/coordination.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2015 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Coordination and locking utilities.""" - -import inspect -import uuid - -import decorator -from nova import exception -from nova import utils -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -from tooz import coordination - -from compute_hyperv.i18n import _ - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF - - -class Coordinator(object): - """Tooz coordination wrapper. - - Coordination member id is created from concatenated - `prefix` and `agent_id` parameters. - - :param str agent_id: Agent identifier - :param str prefix: Used to provide member identifier with a - meaningful prefix. - """ - - def __init__(self, agent_id=None, prefix=''): - self.coordinator = None - self.agent_id = agent_id or str(uuid.uuid4()) - self.started = False - self.prefix = prefix - - @utils.synchronized(name="coordinator_start") - def start(self): - if self.started: - return - - # NOTE(bluex): Tooz expects member_id as a byte string. - member_id = (self.prefix + self.agent_id).encode('ascii') - self.coordinator = coordination.get_coordinator( - cfg.CONF.coordination.backend_url, member_id) - self.coordinator.start(start_heart=True) - self.started = True - - def stop(self): - """Disconnect from coordination backend and stop heartbeat.""" - if self.started: - self.coordinator.stop() - self.coordinator = None - self.started = False - - def get_lock(self, name): - """Return a Tooz backend lock. - - :param str name: The lock name that is used to identify it - across all nodes. - """ - # NOTE(bluex): Tooz expects lock name as a byte string. - lock_name = (self.prefix + name).encode('ascii') - if self.coordinator is not None: - return self.coordinator.get_lock(lock_name) - else: - raise exception.NovaException( - _('Could not create lock. Coordinator uninitialized.')) - - -COORDINATOR = Coordinator(prefix='compute-hyperv-') - - -def synchronized(lock_name, blocking=True, coordinator=COORDINATOR): - """Synchronization decorator. - - :param str lock_name: Lock name. - :param blocking: If True, blocks until the lock is acquired. - If False, raises exception when not acquired. Otherwise, - the value is used as a timeout value and if lock is not acquired - after this number of seconds exception is raised. - :param coordinator: Coordinator class to use when creating lock. - Defaults to the global coordinator. - :raises tooz.coordination.LockAcquireFailed: if lock is not acquired - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one process will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - - Lock name can be formatted using Python format string syntax:: - - @synchronized('{f_name}-{vol.id}-{snap[name]}') - def foo(self, vol, snap): - ... - - Available field names are: decorated function parameters and - `f_name` as a decorated function name. 
- """ - - @decorator.decorator - def _synchronized(f, *a, **k): - call_args = inspect.getcallargs(f, *a, **k) - call_args['f_name'] = f.__name__ - lock = coordinator.get_lock(lock_name.format(**call_args)) - t1 = timeutils.now() - t2 = None - try: - with lock(blocking): - t2 = timeutils.now() - LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: ' - 'waited %(wait_secs)0.3fs', - {'name': lock.name, - 'function': f.__name__, - 'wait_secs': (t2 - t1)}) - return f(*a, **k) - finally: - t3 = timeutils.now() - if t2 is None: - held_secs = "N/A" - else: - held_secs = "%0.3fs" % (t3 - t2) - LOG.debug('Lock "%(name)s" released by "%(function)s" :: held ' - '%(held_secs)s', - {'name': lock.name, - 'function': f.__name__, - 'held_secs': held_secs}) - - return _synchronized diff --git a/compute_hyperv/nova/driver.py b/compute_hyperv/nova/driver.py deleted file mode 100644 index 1dbf386e..00000000 --- a/compute_hyperv/nova/driver.py +++ /dev/null @@ -1,479 +0,0 @@ -# Copyright (c) 2010 Cloud.com, Inc -# Copyright (c) 2012 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A Hyper-V Nova Compute driver. -""" - -import functools -import platform -import sys - -from nova import context as nova_context -from nova import exception -from nova.image import glance -from nova import objects -from nova.virt import driver -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging -import six - -from compute_hyperv.nova import coordination -from compute_hyperv.nova import eventhandler -from compute_hyperv.nova import hostops -from compute_hyperv.nova import imagecache -from compute_hyperv.nova import livemigrationops -from compute_hyperv.nova import migrationops -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import rdpconsoleops -from compute_hyperv.nova import serialconsoleops -from compute_hyperv.nova import snapshotops -from compute_hyperv.nova import vmops -from compute_hyperv.nova import volumeops - - -LOG = logging.getLogger(__name__) - - -def convert_exceptions(function, exception_map): - expected_exceptions = tuple(exception_map.keys()) - - @functools.wraps(function) - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except expected_exceptions as ex: - raised_exception = exception_map.get(type(ex)) - if not raised_exception: - # exception might be a subclass of an expected exception. - for expected in expected_exceptions: - if isinstance(ex, expected): - raised_exception = exception_map[expected] - break - - exc_info = sys.exc_info() - # NOTE(claudiub): Python 3 raises the exception object given as - # the second argument in six.reraise. - # The original message will be maintained by passing the original - # exception. 
exc = raised_exception(six.text_type(exc_info[1])) - six.reraise(raised_exception, exc, exc_info[2]) - return wrapper - - -def decorate_all_methods(decorator, *args, **kwargs): - def decorate(cls): - for attr in cls.__dict__: - class_member = getattr(cls, attr) - if callable(class_member): - setattr(cls, attr, decorator(class_member, *args, **kwargs)) - return cls - - return decorate - - -exception_conversion_map = { - # expected_exception: converted_exception - os_win_exc.OSWinException: exception.NovaException, - os_win_exc.HyperVVMNotFoundException: exception.InstanceNotFound, -} - -# NOTE(claudiub): the purpose of the decorator below is to prevent any -# os_win exceptions (subclasses of OSWinException) from leaking outside -# of the HyperVDriver. - - -@decorate_all_methods(convert_exceptions, exception_conversion_map) -class HyperVDriver(driver.ComputeDriver): - capabilities = { - "has_imagecache": True, - "supports_evacuate": True, - "supports_migrate_to_same_host": False, - "supports_attach_interface": True, - "supports_device_tagging": True, - "supports_tagged_attach_interface": True, - "supports_tagged_attach_volume": True, - "supports_extend_volume": True, - "supports_multiattach": False, - "supports_trusted_certs": True, - - # Supported image types - "supports_image_type_vhd": True, - "supports_image_type_vhdx": True, - } - - use_coordination = False - - def __init__(self, virtapi): - # check if the current version of Windows is supported before any - # further driver initialisation. - self._check_minimum_windows_version() - - # We'll initialize coordination as early as possible, avoiding - # the risk of using locks before the mechanism is enabled. - if self.use_coordination: - coordination.COORDINATOR.start() - - super(HyperVDriver, self).__init__(virtapi) - - self._hostops = hostops.HostOps() - self._volumeops = volumeops.VolumeOps() - self._vmops = vmops.VMOps(virtapi) - self._snapshotops = snapshotops.SnapshotOps() - self._livemigrationops = livemigrationops.LiveMigrationOps() - self._migrationops = migrationops.MigrationOps() - self._rdpconsoleops = rdpconsoleops.RDPConsoleOps() - self._serialconsoleops = serialconsoleops.SerialConsoleOps() - self._imagecache = imagecache.ImageCache() - self._image_api = glance.API() - self._pathutils = pathutils.PathUtils() - self._event_handler = eventhandler.InstanceEventHandler() - - def _check_minimum_windows_version(self): - hostutils = utilsfactory.get_hostutils() - if not hostutils.check_min_windows_version(6, 2): - # the version of Windows is older than Windows Server 2012. - # Log an error, letting users know that this version is not - # supported any longer. - LOG.error('You are running nova-compute on an unsupported ' - 'version of Windows (older than Windows / Hyper-V ' - 'Server 2012). The support for this version of ' - 'Windows has been removed in Mitaka.') - raise exception.HypervisorTooOld(version='6.2') - elif not hostutils.check_min_windows_version(6, 3): - # TODO(claudiub): replace the warning with an exception in Rocky. - LOG.warning('You are running nova-compute on Windows / Hyper-V ' - 'Server 2012. The support for this version of Windows ' - 'has been deprecated in Queens, and will be removed ' - 'in Rocky.')
- - def init_host(self, host): - self._serialconsoleops.start_console_handlers() - - self._set_event_handler_callbacks() - self._event_handler.start_listener() - - instance_path = self._pathutils.get_instances_dir() - self._pathutils.check_create_dir(instance_path) - - def _set_event_handler_callbacks(self): - # Subclasses may override this. - self._event_handler.add_callback(self.emit_event) - self._event_handler.add_callback( - self._vmops.instance_state_change_callback) - - def list_instance_uuids(self): - return self._vmops.list_instance_uuids() - - def list_instances(self): - return self._vmops.list_instances() - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, allocations, network_info=None, - block_device_info=None, power_on=True, accel_info=None): - image_meta = self._recreate_image_meta(context, instance, image_meta) - self._vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info, - power_on) - - def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None, - accel_info=None): - self._vmops.reboot(instance, network_info, reboot_type) - - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, destroy_secrets=True): - self._vmops.destroy(instance, network_info, block_device_info, - destroy_disks) - - def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None, destroy_vifs=True, - destroy_secrets=True): - """Cleanup after instance being destroyed by Hypervisor.""" - self.unplug_vifs(instance, network_info) - - def get_info(self, instance, use_cache=True): - return self._vmops.get_info(instance) - - def attach_volume(self, context, connection_info, instance, mountpoint, - disk_bus=None, device_type=None, encryption=None): - self._volumeops.attach_volume(context, - connection_info, - instance, - update_device_metadata=True) - - def detach_volume(self, context, connection_info, instance, mountpoint, - encryption=None): - context = nova_context.get_admin_context() - # The nova compute manager only updates the device metadata in - # case of tagged devices. We're including untagged devices as well.
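Before the detach call below resumes, a brief aside: every driver method here is wrapped by the convert_exceptions/decorate_all_methods pair defined earlier in this file, which keeps os_win exceptions from leaking past the driver boundary. On Python 3 alone, the same idea can be written without six; a restructured sketch (a decorator factory rather than the original two-argument helper, with made-up exception types):

import functools


def convert_exceptions_py3(exception_map):
    expected = tuple(exception_map)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except expected as ex:
                # find the mapped type, honoring subclasses as the
                # original fallback scan does
                target = next(exception_map[cls] for cls in expected
                              if isinstance(ex, cls))
                # 'raise ... from ...' chains the original traceback,
                # which is what six.reraise achieved across py2/py3
                raise target(str(ex)) from ex
        return wrapper
    return decorator


class BackendGone(Exception):
    pass


class NotFound(Exception):
    pass


@convert_exceptions_py3({BackendGone: NotFound})
def lookup(name):
    raise BackendGone('vm %s missing' % name)  # surfaces as NotFound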
- self._volumeops.detach_volume(context, - connection_info, - instance, - update_device_metadata=True) - - def extend_volume(self, connection_info, instance, requested_size): - self._volumeops.extend_volume(connection_info) - - def get_volume_connector(self, instance): - return self._volumeops.get_volume_connector() - - def get_available_resource(self, nodename): - return self._hostops.get_available_resource() - - def get_available_nodes(self, refresh=False): - return [platform.node()] - - def host_power_action(self, action): - return self._hostops.host_power_action(action) - - def snapshot(self, context, instance, image_id, update_task_state): - self._snapshotops.snapshot(context, instance, image_id, - update_task_state) - - def volume_snapshot_create(self, context, instance, volume_id, - create_info): - self._volumeops.volume_snapshot_create(context, instance, volume_id, - create_info) - - def volume_snapshot_delete(self, context, instance, volume_id, - snapshot_id, delete_info): - self._volumeops.volume_snapshot_delete(context, instance, volume_id, - snapshot_id, delete_info) - - def pause(self, instance): - self._vmops.pause(instance) - - def unpause(self, instance): - self._vmops.unpause(instance) - - def suspend(self, context, instance): - self._vmops.suspend(instance) - - def resume(self, context, instance, network_info, block_device_info=None): - self._vmops.resume(instance) - - def power_off(self, instance, timeout=0, retry_interval=0): - self._vmops.power_off(instance, timeout, retry_interval) - - def power_on(self, context, instance, network_info, - block_device_info=None, accel_info=None): - self._vmops.power_on(instance, block_device_info, network_info) - - def resume_state_on_host_boot(self, context, instance, network_info, - block_device_info=None): - """Resume guest state when a host is booted.""" - self._vmops.resume_state_on_host_boot(context, instance, network_info, - block_device_info) - - def live_migration(self, context, instance, dest, post_method, - recover_method, block_migration=False, - migrate_data=None): - self._livemigrationops.live_migration(context, instance, dest, - post_method, recover_method, - block_migration, migrate_data) - - def rollback_live_migration_at_destination(self, context, instance, - network_info, - block_device_info, - destroy_disks=True, - migrate_data=None): - self.destroy(context, instance, network_info, block_device_info, - destroy_disks=destroy_disks) - - def pre_live_migration(self, context, instance, block_device_info, - network_info, disk_info, migrate_data): - self._livemigrationops.pre_live_migration(context, instance, - block_device_info, - network_info) - return migrate_data - - def post_live_migration(self, context, instance, block_device_info, - migrate_data=None): - self._livemigrationops.post_live_migration(context, instance, - block_device_info, - migrate_data) - - def post_live_migration_at_source(self, context, instance, network_info): - """Unplug VIFs from networks at source.""" - self._vmops.unplug_vifs(instance, network_info) - - def post_live_migration_at_destination(self, context, instance, - network_info, - block_migration=False, - block_device_info=None): - self._livemigrationops.post_live_migration_at_destination( - context, - instance, - network_info, - block_migration) - - def check_can_live_migrate_destination(self, context, instance, - src_compute_info, dst_compute_info, - block_migration=False, - disk_over_commit=False): - return self._livemigrationops.check_can_live_migrate_destination( - context, instance, 
src_compute_info, dst_compute_info, - block_migration, disk_over_commit) - - def cleanup_live_migration_destination_check(self, context, - dest_check_data): - self._livemigrationops.cleanup_live_migration_destination_check( - context, dest_check_data) - - def check_can_live_migrate_source(self, context, instance, - dest_check_data, block_device_info=None): - return self._livemigrationops.check_can_live_migrate_source( - context, instance, dest_check_data) - - def get_instance_disk_info(self, instance, block_device_info=None): - pass - - def plug_vifs(self, instance, network_info): - """Plug VIFs into networks.""" - self._vmops.plug_vifs(instance, network_info) - - def unplug_vifs(self, instance, network_info): - """Unplug VIFs from networks.""" - self._vmops.unplug_vifs(instance, network_info) - - def ensure_filtering_rules_for_instance(self, instance, network_info): - LOG.debug("ensure_filtering_rules_for_instance called", - instance=instance) - - def unfilter_instance(self, instance, network_info): - LOG.debug("unfilter_instance called", instance=instance) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, - timeout=0, retry_interval=0): - return self._migrationops.migrate_disk_and_power_off(context, - instance, dest, - flavor, - network_info, - block_device_info, - timeout, - retry_interval) - - def confirm_migration(self, context, migration, instance, network_info): - self._migrationops.confirm_migration(context, migration, - instance, network_info) - - def finish_revert_migration(self, context, instance, network_info, - migration, block_device_info=None, - power_on=True): - self._migrationops.finish_revert_migration(context, instance, - network_info, - block_device_info, power_on) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - allocations, block_device_info=None, power_on=True): - image_meta = self._recreate_image_meta(context, instance, image_meta) - self._migrationops.finish_migration(context, migration, instance, - disk_info, network_info, - image_meta, resize_instance, - block_device_info, power_on) - - def get_host_ip_addr(self): - return self._hostops.get_host_ip_addr() - - def get_host_uptime(self): - return self._hostops.get_host_uptime() - - def get_rdp_console(self, context, instance): - return self._rdpconsoleops.get_rdp_console(instance) - - def get_serial_console(self, context, instance): - return self._serialconsoleops.get_serial_console(instance.name) - - def get_console_output(self, context, instance): - return self._serialconsoleops.get_console_output(instance.name) - - def manage_image_cache(self, context, all_instances): - self._imagecache.update(context, all_instances) - - def cache_image(self, context, image_id): - image_path, fetched = self._imagecache.cache_image( - context, image_id) - return fetched - - def attach_interface(self, context, instance, image_meta, vif): - self._vmops.attach_interface(context, instance, vif) - - def detach_interface(self, context, instance, vif): - # The device metadata gets updated outside the driver. 
- return self._vmops.detach_interface(instance, vif) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password, block_device_info): - image_meta = self._recreate_image_meta(context, instance, image_meta) - self._vmops.rescue_instance(context, instance, network_info, - image_meta, rescue_password) - - def unrescue( - self, - context: nova_context.RequestContext, - instance: 'objects.Instance', - ): - self._vmops.unrescue_instance(instance) - - def host_maintenance_mode(self, host, mode): - return self._hostops.host_maintenance_mode(host, mode) - - def _recreate_image_meta(self, context, instance, image_meta): - # TODO(claudiub): Cleanup this method. instance.system_metadata might - # already contain all the image metadata properties we need anyways. - if image_meta.obj_attr_is_set("id"): - image_ref = image_meta.id - else: - image_ref = instance.system_metadata['image_base_image_ref'] - - if image_ref: - image_meta = self._image_api.get(context, image_ref) - else: - # boot from volume does not have an image_ref. - image_meta = image_meta.obj_to_primitive()['nova_object.data'] - image_meta['properties'] = {k.replace('image_', '', 1): v for k, v - in instance.system_metadata.items()} - image_meta["id"] = image_ref - return image_meta - - def check_instance_shared_storage_local(self, context, instance): - """Check if instance files located on shared storage. - - This runs check on the destination host, and then calls - back to the source host to check the results. - - :param context: security context - :param instance: nova.objects.instance.Instance object - :returns: A dict containing the tempfile info. - """ - return self._pathutils.check_instance_shared_storage_local(instance) - - def check_instance_shared_storage_remote(self, context, data): - return self._pathutils.check_instance_shared_storage_remote(data) - - def check_instance_shared_storage_cleanup(self, context, data): - return self._pathutils.check_instance_shared_storage_cleanup(data) - - def update_provider_tree(self, provider_tree, nodename, allocations=None): - inventory = provider_tree.data(nodename).inventory - alloc_ratios = self._get_allocation_ratios(inventory) - - self._hostops.update_provider_tree( - provider_tree, nodename, alloc_ratios, allocations) diff --git a/compute_hyperv/nova/eventhandler.py b/compute_hyperv/nova/eventhandler.py deleted file mode 100644 index c0fe4d79..00000000 --- a/compute_hyperv/nova/eventhandler.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
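_recreate_image_meta, removed above, rebuilds image properties for boot-from-volume instances by stripping the 'image_' prefix Nova uses when mirroring image metadata into instance.system_metadata. The core transformation, isolated (the sample keys are illustrative):

def image_meta_from_system_metadata(system_metadata, image_ref):
    """Illustrative rendition of the boot-from-volume fallback above."""
    # strip the 'image_' prefix (first occurrence only) to recover the
    # original property names
    props = {k.replace('image_', '', 1): v
             for k, v in system_metadata.items()}
    return {'id': image_ref, 'properties': props}


sysmeta = {'image_hw_machine_type': 'hyperv-gen2',
           'image_base_image_ref': ''}
meta = image_meta_from_system_metadata(sysmeta, None)
assert meta['properties']['hw_machine_type'] == 'hyperv-gen2'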
- -from nova import utils -from nova.virt import event as virtevent -from os_win import constants -from os_win import utilsfactory -from oslo_log import log as logging - -import compute_hyperv.nova.conf -from compute_hyperv.nova import vmops - -LOG = logging.getLogger(__name__) - -CONF = compute_hyperv.nova.conf.CONF - - -class HyperVLifecycleEvent(virtevent.LifecycleEvent): - def __init__(self, uuid, name, transition, timestamp=None): - super(HyperVLifecycleEvent, self).__init__(uuid, transition, timestamp) - - self.name = name - - -class InstanceEventHandler(object): - _TRANSITION_MAP = { - constants.HYPERV_VM_STATE_ENABLED: virtevent.EVENT_LIFECYCLE_STARTED, - constants.HYPERV_VM_STATE_DISABLED: virtevent.EVENT_LIFECYCLE_STOPPED, - constants.HYPERV_VM_STATE_PAUSED: virtevent.EVENT_LIFECYCLE_PAUSED, - constants.HYPERV_VM_STATE_SUSPENDED: - virtevent.EVENT_LIFECYCLE_SUSPENDED - } - - def __init__(self): - self._vmutils = utilsfactory.get_vmutils() - self._listener = self._vmutils.get_vm_power_state_change_listener( - timeframe=CONF.hyperv.power_state_check_timeframe, - event_timeout=CONF.hyperv.power_state_event_polling_interval, - filtered_states=list(self._TRANSITION_MAP.keys()), - get_handler=True) - - self._vmops = vmops.VMOps() - - self._callbacks = [] - - def add_callback(self, callback): - self._callbacks.append(callback) - - def start_listener(self): - utils.spawn_n(self._listener, self._handle_event) - - def _handle_event(self, instance_name, instance_power_state): - # Instance uuid set by Nova. If this is missing, we assume that - # the instance was not created by Nova and ignore the event. - instance_uuid = self._vmops.get_instance_uuid(instance_name) - if instance_uuid: - self._emit_event(instance_name, - instance_uuid, - instance_power_state) - else: - LOG.debug("Instance uuid could not be retrieved for instance " - "%(instance_name)s. Instance state change event will " - "be ignored. Current power state: %(power_state)s.", - dict(instance_name=instance_name, - power_state=instance_power_state)) - - def _emit_event(self, instance_name, instance_uuid, instance_state): - virt_event = self._get_virt_event(instance_uuid, - instance_name, - instance_state) - - for callback in self._callbacks: - utils.spawn_n(callback, virt_event) - - def _get_virt_event(self, instance_uuid, instance_name, instance_state): - transition = self._TRANSITION_MAP[instance_state] - return HyperVLifecycleEvent( - uuid=instance_uuid, - name=instance_name, - transition=transition) diff --git a/compute_hyperv/nova/hostops.py b/compute_hyperv/nova/hostops.py deleted file mode 100644 index c3a479c5..00000000 --- a/compute_hyperv/nova/hostops.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for host operations. 
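The event handler removed just above translates os-win power-state notifications into Nova lifecycle events and fans each one out to registered callbacks. A compact sketch of that dispatch, with plain strings standing in for the os_win and nova.virt.event constants and a synchronous loop in place of the spawned callbacks:

TRANSITION_MAP = {
    'enabled': 'started',
    'disabled': 'stopped',
    'paused': 'paused',
    'suspended': 'suspended',
}


def handle_event(instance_name, power_state, get_uuid, callbacks):
    uuid = get_uuid(instance_name)
    if not uuid:
        # no Nova uuid in the VM notes: not a Nova instance, ignore it
        return
    event = {'uuid': uuid, 'name': instance_name,
             'transition': TRANSITION_MAP[power_state]}
    for callback in callbacks:
        callback(event)  # the removed code spawns each callback instead


handle_event('instance-00000001', 'enabled',
             lambda name: 'fake-uuid', [print])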
-""" -import datetime -import platform -import time - -from nova.compute import api -from nova.compute import utils as compute_utils -from nova.compute import vm_states -from nova import context -from nova import exception -from nova import objects -from nova.objects import fields as obj_fields -import os_resource_classes as orc -from os_win import constants as os_win_const -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import units - -from compute_hyperv.i18n import _ -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import vmops - -CONF = compute_hyperv.nova.conf.CONF -LOG = logging.getLogger(__name__) - - -class HostOps(object): - def __init__(self): - self._diskutils = utilsfactory.get_diskutils() - self._hostutils = utilsfactory.get_hostutils() - self._pathutils = pathutils.PathUtils() - self._vmutils = utilsfactory.get_vmutils() - self._vmops = vmops.VMOps() - self._api = api.API() - - def _get_cpu_info(self): - """Get the CPU information. - :returns: A dictionary containing the main properties - of the central processor in the hypervisor. - """ - cpu_info = dict() - - processors = self._hostutils.get_cpus_info() - - w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE - cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'], - 'Unknown') - cpu_info['model'] = processors[0]['Name'] - cpu_info['vendor'] = processors[0]['Manufacturer'] - - topology = dict() - topology['sockets'] = len(processors) - topology['cores'] = processors[0]['NumberOfCores'] - topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] // - processors[0]['NumberOfCores']) - cpu_info['topology'] = topology - - features = list() - for fkey, fname in os_win_const.PROCESSOR_FEATURE.items(): - if self._hostutils.is_cpu_feature_present(fkey): - features.append(fname) - cpu_info['features'] = features - - return cpu_info - - def _get_memory_info(self): - (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info() - total_mem_mb = total_mem_kb // 1024 - free_mem_mb = free_mem_kb // 1024 - return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb) - - def _get_storage_info_gb(self): - instances_dir = self._pathutils.get_instances_dir() - (size, free_space) = self._diskutils.get_disk_capacity( - instances_dir) - - total_gb = size // units.Gi - free_gb = free_space // units.Gi - used_gb = total_gb - free_gb - return (total_gb, free_gb, used_gb) - - def _get_hypervisor_version(self): - """Get hypervisor version. - :returns: hypervisor version (ex. 6003) - """ - - # NOTE(claudiub): The hypervisor_version will be stored in the database - # as an Integer and it will be used by the scheduler, if required by - # the image property 'hypervisor_version_requires'. - # The hypervisor_version will then be converted back to a version - # by splitting the int in groups of 3 digits. - # E.g.: hypervisor_version 6003 is converted to '6.3'. 
- version = self._hostutils.get_windows_version().split('.') - version = int(version[0]) * 1000 + int(version[1]) - LOG.debug('Windows version: %s ', version) - return version - - def _get_remotefx_gpu_info(self): - total_video_ram = 0 - available_video_ram = 0 - - if CONF.hyperv.enable_remotefx: - gpus = self._hostutils.get_remotefx_gpu_info() - for gpu in gpus: - total_video_ram += int(gpu['total_video_ram']) - available_video_ram += int(gpu['available_video_ram']) - else: - gpus = [] - - return {'total_video_ram': total_video_ram, - 'used_video_ram': total_video_ram - available_video_ram, - 'gpu_info': jsonutils.dumps(gpus)} - - def _get_host_numa_topology(self): - numa_nodes = self._hostutils.get_numa_nodes() - cells = [] - for numa_node in numa_nodes: - # Hyper-V does not support CPU pinning / mempages. - # initializing the rest of the fields. - numa_node.update(pinned_cpus=set(), mempages=[], siblings=[]) - cell = objects.NUMACell(**numa_node) - cells.append(cell) - - return objects.NUMATopology(cells=cells) - - @staticmethod - def get_hostname(): - return platform.node() - - def get_available_resource(self): - """Retrieve resource info. - - This method is called when nova-compute launches, and - as part of a periodic task. - - :returns: dictionary describing resources - - """ - LOG.debug('get_available_resource called') - - (total_mem_mb, - free_mem_mb, - used_mem_mb) = self._get_memory_info() - - (total_hdd_gb, - free_hdd_gb, - used_hdd_gb) = self._get_storage_info_gb() - - cpu_info = self._get_cpu_info() - cpu_topology = cpu_info['topology'] - vcpus = (cpu_topology['sockets'] * - cpu_topology['cores'] * - cpu_topology['threads']) - - # NOTE(claudiub): free_hdd_gb only refers to the currently free - # physical storage, it doesn't take into consideration the virtual - # sizes of the VMs' dynamic disks. This means that the VMs' disks can - # expand beyond the free_hdd_gb's value, and instances will still be - # scheduled to this compute node. - dic = {'vcpus': vcpus, - 'memory_mb': total_mem_mb, - 'memory_mb_used': used_mem_mb, - 'local_gb': total_hdd_gb, - 'local_gb_used': used_hdd_gb, - 'disk_available_least': free_hdd_gb, - 'hypervisor_type': "hyperv", - 'hypervisor_version': self._get_hypervisor_version(), - 'hypervisor_hostname': self.get_hostname(), - 'vcpus_used': 0, - 'cpu_info': jsonutils.dumps(cpu_info), - 'supported_instances': [ - (obj_fields.Architecture.I686, - obj_fields.HVType.HYPERV, - obj_fields.VMMode.HVM), - (obj_fields.Architecture.X86_64, - obj_fields.HVType.HYPERV, - obj_fields.VMMode.HVM)], - 'numa_topology': self._get_host_numa_topology()._to_json(), - 'pci_passthrough_devices': self._get_pci_passthrough_devices(), - } - - gpu_info = self._get_remotefx_gpu_info() - dic.update(gpu_info) - - return dic - - def _get_pci_passthrough_devices(self): - """Get host PCI devices information. - - Obtains PCI devices information and returns it as a JSON string. - - :returns: a JSON string containing a list of the assignable PCI - devices information. - """ - - pci_devices = self._hostutils.get_pci_passthrough_devices() - - for pci_dev in pci_devices: - # NOTE(claudiub): These fields are required by the PCI tracker. - dev_label = 'label_%(vendor_id)s_%(product_id)s' % { - 'vendor_id': pci_dev['vendor_id'], - 'product_id': pci_dev['product_id']} - - # TODO(claudiub): Find a way to associate the PCI devices with - # the NUMA nodes they are in. 
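-            # Illustrative example (IDs assumed for illustration): a device
-            # with vendor_id '8086' and product_id '1520' gets the label
-            # 'label_8086_1520' from the expression above.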
-            pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD,
-                           label=dev_label,
-                           numa_node=None)
-
-        return jsonutils.dumps(pci_devices)
-
-    def host_power_action(self, action):
-        """Reboots, shuts down or powers up the host."""
-        if action in [constants.HOST_POWER_ACTION_SHUTDOWN,
-                      constants.HOST_POWER_ACTION_REBOOT]:
-            self._hostutils.host_power_action(action)
-        else:
-            if action == constants.HOST_POWER_ACTION_STARTUP:
-                raise NotImplementedError(
-                    _("Host PowerOn is not supported by the Hyper-V driver"))
-
-    def get_host_ip_addr(self):
-        host_ip = CONF.my_ip
-        if not host_ip:
-            # Return the first available address
-            host_ip = self._hostutils.get_local_ips()[0]
-        LOG.debug("Host IP address is: %s", host_ip)
-        return host_ip
-
-    def get_host_uptime(self):
-        """Returns the host uptime."""
-
-        tick_count64 = self._hostutils.get_host_tick_count64()
-
-        # Format the string to match the libvirt driver's uptime, which
-        # combines the following:
-        #   - current host time
-        #   - time since the host is up
-        #   - number of logged in users
-        #   - cpu load
-        # Since the Windows function GetTickCount64 only returns the time
-        # since the host is up, 0 is reported for the cpu load and the
-        # number of logged in users, keeping the returned value in the
-        # same format as libvirt's.
-        return "%s up %s, 0 users, load average: 0, 0, 0" % (
-            str(time.strftime("%H:%M:%S")),
-            str(datetime.timedelta(milliseconds=int(tick_count64))))
-
-    def host_maintenance_mode(self, host, mode):
-        """Starts/Stops host maintenance. On start, it triggers
-        guest VMs evacuation.
-        """
-        ctxt = context.get_admin_context()
-
-        if not mode:
-            self._set_service_state(host=host, binary='nova-compute',
-                                    is_disabled=False)
-            LOG.info('Host is no longer under maintenance.')
-            return 'off_maintenance'
-
-        self._set_service_state(host=host, binary='nova-compute',
-                                is_disabled=True)
-        vms_uuids = self._vmops.list_instance_uuids()
-        for vm_uuid in vms_uuids:
-            self._wait_for_instance_pending_task(ctxt, vm_uuid)
-
-        vm_names = self._vmutils.list_instances()
-        for vm_name in vm_names:
-            self._migrate_vm(ctxt, vm_name, host)
-
-        vms_uuid_after_migration = self._vmops.list_instance_uuids()
-        remaining_vms = len(vms_uuid_after_migration)
-        if remaining_vms == 0:
-            LOG.info('All vms have been migrated successfully. '
                     'Host is down for maintenance.')
-            return 'on_maintenance'
-        raise exception.MigrationError(
-            reason=_('Not all vms have been migrated: %s remaining instances.')
-            % remaining_vms)
-
-    def _set_service_state(self, host, binary, is_disabled):
-        """Enables/Disables the service on the host."""
-
-        ctxt = context.get_admin_context(read_deleted='no')
-        service = objects.Service.get_by_args(ctxt, host, binary)
-        service.disabled = is_disabled
-        service.save()
-
-    def _migrate_vm(self, ctxt, vm_name, host):
-        try:
-            instance_uuid = self._vmutils.get_instance_uuid(vm_name)
-            if not instance_uuid:
-                LOG.info('VM "%s" running on this host was not created by '
-                         'nova. Skipping the migration of this VM to a new '
-                         'host.', vm_name)
-                return
-            instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
-            if instance.vm_state == vm_states.ACTIVE:
-                self._api.live_migrate(ctxt, instance, block_migration=False,
-                                       disk_over_commit=False, host_name=None)
-            else:
-                self._api.resize(ctxt, instance, flavor_id=None,
-                                 clean_shutdown=True)
-            self._wait_for_instance_pending_task(ctxt, instance_uuid)
-        except Exception as e:
-            LOG.error('Migrating vm failed with error: %s', e)
-            raise exception.MigrationError(reason='Unable to migrate %s.'
- % vm_name) - - def _wait_for_instance_pending_task(self, context, vm_uuid): - instance = objects.Instance.get_by_uuid(context, vm_uuid) - task_state_timeout = CONF.hyperv.evacuate_task_state_timeout - while instance.task_state: - LOG.debug("Waiting to evacuate instance %(instance_id)s. Current " - "task state: '%(task_state)s', Time remaining: " - "%(timeout)s.", {'instance_id': instance.id, - 'task_state': instance.task_state, - 'timeout': task_state_timeout}) - time.sleep(1) - instance.refresh() - task_state_timeout -= 1 - if task_state_timeout <= 0: - err = (_("Timeout error. Instance %(instance)s hasn't changed " - "task_state %(task_state)s within %(timeout)s " - "seconds.") % - {'instance': instance.name, - 'task_state': instance.task_state, - 'timeout': CONF.hyperv.evacuate_task_state_timeout}) - raise exception.InternalError(message=err) - - def update_provider_tree(self, provider_tree, nodename, - allocation_ratios, allocations=None): - resources = self.get_available_resource() - - inventory = { - orc.VCPU: { - 'total': resources['vcpus'], - 'min_unit': 1, - 'max_unit': resources['vcpus'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.VCPU], - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': resources['memory_mb'], - 'min_unit': 1, - 'max_unit': resources['memory_mb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.MEMORY_MB], - 'reserved': CONF.reserved_host_memory_mb, - }, - # TODO(lpetrut): once #1784020 is fixed, we can skip reporting - # shared storage capacity - orc.DISK_GB: { - 'total': resources['local_gb'], - 'min_unit': 1, - 'max_unit': resources['local_gb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.DISK_GB], - 'reserved': compute_utils.convert_mb_to_ceil_gb( - CONF.reserved_host_disk_mb), - }, - } - - provider_tree.update_inventory(nodename, inventory) diff --git a/compute_hyperv/nova/imagecache.py b/compute_hyperv/nova/imagecache.py deleted file mode 100644 index 9b9acd9f..00000000 --- a/compute_hyperv/nova/imagecache.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Image caching and management. 
-""" -import os -import re - -from nova.compute import utils as compute_utils -from nova import exception -from nova import utils -from nova.virt import imagecache -from nova.virt import images -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -from oslo_utils import uuidutils - -from compute_hyperv.i18n import _ -import compute_hyperv.nova.conf -from compute_hyperv.nova import pathutils - -LOG = logging.getLogger(__name__) - -CONF = compute_hyperv.nova.conf.CONF - - -class ImageCache(imagecache.ImageCacheManager): - def __init__(self): - super(ImageCache, self).__init__() - self._pathutils = pathutils.PathUtils() - self._vhdutils = utilsfactory.get_vhdutils() - self.used_images = [] - self.unexplained_images = [] - self.originals = [] - - def _get_root_vhd_size_gb(self, instance): - if instance.old_flavor: - return instance.old_flavor.root_gb - else: - return instance.flavor.root_gb - - def _resize_and_cache_vhd(self, instance, vhd_path): - vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize'] - - root_vhd_size_gb = self._get_root_vhd_size_gb(instance) - root_vhd_size = root_vhd_size_gb * units.Gi - - root_vhd_internal_size = ( - self._vhdutils.get_internal_vhd_size_by_file_size( - vhd_path, root_vhd_size)) - - if root_vhd_internal_size < vhd_size: - raise exception.FlavorDiskSmallerThanImage( - flavor_size=root_vhd_size, image_size=vhd_size) - if root_vhd_internal_size > vhd_size: - path_parts = os.path.splitext(vhd_path) - resized_vhd_path = '%s_%s%s' % (path_parts[0], - root_vhd_size_gb, - path_parts[1]) - - lock_path = os.path.dirname(resized_vhd_path) - lock_name = "%s-cache.lock" % os.path.basename(resized_vhd_path) - - @utils.synchronized(name=lock_name, external=True, - lock_path=lock_path) - def copy_and_resize_vhd(): - if not self._pathutils.exists(resized_vhd_path): - try: - LOG.debug("Copying VHD %(vhd_path)s to " - "%(resized_vhd_path)s", - {'vhd_path': vhd_path, - 'resized_vhd_path': resized_vhd_path}) - self._pathutils.copyfile(vhd_path, resized_vhd_path) - LOG.debug("Resizing VHD %(resized_vhd_path)s to new " - "size %(root_vhd_size)s", - {'resized_vhd_path': resized_vhd_path, - 'root_vhd_size': root_vhd_size}) - self._vhdutils.resize_vhd(resized_vhd_path, - root_vhd_internal_size, - is_file_max_size=False) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(resized_vhd_path): - self._pathutils.remove(resized_vhd_path) - - copy_and_resize_vhd() - return resized_vhd_path - - def get_cached_image(self, context, instance, rescue_image_id=None): - image_id = rescue_image_id or instance.image_ref - image_type = self.get_image_format(context, image_id, instance) - trusted_certs = instance.trusted_certs - image_path, already_exists = self.cache_image( - context, image_id, image_type, trusted_certs) - - # Note: rescue images are not resized. - is_vhd = image_path.split('.')[-1].lower() == 'vhd' - if (CONF.use_cow_images and is_vhd and not rescue_image_id): - # Resize the base VHD image as it's not possible to resize a - # differencing VHD. This does not apply to VHDX images. 
- resized_image_path = self._resize_and_cache_vhd(instance, - image_path) - if resized_image_path: - return resized_image_path - - if rescue_image_id: - self._verify_rescue_image(instance, rescue_image_id, image_path) - - return image_path - - def fetch(self, context, image_id, path, trusted_certs=None): - with compute_utils.disk_ops_semaphore: - images.fetch(context, image_id, path, trusted_certs) - - def append_image_format(self, path, image_type, do_rename=True): - if image_type == 'iso': - format_ext = 'iso' - else: - # Historically, the Hyper-V driver allowed VHDX images registered - # as VHD. We'll continue to do so for now. - format_ext = self._vhdutils.get_vhd_format(path) - new_path = path + '.' + format_ext.lower() - - if do_rename: - self._pathutils.rename(path, new_path) - - return new_path - - def get_image_format(self, context, image_id, instance=None): - image_format = None - if instance: - image_format = instance.system_metadata['image_disk_format'] - if not image_format: - image_info = images.get_info(context, image_id) - image_format = image_info['disk_format'] - return image_format - - def cache_image(self, context, image_id, - image_type=None, trusted_certs=None): - if not image_type: - image_type = self.get_image_format(context, image_id) - - base_image_dir = self._pathutils.get_base_vhd_dir() - base_image_path = os.path.join(base_image_dir, image_id) - - lock_name = "%s-cache.lock" % image_id - - @utils.synchronized(name=lock_name, external=True, - lock_path=base_image_dir) - def fetch_image_if_not_existing(): - fetched = False - image_path = None - for format_ext in ['vhd', 'vhdx', 'iso']: - test_path = base_image_path + '.' + format_ext - if self._pathutils.exists(test_path): - image_path = test_path - self._update_image_timestamp(image_id) - break - - if not image_path: - try: - self.fetch(context, image_id, base_image_path, - trusted_certs) - fetched = True - image_path = self.append_image_format( - base_image_path, image_type) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(base_image_path): - self._pathutils.remove(base_image_path) - - return image_path, fetched - - return fetch_image_if_not_existing() - - def _verify_rescue_image(self, instance, rescue_image_id, - rescue_image_path): - rescue_image_info = self._vhdutils.get_vhd_info(rescue_image_path) - rescue_image_size = rescue_image_info['VirtualSize'] - flavor_disk_size = instance.flavor.root_gb * units.Gi - - if rescue_image_size > flavor_disk_size: - err_msg = _('Using a rescue image bigger than the instance ' - 'flavor disk size is not allowed. ' - 'Rescue image size: %(rescue_image_size)s. 
' - 'Flavor disk size:%(flavor_disk_size)s.') % dict( - rescue_image_size=rescue_image_size, - flavor_disk_size=flavor_disk_size) - raise exception.ImageUnacceptable(reason=err_msg, - image_id=rescue_image_id) - - def get_image_details(self, context, instance): - image_id = instance.image_ref - return images.get_info(context, image_id) - - def _age_and_verify_cached_images(self, context, all_instances, base_dir): - for img in self.originals: - if img in self.used_images: - # change the timestamp on the image so as to reflect the last - # time it was used - self._update_image_timestamp(img) - elif CONF.image_cache.remove_unused_base_images: - self._remove_if_old_image(img) - - def _update_image_timestamp(self, image): - backing_files = self._get_image_backing_files(image) - for img in backing_files: - os.utime(img, None) - - def _get_image_backing_files(self, image): - base_file = self._pathutils.get_image_path(image) - if not base_file: - # not vhd or vhdx, ignore. - return [] - - backing_files = [base_file] - resize_re = re.compile('%s_[0-9]+$' % image, re.IGNORECASE) - for img in self.unexplained_images: - match = resize_re.match(img) - if match: - backing_files.append(self._pathutils.get_image_path(img)) - - return backing_files - - def _remove_if_old_image(self, image): - backing_files = self._get_image_backing_files(image) - max_age_seconds = ( - CONF.image_cache.remove_unused_original_minimum_age_seconds) - - for img in backing_files: - age_seconds = self._pathutils.get_age_of_file(img) - if age_seconds > max_age_seconds: - LOG.info("Removing old, unused image: %s", img) - self._remove_old_image(img) - - def _remove_old_image(self, image_path): - lock_path = os.path.dirname(image_path) - lock_name = "%s-cache.lock" % os.path.basename(image_path) - - @utils.synchronized(name=lock_name, external=True, - lock_path=lock_path) - def _image_synchronized_remove(): - self._pathutils.remove(image_path) - - _image_synchronized_remove() - - def update(self, context, all_instances): - base_vhd_dir = self._pathutils.get_base_vhd_dir() - - running = self._list_running_instances(context, all_instances) - self.used_images = running['used_images'].keys() - all_files = self._list_base_images(base_vhd_dir) - self.originals = all_files['originals'] - self.unexplained_images = all_files['unexplained_images'] - - self._age_and_verify_cached_images(context, all_instances, - base_vhd_dir) - - def _list_base_images(self, base_dir): - unexplained_images = [] - originals = [] - - for entry in os.listdir(base_dir): - file_name, extension = os.path.splitext(entry) - # extension has a leading '.'. E.g.: '.vhdx' - if extension.lstrip('.').lower() not in ['vhd', 'vhdx']: - # File is not an image. Ignore it. - # imagecache will not store images of any other formats. - continue - - if uuidutils.is_uuid_like(file_name): - originals.append(file_name) - else: - unexplained_images.append(file_name) - - return {'unexplained_images': unexplained_images, - 'originals': originals} diff --git a/compute_hyperv/nova/livemigrationops.py b/compute_hyperv/nova/livemigrationops.py deleted file mode 100644 index 7f74ef55..00000000 --- a/compute_hyperv/nova/livemigrationops.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for live migration VM operations. -""" - -from nova import exception -from nova.objects import migrate_data as migrate_data_obj -from os_win import utilsfactory -from oslo_log import log as logging - -from compute_hyperv.i18n import _ -from compute_hyperv.nova import block_device_manager -import compute_hyperv.nova.conf -from compute_hyperv.nova import imagecache -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import serialconsoleops -from compute_hyperv.nova import vmops -from compute_hyperv.nova import volumeops - -LOG = logging.getLogger(__name__) -CONF = compute_hyperv.nova.conf.CONF - - -class LiveMigrationOps(object): - def __init__(self): - self._livemigrutils = utilsfactory.get_livemigrationutils() - self._pathutils = pathutils.PathUtils() - self._vmops = vmops.VMOps() - self._volumeops = volumeops.VolumeOps() - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._imagecache = imagecache.ImageCache() - self._block_dev_man = block_device_manager.BlockDeviceInfoManager() - - def live_migration(self, context, instance_ref, dest, post_method, - recover_method, block_migration=False, - migrate_data=None): - LOG.debug("live_migration called", instance=instance_ref) - instance_name = instance_ref["name"] - - if migrate_data and 'is_shared_instance_path' in migrate_data: - shared_storage = migrate_data.is_shared_instance_path - else: - shared_storage = ( - self._pathutils.check_remote_instances_dir_shared(dest)) - if migrate_data: - migrate_data.is_shared_instance_path = shared_storage - else: - migrate_data = migrate_data_obj.HyperVLiveMigrateData( - is_shared_instance_path=shared_storage) - - try: - # We must make sure that the console log workers are stopped, - # otherwise we won't be able to delete / move VM log files. - self._serial_console_ops.stop_console_handler(instance_name) - - if not shared_storage: - self._pathutils.copy_vm_console_logs(instance_name, dest) - self._vmops.copy_vm_dvd_disks(instance_name, dest) - - self._livemigrutils.live_migrate_vm( - instance_name, - dest, - migrate_disks=not shared_storage) - except Exception: - # The recover method will update the migration state. - # We won't error out if we manage to recover the instance, - # which would otherwise end up in error state. - LOG.exception("Live migration failed. 
Attempting rollback.", - instance=instance_ref) - recover_method(context, instance_ref, dest, migrate_data) - return - - LOG.debug("Calling live migration post_method for instance: %s", - instance_name) - post_method(context, instance_ref, dest, - block_migration, migrate_data) - - def pre_live_migration(self, context, instance, block_device_info, - network_info): - LOG.debug("pre_live_migration called", instance=instance) - self._livemigrutils.check_live_migration_config() - - if CONF.use_cow_images: - boot_from_volume = self._block_dev_man.is_boot_from_volume( - block_device_info) - if not boot_from_volume and instance.image_ref: - self._imagecache.get_cached_image(context, instance) - - self._volumeops.connect_volumes(block_device_info) - - # A planned VM with updated disk paths is needed only in case of - # passthrough disks, in which case this will ensure that the volumes - # remain attached after the VM is migrated. - disk_path_mapping = self._volumeops.get_disk_path_mapping( - block_device_info, block_dev_only=True) - if disk_path_mapping: - # We create a planned VM, ensuring that volumes will remain - # attached after the VM is migrated. - self._livemigrutils.create_planned_vm(instance.name, - instance.host, - disk_path_mapping) - - def post_live_migration(self, context, instance, block_device_info, - migrate_data): - self._volumeops.disconnect_volumes(block_device_info) - - if not migrate_data.is_shared_instance_path: - self._pathutils.get_instance_dir(instance.name, - create_dir=False, - remove_dir=True) - - def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration): - LOG.debug("post_live_migration_at_destination called", - instance=instance_ref) - self._vmops.plug_vifs(instance_ref, network_info) - self._vmops.configure_instance_metrics(instance_ref.name) - - def check_can_live_migrate_destination(self, ctxt, instance_ref, - src_compute_info, dst_compute_info, - block_migration=False, - disk_over_commit=False): - LOG.debug("check_can_live_migrate_destination called", - instance=instance_ref) - - migrate_data = migrate_data_obj.HyperVLiveMigrateData() - try: - migrate_data.is_shared_instance_path = ( - self._pathutils.check_remote_instances_dir_shared( - instance_ref.host)) - except OSError as e: - reason = _('Unavailable instance location. Exception: %s') % e - raise exception.MigrationPreCheckError(reason=reason) - - return migrate_data - - def cleanup_live_migration_destination_check(self, ctxt, dest_check_data): - LOG.debug("cleanup_live_migration_destination_check called") - - def check_can_live_migrate_source(self, ctxt, instance_ref, - dest_check_data): - LOG.debug("check_can_live_migrate_source called", - instance=instance_ref) - return dest_check_data diff --git a/compute_hyperv/nova/migrationops.py b/compute_hyperv/nova/migrationops.py deleted file mode 100644 index e3a38ca0..00000000 --- a/compute_hyperv/nova/migrationops.py +++ /dev/null @@ -1,471 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for migration / resize operations. -""" -import os -import re - -from nova import block_device -import nova.conf -from nova import exception -from nova.virt import configdrive -from nova.virt import driver -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from compute_hyperv.i18n import _ -from compute_hyperv.nova import block_device_manager -from compute_hyperv.nova import constants -from compute_hyperv.nova import imagecache -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import vmops -from compute_hyperv.nova import volumeops - -LOG = logging.getLogger(__name__) -CONF = nova.conf.CONF - - -class MigrationOps(object): - - _ADMINISTRATIVE_SHARE_RE = re.compile(r'\\\\.*\\[a-zA-Z]\$\\.*') - - def __init__(self): - self._vmutils = utilsfactory.get_vmutils() - self._vhdutils = utilsfactory.get_vhdutils() - self._pathutils = pathutils.PathUtils() - self._volumeops = volumeops.VolumeOps() - self._vmops = vmops.VMOps() - self._imagecache = imagecache.ImageCache() - self._block_dev_man = block_device_manager.BlockDeviceInfoManager() - self._migrationutils = utilsfactory.get_migrationutils() - self._metricsutils = utilsfactory.get_metricsutils() - - def _move_vm_files(self, instance): - instance_path = self._pathutils.get_instance_dir(instance.name) - revert_path = self._pathutils.get_instance_migr_revert_dir( - instance_path, remove_dir=True, create_dir=True) - export_path = self._pathutils.get_export_dir( - instance_dir=revert_path, create_dir=True) - - # copy the given instance's files to a _revert folder, as backup. - LOG.debug("Moving instance files to a revert path: %s", - revert_path, instance=instance) - self._pathutils.move_folder_files(instance_path, revert_path) - self._pathutils.copy_vm_config_files(instance.name, export_path) - - return revert_path - - def _check_target_flavor(self, instance, flavor, block_device_info): - ephemerals = driver.block_device_info_get_ephemerals(block_device_info) - eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or - instance.flavor.ephemeral_gb) - - new_root_gb = flavor.root_gb - curr_root_gb = instance.flavor.root_gb - new_eph_size = flavor.ephemeral_gb - - root_down = new_root_gb < curr_root_gb - ephemeral_down = new_eph_size < eph_size - booted_from_volume = self._block_dev_man.is_boot_from_volume( - block_device_info) - - if root_down and not booted_from_volume: - raise exception.InstanceFaultRollback( - exception.CannotResizeDisk( - reason=_("Cannot resize the root disk to a smaller size. " - "Current size: %(curr_root_gb)s GB. Requested " - "size: %(new_root_gb)s GB.") % { - 'curr_root_gb': curr_root_gb, - 'new_root_gb': new_root_gb})) - # We allow having a new flavor with no ephemeral storage, in which - # case we'll just remove all the ephemeral disks. 
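-        # Illustrative example (sizes assumed for illustration): resizing
-        # from a flavor with root_gb=40 and ephemeral_gb=10 to one with
-        # root_gb=20 is rejected above unless the instance booted from a
-        # volume, while resizing to ephemeral_gb=0 is accepted and simply
-        # drops the ephemeral disks.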
- elif ephemeral_down and new_eph_size: - reason = (_("The new flavor ephemeral size (%(flavor_eph)s) is " - "smaller than the current total ephemeral disk size: " - "%(current_eph)s.") % - dict(flavor_eph=flavor.ephemeral_gb, - current_eph=eph_size)) - raise exception.InstanceFaultRollback( - exception.CannotResizeDisk(reason=reason)) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, timeout=0, - retry_interval=0): - LOG.debug("migrate_disk_and_power_off called", instance=instance) - - self._check_target_flavor(instance, flavor, block_device_info) - - self._vmops.power_off(instance, timeout, retry_interval) - instance_path = self._move_vm_files(instance) - - instance.system_metadata['backup_location'] = instance_path - instance.save() - - self._vmops.destroy(instance, network_info, - block_device_info, destroy_disks=True, - cleanup_migration_files=False) - - # return the instance's path location. - return instance_path - - def confirm_migration(self, context, migration, instance, network_info): - LOG.debug("confirm_migration called", instance=instance) - revert_path = instance.system_metadata['backup_location'] - export_path = self._pathutils.get_export_dir(instance_dir=revert_path) - self._pathutils.check_dir(export_path, remove_dir=True) - self._pathutils.check_dir(revert_path, remove_dir=True) - - def _revert_migration_files(self, instance): - revert_path = instance.system_metadata['backup_location'] - instance_path = re.sub('_revert$', '', revert_path) - - # the instance dir might still exist, if the destination node kept - # the files on the original node. - self._pathutils.check_dir(instance_path, remove_dir=True) - self._pathutils.rename(revert_path, instance_path) - return instance_path - - def _check_and_attach_config_drive(self, instance, vm_gen): - if configdrive.required_by(instance): - configdrive_path = self._pathutils.lookup_configdrive_path( - instance.name) - if configdrive_path: - self._vmops.attach_config_drive(instance, configdrive_path, - vm_gen) - else: - raise exception.ConfigDriveNotFound( - instance_uuid=instance.uuid) - - def finish_revert_migration(self, context, instance, network_info, - block_device_info=None, power_on=True): - LOG.debug("finish_revert_migration called", instance=instance) - instance_path = self._revert_migration_files(instance) - - image_meta = self._imagecache.get_image_details(context, instance) - self._import_and_setup_vm(context, instance, instance_path, image_meta, - block_device_info) - - if power_on: - self._vmops.power_on(instance, network_info=network_info) - - def _merge_base_vhd(self, diff_vhd_path, base_vhd_path): - base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path), - os.path.basename(base_vhd_path)) - try: - LOG.debug('Copying base disk %(base_vhd_path)s to ' - '%(base_vhd_copy_path)s', - {'base_vhd_path': base_vhd_path, - 'base_vhd_copy_path': base_vhd_copy_path}) - self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path) - - LOG.debug("Reconnecting copied base VHD " - "%(base_vhd_copy_path)s and diff " - "VHD %(diff_vhd_path)s", - {'base_vhd_copy_path': base_vhd_copy_path, - 'diff_vhd_path': diff_vhd_path}) - self._vhdutils.reconnect_parent_vhd(diff_vhd_path, - base_vhd_copy_path) - - LOG.debug("Merging differential disk %s into its parent.", - diff_vhd_path) - self._vhdutils.merge_vhd(diff_vhd_path) - - # Replace the differential VHD with the merged one - self._pathutils.rename(base_vhd_copy_path, diff_vhd_path) - except Exception: - with 
excutils.save_and_reraise_exception(): - if self._pathutils.exists(base_vhd_copy_path): - self._pathutils.remove(base_vhd_copy_path) - - def _check_resize_vhd(self, vhd_path, vhd_info, new_size): - curr_size = vhd_info['VirtualSize'] - if new_size < curr_size: - raise exception.CannotResizeDisk( - reason=_("Cannot resize the root disk to a smaller size. " - "Current size: %(curr_root_gb)s GB. Requested " - "size: %(new_root_gb)s GB.") % { - 'curr_root_gb': curr_size / units.Gi, - 'new_root_gb': new_size / units.Gi}) - elif new_size > curr_size: - self._resize_vhd(vhd_path, new_size) - - def _resize_vhd(self, vhd_path, new_size): - if vhd_path.split('.')[-1].lower() == "vhd": - LOG.debug("Getting parent disk info for disk: %s", vhd_path) - base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path) - if base_disk_path: - # A differential VHD cannot be resized. This limitation - # does not apply to the VHDX format. - self._merge_base_vhd(vhd_path, base_disk_path) - LOG.debug("Resizing disk \"%(vhd_path)s\" to new max " - "size %(new_size)s", - {'vhd_path': vhd_path, 'new_size': new_size}) - self._vhdutils.resize_vhd(vhd_path, new_size) - - def _check_base_disk(self, context, instance, diff_vhd_path, - src_base_disk_path): - base_vhd_path = self._imagecache.get_cached_image(context, instance) - - # If the location of the base host differs between source - # and target hosts we need to reconnect the base disk - if src_base_disk_path.lower() != base_vhd_path.lower(): - LOG.debug("Reconnecting copied base VHD " - "%(base_vhd_path)s and diff " - "VHD %(diff_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'diff_vhd_path': diff_vhd_path}) - self._vhdutils.reconnect_parent_vhd(diff_vhd_path, - base_vhd_path) - - def _migrate_disks_from_source(self, migration, instance, - source_inst_dir): - source_inst_dir = self._pathutils.get_remote_path( - migration.source_compute, source_inst_dir) - source_export_path = self._pathutils.get_export_dir( - instance_dir=source_inst_dir) - - if CONF.hyperv.move_disks_on_cold_migration: - # copy the files from the source node to this node's configured - # location. - inst_dir = self._pathutils.get_instance_dir( - instance.name, create_dir=True, remove_dir=True) - elif self._ADMINISTRATIVE_SHARE_RE.match(source_inst_dir): - # make sure that the source is not a remote local path. - # e.g.: \\win-srv\\C$\OpenStack\Instances\.. - # CSVs, local paths, and shares are fine. - # NOTE(claudiub): get rid of the final _revert part of the path. - # rstrip can remove more than _revert, which is not desired. - inst_dir = re.sub('_revert$', '', source_inst_dir) - - LOG.warning( - 'Host is configured not to copy disks on cold migration, but ' - 'the instance will not be able to start with the remote path: ' - '"%s". Only local, share, or CSV paths are acceptable.', - inst_dir) - inst_dir = self._pathutils.get_instance_dir( - instance.name, create_dir=True, remove_dir=True) - else: - # make a copy on the source node's configured location. - # strip the _revert from the source backup dir. 
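-            # Illustrative example (hypothetical path): a source dir such as
-            # 'C:\ClusterStorage\Volume1\instance-00000001_revert' becomes
-            # 'C:\ClusterStorage\Volume1\instance-00000001' here.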
- inst_dir = re.sub('_revert$', '', source_inst_dir) - self._pathutils.check_dir(inst_dir, create_dir=True) - - export_path = self._pathutils.get_export_dir( - instance_dir=inst_dir) - - self._pathutils.copy_folder_files(source_inst_dir, inst_dir) - self._pathutils.copy_dir(source_export_path, export_path) - return inst_dir - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance=False, - block_device_info=None, power_on=True): - LOG.debug("finish_migration called", instance=instance) - instance_dir = self._migrate_disks_from_source(migration, instance, - disk_info) - - # NOTE(claudiub): nova compute manager only takes into account disk - # flavor changes when passing to the driver resize_instance=True. - # we need to take into account flavor extra_specs as well. - resize_instance = ( - migration.old_instance_type_id != migration.new_instance_type_id) - - self._import_and_setup_vm(context, instance, instance_dir, image_meta, - block_device_info, resize_instance) - - if power_on: - self._vmops.power_on(instance, network_info=network_info) - - def _import_and_setup_vm(self, context, instance, instance_dir, image_meta, - block_device_info, resize_instance=False): - vm_gen = self._vmops.get_image_vm_generation(instance.uuid, image_meta) - self._import_vm(instance_dir) - self._vmops.update_vm_resources(instance, vm_gen, image_meta, - instance_dir, resize_instance) - - self._volumeops.connect_volumes(block_device_info) - self._update_disk_image_paths(instance, instance_dir) - self._check_and_update_disks(context, instance, vm_gen, image_meta, - block_device_info, - resize_instance=resize_instance) - self._volumeops.fix_instance_volume_disk_paths( - instance.name, block_device_info) - - self._migrationutils.realize_vm(instance.name) - - # During a resize, ephemeral disks may be removed. We cannot remove - # disks from a planned vm, for which reason we have to do this after - # *realizing* it. At the same time, we cannot realize a VM before - # updating disks to use the destination paths. - ephemerals = block_device_info['ephemerals'] - self._check_ephemeral_disks(instance, ephemerals, resize_instance) - - self._vmops.configure_remotefx(instance, vm_gen, resize_instance) - self._vmops.configure_instance_metrics(instance.name) - - def _import_vm(self, instance_dir): - snapshot_dir = self._pathutils.get_instance_snapshot_dir( - instance_dir=instance_dir) - export_dir = self._pathutils.get_export_dir(instance_dir=instance_dir) - vm_config_file_path = self._pathutils.get_vm_config_file(export_dir) - - self._migrationutils.import_vm_definition(vm_config_file_path, - snapshot_dir) - - # NOTE(claudiub): after the VM was imported, the VM config files are - # not necessary anymore. - self._pathutils.get_export_dir(instance_dir=instance_dir, - remove_dir=True) - - def _update_disk_image_paths(self, instance, instance_path): - """Checks if disk images have the correct path and updates them if not. - - When resizing an instance, the vm is imported on the destination node - and the disk files are copied from source node. If the hosts have - different instance_path config options set, the disks are migrated to - the correct paths, but vm disk resources are not updated to point to - the new location. 
- """ - (disk_files, volume_drives) = self._vmutils.get_vm_storage_paths( - instance.name) - - pattern = re.compile('configdrive|eph|root', re.IGNORECASE) - for disk_file in disk_files: - disk_name = os.path.basename(disk_file) - if not pattern.match(disk_name): - # skip files that do not match the pattern. - continue - - expected_disk_path = os.path.join(instance_path, disk_name) - if not os.path.exists(expected_disk_path): - raise exception.DiskNotFound(location=expected_disk_path) - - if expected_disk_path.lower() != disk_file.lower(): - LOG.debug("Updating VM disk location from %(src)s to %(dest)s", - {'src': disk_file, 'dest': expected_disk_path, - 'instance': instance}) - self._vmutils.update_vm_disk_path(disk_file, - expected_disk_path, - is_physical=False) - - def _check_and_update_disks(self, context, instance, vm_gen, image_meta, - block_device_info, resize_instance=False): - self._block_dev_man.validate_and_update_bdi(instance, image_meta, - vm_gen, block_device_info) - root_device = block_device_info['root_disk'] - - if root_device['type'] == constants.DISK: - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) - root_device['path'] = root_vhd_path - if not root_vhd_path: - base_vhd_path = self._pathutils.get_instance_dir(instance.name) - raise exception.DiskNotFound(location=base_vhd_path) - - root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) - src_base_disk_path = root_vhd_info.get("ParentPath") - if src_base_disk_path: - self._check_base_disk(context, instance, root_vhd_path, - src_base_disk_path) - - if resize_instance: - new_size = instance.flavor.root_gb * units.Gi - self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size) - - def _check_ephemeral_disks(self, instance, ephemerals, - resize_instance=False): - instance_name = instance.name - new_eph_gb = instance.get('ephemeral_gb', 0) - ephemerals_to_remove = set() - - if not ephemerals and new_eph_gb: - # No explicit ephemeral disk bdm was retrieved, yet the flavor - # provides ephemeral storage, for which reason we're adding a - # default ephemeral disk. - eph = dict(device_type='disk', - drive_addr=0, - size=new_eph_gb) - ephemerals.append(eph) - - if len(ephemerals) == 1: - # NOTE(claudiub): Resize only if there is one ephemeral. If there - # are more than 1, resizing them can be problematic. This behaviour - # also exists in the libvirt driver and it has to be addressed in - # the future. - ephemerals[0]['size'] = new_eph_gb - elif new_eph_gb and sum( - eph['size'] for eph in ephemerals) != new_eph_gb: - # New ephemeral size is different from the original ephemeral size - # and there are multiple ephemerals. - LOG.warning("Cannot resize multiple ephemeral disks for instance.", - instance=instance) - - for index, eph in enumerate(ephemerals): - eph_name = "eph%s" % index - existing_eph_path = self._pathutils.lookup_ephemeral_vhd_path( - instance_name, eph_name) - - if not existing_eph_path and eph['size']: - eph['format'] = self._vhdutils.get_best_supported_vhd_format() - eph['path'] = self._pathutils.get_ephemeral_vhd_path( - instance_name, eph['format'], eph_name) - if not resize_instance: - # ephemerals should have existed. - raise exception.DiskNotFound(location=eph['path']) - - # We cannot rely on the BlockDeviceInfoManager class to - # provide us a disk slot as it's only usable when creating - # new instances (it's not aware of the current disk address - # layout). 
- # There's no way in which IDE may be requested for new - # ephemeral disks (after a resize), so we'll just enforce - # SCSI for now. os-win does not currently allow retrieving - # free IDE slots. - ctrller_path = self._vmutils.get_vm_scsi_controller( - instance.name) - ctrl_addr = self._vmutils.get_free_controller_slot( - ctrller_path) - eph['disk_bus'] = constants.CTRL_TYPE_SCSI - eph['ctrl_disk_addr'] = ctrl_addr - - # create ephemerals - self._vmops.create_ephemeral_disk(instance.name, eph) - self._vmops.attach_ephemerals(instance_name, [eph]) - elif eph['size'] > 0: - # ephemerals exist. resize them. - eph['path'] = existing_eph_path - eph_vhd_info = self._vhdutils.get_vhd_info(eph['path']) - self._check_resize_vhd( - eph['path'], eph_vhd_info, eph['size'] * units.Gi) - else: - eph['path'] = None - # ephemeral new size is 0, remove it. - ephemerals_to_remove.add(existing_eph_path) - - if not new_eph_gb: - # The new flavor does not provide any ephemeral storage. We'll - # remove any existing ephemeral disk (default ones included). - attached_ephemerals = self._vmops.get_attached_ephemeral_disks( - instance.name) - ephemerals_to_remove |= set(attached_ephemerals) - - for eph_path in ephemerals_to_remove: - self._vmutils.detach_vm_disk(instance_name, eph_path, - is_physical=False) - self._pathutils.remove(eph_path) diff --git a/compute_hyperv/nova/pathutils.py b/compute_hyperv/nova/pathutils.py deleted file mode 100644 index 5a776d96..00000000 --- a/compute_hyperv/nova/pathutils.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile -import time - -from nova import exception -from os_win import exceptions as os_win_exc -from os_win.utils import pathutils -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import uuidutils - -from compute_hyperv.i18n import _ -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants - -LOG = logging.getLogger(__name__) - -CONF = compute_hyperv.nova.conf.CONF - -ERROR_INVALID_NAME = 123 - -# NOTE(claudiub): part of the pre-existing PathUtils is nova-specific and -# it does not belong in the os-win library. In order to ensure the same -# functionality with the least amount of changes necessary, adding as a mixin -# the os_win.pathutils.PathUtils class into this PathUtils. - - -class PathUtils(pathutils.PathUtils): - - _CSV_FOLDER = 'ClusterStorage\\' - - def __init__(self): - super(PathUtils, self).__init__() - self._vmutils = utilsfactory.get_vmutils() - - def copy_folder_files(self, src_dir, dest_dir): - """Copies the files of the given src_dir to dest_dir. - - It will ignore any nested folders. - - :param src_dir: Given folder from which to copy files. - :param dest_dir: Folder to which to copy files. - """ - - # NOTE(claudiub): this will have to be moved to os-win. 
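-        # Illustrative example (hypothetical layout): for a src_dir
-        # containing 'root.vhdx' plus a 'Snapshots' subfolder, only
-        # 'root.vhdx' is copied to dest_dir; the subfolder is skipped.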
- - for fname in os.listdir(src_dir): - src = os.path.join(src_dir, fname) - # ignore subdirs. - if os.path.isfile(src): - self.copy(src, os.path.join(dest_dir, fname)) - - def get_instances_dir(self, remote_server=None): - local_instance_path = os.path.normpath(CONF.instances_path) - - if remote_server and not local_instance_path.startswith(r'\\'): - if CONF.hyperv.instances_path_share: - path = CONF.hyperv.instances_path_share - else: - # In this case, we expect the instance dir to have the same - # location on the remote server. - path = local_instance_path - return self.get_remote_path(remote_server, path) - else: - return local_instance_path - - def get_remote_path(self, remote_server, remote_path): - if remote_path.startswith('\\\\'): - return remote_path - - # Use an administrative share - remote_unc_path = ('\\\\%(remote_server)s\\%(path)s' % - dict(remote_server=remote_server, - path=remote_path.replace(':', '$'))) - - csv_location = '\\'.join([os.getenv('SYSTEMDRIVE', 'C:'), - self._CSV_FOLDER]) - if remote_path.lower().startswith(csv_location.lower()): - # the given remote_path is a CSV path. - # Return remote_path as the local path. - LOG.debug("Remote path %s is on a CSV. Returning as a local path.", - remote_path) - return remote_path - - LOG.debug('Returning UNC path %(unc_path)s for host %(host)s.', - dict(unc_path=remote_unc_path, host=remote_server)) - return remote_unc_path - - def _get_instances_sub_dir(self, dir_name, remote_server=None, - create_dir=True, remove_dir=False): - instances_path = self.get_instances_dir(remote_server) - path = os.path.join(instances_path, dir_name) - self.check_dir(path, create_dir=create_dir, remove_dir=remove_dir) - - return path - - def check_dir(self, path, create_dir=False, remove_dir=False): - try: - if remove_dir: - self.check_remove_dir(path) - if create_dir: - self.check_create_dir(path) - except WindowsError as ex: - if ex.winerror == ERROR_INVALID_NAME: - raise exception.AdminRequired(_( - "Cannot access \"%(path)s\", make sure the " - "path exists and that you have the proper permissions. " - "In particular Nova-Compute must not be executed with the " - "builtin SYSTEM account or other accounts unable to " - "authenticate on a remote host.") % {'path': path}) - raise - - def get_instance_migr_revert_dir(self, instance_path, create_dir=False, - remove_dir=False): - dir_name = '%s_revert' % instance_path - self.check_dir(dir_name, create_dir, remove_dir) - return dir_name - - def get_instance_dir(self, instance_name, remote_server=None, - create_dir=True, remove_dir=False): - instance_dir = self._get_instances_sub_dir( - instance_name, remote_server, - create_dir=False, remove_dir=False) - - # In some situations, the instance files may reside at a different - # location than the configured one. 
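-        # Illustrative example (paths assumed for illustration): with
-        # CONF.instances_path set to 'C:\OpenStack\Instances' but the VM
-        # actually living under 'C:\ClusterStorage\Volume1\instance-00000001',
-        # the fallback below asks the vmutils for the VM's configured root
-        # directory instead of failing on the default location.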
- if not os.path.exists(instance_dir): - vmutils = (self._vmutils if not remote_server - else utilsfactory.get_vmutils(remote_server)) - try: - instance_dir = vmutils.get_vm_config_root_dir( - instance_name) - if remote_server: - instance_dir = self.get_remote_path(remote_server, - instance_dir) - LOG.info("Found instance dir at non-default location: %s", - instance_dir) - except os_win_exc.HyperVVMNotFoundException: - pass - - self.check_dir(instance_dir, - create_dir=create_dir, - remove_dir=remove_dir) - return instance_dir - - def _lookup_vhd_path(self, instance_name, vhd_path_func, - *args, **kwargs): - vhd_path = None - for format_ext in ['vhd', 'vhdx']: - test_path = vhd_path_func(instance_name, format_ext, - *args, **kwargs) - if self.exists(test_path): - vhd_path = test_path - break - return vhd_path - - def lookup_root_vhd_path(self, instance_name, rescue=False): - return self._lookup_vhd_path(instance_name, self.get_root_vhd_path, - rescue) - - def lookup_configdrive_path(self, instance_name, rescue=False): - configdrive_path = None - for format_ext in constants.DISK_FORMAT_MAP: - test_path = self.get_configdrive_path(instance_name, format_ext, - rescue=rescue) - if self.exists(test_path): - configdrive_path = test_path - break - return configdrive_path - - def lookup_ephemeral_vhd_path(self, instance_name, eph_name): - return self._lookup_vhd_path(instance_name, - self.get_ephemeral_vhd_path, - eph_name) - - def get_root_vhd_path(self, instance_name, format_ext=None, rescue=False): - instance_path = self.get_instance_dir(instance_name) - image_name = 'root' - if rescue: - image_name += '-rescue' - if format_ext: - image_name += '.' + format_ext.lower() - return os.path.join(instance_path, image_name) - - def get_configdrive_path(self, instance_name, format_ext, - remote_server=None, rescue=False): - instance_path = self.get_instance_dir(instance_name, remote_server) - configdrive_image_name = 'configdrive' - if rescue: - configdrive_image_name += '-rescue' - return os.path.join(instance_path, - configdrive_image_name + '.' + format_ext.lower()) - - def get_ephemeral_vhd_path(self, instance_name, format_ext, eph_name): - instance_path = self.get_instance_dir(instance_name) - return os.path.join(instance_path, eph_name + '.' 
+ format_ext.lower()) - - def get_base_vhd_dir(self): - return self._get_instances_sub_dir('_base') - - def get_export_dir(self, instance_name=None, instance_dir=None, - create_dir=False, remove_dir=False): - if not instance_dir: - instance_dir = self.get_instance_dir(instance_name, - create_dir=create_dir) - - export_dir = os.path.join(instance_dir, 'export') - self.check_dir(export_dir, create_dir=create_dir, - remove_dir=remove_dir) - return export_dir - - def get_vm_console_log_paths(self, instance_name, remote_server=None): - instance_dir = self.get_instance_dir(instance_name, - remote_server) - console_log_path = os.path.join(instance_dir, 'console.log') - return console_log_path, console_log_path + '.1' - - def copy_vm_console_logs(self, instance_name, dest_host): - local_log_paths = self.get_vm_console_log_paths( - instance_name) - remote_log_paths = self.get_vm_console_log_paths( - instance_name, remote_server=dest_host) - - for local_log_path, remote_log_path in zip(local_log_paths, - remote_log_paths): - if self.exists(local_log_path): - self.copy(local_log_path, remote_log_path) - - def get_image_path(self, image_name): - # Note: it is possible that the path doesn't exist - base_dir = self.get_base_vhd_dir() - for ext in ['vhd', 'vhdx', 'iso']: - file_path = os.path.join(base_dir, - image_name + '.' + ext.lower()) - if self.exists(file_path): - return file_path - return None - - def get_age_of_file(self, file_name): - return time.time() - os.path.getmtime(file_name) - - def check_dirs_shared_storage(self, src_dir, dest_dir): - # Check if shared storage is being used by creating a temporary - # file at the destination path and checking if it exists at the - # source path. - LOG.debug("Checking if %(src_dir)s and %(dest_dir)s point " - "to the same location.", - dict(src_dir=src_dir, dest_dir=dest_dir)) - with tempfile.NamedTemporaryFile(dir=dest_dir) as tmp_file: - src_path = os.path.join(src_dir, - os.path.basename(tmp_file.name)) - - shared_storage = os.path.exists(src_path) - return shared_storage - - def check_remote_instances_dir_shared(self, dest): - # Checks if the instances dir from a remote host points - # to the same storage location as the local instances dir. - local_inst_dir = self.get_instances_dir() - remote_inst_dir = self.get_instances_dir(dest) - return self.check_dirs_shared_storage(local_inst_dir, - remote_inst_dir) - - def check_instance_shared_storage_local(self, instance): - instance_dir = self.get_instance_dir(instance.name) - - fd, tmp_file = tempfile.mkstemp(dir=instance_dir) - LOG.debug("Creating tmpfile %s to verify with other " - "compute node that the instance is on " - "the same shared storage.", - tmp_file, instance=instance) - os.close(fd) - # We're sticking with the same dict key as the libvirt driver. - # At some point, this may become a versioned object. 
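-        # Illustrative flow: this (destination) host creates the temp file
-        # and returns {'filename': ...}; the source host then calls
-        # check_instance_shared_storage_remote(), where os.path.exists()
-        # only succeeds if both hosts see the same storage; finally,
-        # check_instance_shared_storage_cleanup() removes the temp file.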
- return {"filename": tmp_file} - - def check_instance_shared_storage_remote(self, data): - return os.path.exists(data['filename']) - - def check_instance_shared_storage_cleanup(self, data): - fileutils.delete_if_exists(data["filename"]) - - def get_instance_snapshot_dir(self, instance_name=None, instance_dir=None): - if instance_name: - instance_dir = self.get_instance_dir(instance_name, - create_dir=False) - return os.path.join(instance_dir, 'Snapshots') - - def get_instance_virtual_machines_dir(self, instance_name=None, - instance_dir=None): - if instance_name: - instance_dir = self.get_instance_dir(instance_name, - create_dir=False) - return os.path.join(instance_dir, "Virtual Machines") - - def copy_vm_config_files(self, instance_name, dest_dir): - """Copies the VM configuration files to the given destination folder. - - :param instance_name: the given instance's name. - :param dest_dir: the location where the VM configuration files are - copied to. - """ - src_dir = self.get_instance_virtual_machines_dir(instance_name) - self.copy_folder_files(src_dir, dest_dir) - - def get_vm_config_file(self, path): - for dir_file in os.listdir(path): - file_name, file_ext = os.path.splitext(dir_file) - if (file_ext.lower() in ['.vmcx', '.xml'] and - uuidutils.is_uuid_like(file_name)): - - config_file = os.path.join(path, dir_file) - LOG.debug("Found VM config file: %s", config_file) - return config_file - - raise exception.NotFound( - _("Folder %s does not contain any VM config data file.") % path) diff --git a/compute_hyperv/nova/pdk.py b/compute_hyperv/nova/pdk.py deleted file mode 100644 index eb3bf908..00000000 --- a/compute_hyperv/nova/pdk.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from barbicanclient import client as barbican_client -from keystoneauth1 import session -from nova import exception -from os_win._i18n import _ - - -class PDK(object): - - def create_pdk(self, context, instance, image_meta, pdk_filepath): - """Generates a pdk file using the barbican container referenced by - the image metadata or instance metadata. A pdk file is a shielding - data file which contains a RDP certificate, unattended file, - volume signature catalogs and guardian metadata. - """ - - with open(pdk_filepath, 'wb') as pdk_file_handle: - pdk_reference = self._get_pdk_reference(instance, image_meta) - pdk_container = self._get_pdk_container(context, instance, - pdk_reference) - pdk_data = self._get_pdk_data(pdk_container) - pdk_file_handle.write(pdk_data) - - def _get_pdk_reference(self, instance, image_meta): - image_pdk_ref = image_meta['properties'].get('img_pdk_reference') - boot_metadata_pdk_ref = instance.metadata.get('img_pdk_reference') - - if not (image_pdk_ref or boot_metadata_pdk_ref): - reason = _('A reference to a barbican container containing the ' - 'pdk file must be passed as an image property. 
This ' - 'is required in order to enable VTPM') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - return boot_metadata_pdk_ref or image_pdk_ref - - def _get_pdk_container(self, context, instance, pdk_reference): - """Retrieves the barbican container containing the pdk file. - """ - - auth = context.get_auth_plugin() - sess = session.Session(auth=auth) - brb_client = barbican_client.Client(session=sess) - - try: - pdk_container = brb_client.containers.get(pdk_reference) - except Exception as e: - err_msg = _("Retrieving barbican container with reference " - "%(pdk_reference)s failed with error: %(error)s") % { - 'pdk_reference': pdk_reference, - 'error': e} - raise exception.InvalidMetadata(instance_id=instance.uuid, - reason=err_msg) - return pdk_container - - def _get_pdk_data(self, pdk_container): - """Return the data from all of the barbican container's secrets. - """ - - no_of_secrets = len(pdk_container.secrets) - data = bytes() - for index in range(no_of_secrets): - current_secret = pdk_container.secrets[str(index + 1)] - retrieved_secret_data = current_secret.payload - data += retrieved_secret_data - return data diff --git a/compute_hyperv/nova/rdpconsoleops.py b/compute_hyperv/nova/rdpconsoleops.py deleted file mode 100644 index cbd92900..00000000 --- a/compute_hyperv/nova/rdpconsoleops.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.console import type as ctype -from os_win import utilsfactory -from oslo_log import log as logging - -from compute_hyperv.nova import hostops - -LOG = logging.getLogger(__name__) - - -class RDPConsoleOps(object): - def __init__(self): - self._hostops = hostops.HostOps() - self._vmutils = utilsfactory.get_vmutils() - self._rdpconsoleutils = utilsfactory.get_rdpconsoleutils() - - def get_rdp_console(self, instance): - LOG.debug("get_rdp_console called", instance=instance) - host = self._hostops.get_host_ip_addr() - port = self._rdpconsoleutils.get_rdp_console_port() - vm_id = self._vmutils.get_vm_id(instance.name) - - LOG.debug("RDP console: %(host)s:%(port)s, %(vm_id)s", - {"host": host, "port": port, "vm_id": vm_id}) - - return ctype.ConsoleRDP( - host=host, port=port, internal_access_path=vm_id) diff --git a/compute_hyperv/nova/serialconsolehandler.py b/compute_hyperv/nova/serialconsolehandler.py deleted file mode 100644 index cc40d836..00000000 --- a/compute_hyperv/nova/serialconsolehandler.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from eventlet import patcher -from nova.console import serial as serial_console -from nova.console import type as ctype -from nova import exception -from nova.i18n import _ -from os_win.utils.io import ioutils -from os_win import utilsfactory -from oslo_log import log as logging - -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import serialproxy - -CONF = compute_hyperv.nova.conf.CONF -LOG = logging.getLogger(__name__) - -threading = patcher.original('threading') - - -class SerialConsoleHandler(object): - """Handles serial console ops related to a given instance.""" - def __init__(self, instance_name): - self._vmutils = utilsfactory.get_vmutils() - self._pathutils = pathutils.PathUtils() - - self._instance_name = instance_name - self._log_path = self._pathutils.get_vm_console_log_paths( - self._instance_name)[0] - - self._client_connected = None - self._input_queue = None - self._output_queue = None - - self._serial_proxy = None - self._workers = [] - self._log_handler = None - - def start(self): - self._setup_handlers() - - for worker in self._workers: - worker.start() - - def stop(self): - for worker in self._workers: - worker.stop() - - if self._serial_proxy: - serial_console.release_port(self._listen_host, - self._listen_port) - - def _setup_handlers(self): - if CONF.serial_console.enabled: - self._setup_serial_proxy_handler() - - self._setup_named_pipe_handlers() - - def _setup_serial_proxy_handler(self): - self._listen_host = ( - CONF.serial_console.proxyclient_address) - self._listen_port = serial_console.acquire_port( - self._listen_host) - - LOG.info('Initializing serial proxy on ' - '%(addr)s:%(port)s, handling connections ' - 'to instance %(instance_name)s.', - {'addr': self._listen_host, - 'port': self._listen_port, - 'instance_name': self._instance_name}) - - # Use this event in order to manage - # pending queue operations. - self._client_connected = threading.Event() - self._input_queue = ioutils.IOQueue( - client_connected=self._client_connected) - self._output_queue = ioutils.IOQueue( - client_connected=self._client_connected) - - self._serial_proxy = serialproxy.SerialProxy( - self._instance_name, self._listen_host, - self._listen_port, self._input_queue, - self._output_queue, self._client_connected) - - self._workers.append(self._serial_proxy) - - def _setup_named_pipe_handlers(self): - # At most 2 named pipes will be used to access the vm serial ports. - # - # The named pipe having the 'ro' suffix will be used only for logging - # while the 'rw' pipe will be used for interactive sessions, logging - # only when there is no 'ro' pipe. 
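The invariant described in the comment above is that console output must be captured exactly once: the 'ro' pipe logs whenever it exists, and the 'rw' pipe only logs as a fallback. A compact sketch of that decision (the 'ro'/'rw' constants are assumed to match the pipe-path suffixes used below):

    SERIAL_PORT_TYPE_RO = 'ro'   # logging-only pipe
    SERIAL_PORT_TYPE_RW = 'rw'   # interactive pipe

    def logging_pipe_types(serial_port_mapping):
        # Log from 'ro' when present; otherwise let 'rw' log, so the
        # console log file always has exactly one writer.
        log_rw = SERIAL_PORT_TYPE_RO not in serial_port_mapping
        return [pipe_type for pipe_type in serial_port_mapping
                if pipe_type == SERIAL_PORT_TYPE_RO or log_rw]

For example, a mapping containing only an 'rw' pipe yields ['rw'], while a mapping with both pipe types yields only ['ro'].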
- serial_port_mapping = self._get_vm_serial_port_mapping() - log_rw_pipe_output = not serial_port_mapping.get( - constants.SERIAL_PORT_TYPE_RO) - - for pipe_type, pipe_path in serial_port_mapping.items(): - enable_logging = (pipe_type == constants.SERIAL_PORT_TYPE_RO or - log_rw_pipe_output) - handler = self._get_named_pipe_handler( - pipe_path, - pipe_type=pipe_type, - enable_logging=enable_logging) - self._workers.append(handler) - - if enable_logging: - self._log_handler = handler - - def _get_named_pipe_handler(self, pipe_path, pipe_type, - enable_logging): - kwargs = {} - if pipe_type == constants.SERIAL_PORT_TYPE_RW: - kwargs = {'input_queue': self._input_queue, - 'output_queue': self._output_queue, - 'connect_event': self._client_connected} - if enable_logging: - kwargs['log_file'] = self._log_path - - handler = utilsfactory.get_named_pipe_handler(pipe_path, **kwargs) - return handler - - def _get_vm_serial_port_mapping(self): - serial_port_conns = self._vmutils.get_vm_serial_port_connections( - self._instance_name) - - if not serial_port_conns: - err_msg = _("No suitable serial port pipe was found " - "for instance %(instance_name)s") - raise exception.NovaException( - err_msg % {'instance_name': self._instance_name}) - - serial_port_mapping = {} - # At the moment, we tag the pipes by using a pipe path suffix - # as we can't use the serial port ElementName attribute because of - # a Hyper-V bug. - for pipe_path in serial_port_conns: - # expected pipe_path: - # '\\.\pipe\fc1bcc91-c7d3-4116-a210-0cd151e019cd_rw' - port_type = pipe_path[-2:] - if port_type in [constants.SERIAL_PORT_TYPE_RO, - constants.SERIAL_PORT_TYPE_RW]: - serial_port_mapping[port_type] = pipe_path - else: - serial_port_mapping[constants.SERIAL_PORT_TYPE_RW] = pipe_path - - return serial_port_mapping - - def get_serial_console(self): - if not CONF.serial_console.enabled: - raise exception.ConsoleTypeUnavailable(console_type='serial') - return ctype.ConsoleSerial(host=self._listen_host, - port=self._listen_port) - - def flush_console_log(self): - if self._log_handler: - LOG.debug("Flushing instance %s console log.", - self._instance_name) - self._log_handler.flush_log_file() diff --git a/compute_hyperv/nova/serialconsoleops.py b/compute_hyperv/nova/serialconsoleops.py deleted file mode 100644 index c713168f..00000000 --- a/compute_hyperv/nova/serialconsoleops.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import os - -from nova import exception -from nova.i18n import _ -from nova import utils -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import importutils -import six - -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import serialconsolehandler - -LOG = logging.getLogger(__name__) - -_console_handlers = {} - - -def instance_synchronized(func): - @functools.wraps(func) - def wrapper(self, instance_name, *args, **kwargs): - @utils.synchronized(instance_name) - def inner(): - return func(self, instance_name, *args, **kwargs) - return inner() - return wrapper - - -class SerialConsoleOps(object): - def __init__(self): - self._vmops_prop = None - - self._vmutils = utilsfactory.get_vmutils() - self._pathutils = pathutils.PathUtils() - - @property - def _vmops(self): - # We have to avoid a circular dependency. - if not self._vmops_prop: - self._vmops_prop = importutils.import_class( - 'compute_hyperv.nova.vmops.VMOps')() - return self._vmops_prop - - @instance_synchronized - def start_console_handler(self, instance_name): - if self._vmutils.is_secure_vm(instance_name): - LOG.warning("Skipping starting serial console handler. " - "Shielded/Encrypted VM %(instance_name)s " - "doesn't support serial console.", - {'instance_name': instance_name}) - return - - # Cleanup existing workers. - self.stop_console_handler_unsync(instance_name) - handler = None - - try: - handler = serialconsolehandler.SerialConsoleHandler( - instance_name) - handler.start() - _console_handlers[instance_name] = handler - except Exception as exc: - LOG.error('Instance %(instance_name)s serial console handler ' - 'could not start. Exception %(exc)s', - {'instance_name': instance_name, 'exc': exc}) - if handler: - handler.stop() - - @instance_synchronized - def stop_console_handler(self, instance_name): - self.stop_console_handler_unsync(instance_name) - - def stop_console_handler_unsync(self, instance_name): - handler = _console_handlers.get(instance_name) - if handler: - LOG.info("Stopping instance %(instance_name)s " - "serial console handler.", - {'instance_name': instance_name}) - handler.stop() - del _console_handlers[instance_name] - - @instance_synchronized - def get_serial_console(self, instance_name): - handler = _console_handlers.get(instance_name) - if not handler: - raise exception.ConsoleTypeUnavailable(console_type='serial') - return handler.get_serial_console() - - @instance_synchronized - def get_console_output(self, instance_name): - if self._vmutils.is_secure_vm(instance_name): - err = _("Shielded/Encrypted VMs don't support serial console.") - raise exception.ConsoleNotAvailable(err) - - console_log_paths = self._pathutils.get_vm_console_log_paths( - instance_name) - - handler = _console_handlers.get(instance_name) - if handler: - handler.flush_console_log() - - try: - log = b'' - # Start with the oldest console log file. - for log_path in reversed(console_log_paths): - if os.path.exists(log_path): - with open(log_path, 'rb') as fp: - log += fp.read() - return log - except IOError as err: - raise exception.ConsoleLogOutputException( - instance_id=instance_name, reason=six.text_type(err)) - - def start_console_handlers(self): - active_instances = self._vmutils.get_active_instances() - for instance_name in active_instances: - instance_uuid = self._vmops.get_instance_uuid(instance_name) - - if instance_uuid: - self.start_console_handler(instance_name) - else: - LOG.debug("Instance uuid could not be retrieved for " - "instance %s. 
Its serial console output will not " - "be handled.", instance_name) diff --git a/compute_hyperv/nova/serialproxy.py b/compute_hyperv/nova/serialproxy.py deleted file mode 100644 index 86fe6b14..00000000 --- a/compute_hyperv/nova/serialproxy.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import socket - -from eventlet import patcher -from nova import exception -from nova.i18n import _ - -from compute_hyperv.nova import constants - -# Note(lpetrut): Eventlet greenpipes are not supported on Windows. The named -# pipe handlers implemented in os-win use Windows API calls which can block -# the whole thread. In order to avoid this, those workers run in separate -# 'native' threads. -# -# As this proxy communicates with those workers via queues, the serial console -# proxy workers have to run in 'native' threads as well. -threading = patcher.original('threading') - - -def handle_socket_errors(func): - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except socket.error: - self._client_connected.clear() - return wrapper - - -class SerialProxy(threading.Thread): - def __init__(self, instance_name, addr, port, input_queue, - output_queue, client_connected): - super(SerialProxy, self).__init__() - self.setDaemon(True) - - self._instance_name = instance_name - self._addr = addr - self._port = port - self._conn = None - - self._input_queue = input_queue - self._output_queue = output_queue - self._client_connected = client_connected - self._stopped = threading.Event() - - def _setup_socket(self): - try: - self._sock = socket.socket(socket.AF_INET, - socket.SOCK_STREAM) - self._sock.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1) - self._sock.bind((self._addr, self._port)) - self._sock.listen(1) - except socket.error as err: - self._sock.close() - msg = (_('Failed to initialize serial proxy on ' - '%(addr)s:%(port)s, handling connections ' - 'to instance %(instance_name)s. 
Error: %(error)s') % - {'addr': self._addr, - 'port': self._port, - 'instance_name': self._instance_name, - 'error': err}) - raise exception.NovaException(msg) - - def stop(self): - self._stopped.set() - self._client_connected.clear() - if self._conn: - self._conn.shutdown(socket.SHUT_RDWR) - self._conn.close() - self._sock.close() - - def run(self): - self._setup_socket() - while not self._stopped.isSet(): - self._accept_conn() - - @handle_socket_errors - def _accept_conn(self): - self._conn, client_addr = self._sock.accept() - self._client_connected.set() - - workers = [] - for job in [self._get_data, self._send_data]: - worker = threading.Thread(target=job) - worker.setDaemon(True) - worker.start() - workers.append(worker) - - for worker in workers: - worker_running = (worker.is_alive() and - worker is not threading.current_thread()) - if worker_running: - worker.join() - - self._conn.close() - self._conn = None - - @handle_socket_errors - def _get_data(self): - while self._client_connected.isSet(): - data = self._conn.recv(constants.SERIAL_CONSOLE_BUFFER_SIZE) - if not data: - self._client_connected.clear() - return - self._input_queue.put(data) - - @handle_socket_errors - def _send_data(self): - while self._client_connected.isSet(): - data = self._output_queue.get_burst() - if data: - self._conn.sendall(data) diff --git a/compute_hyperv/nova/snapshotops.py b/compute_hyperv/nova/snapshotops.py deleted file mode 100644 index f3beb8d7..00000000 --- a/compute_hyperv/nova/snapshotops.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for VM snapshot operations. -""" -import os - -from nova.compute import task_states -from nova.compute import utils as compute_utils -from nova import exception -from nova.image import glance -from nova import utils -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging - -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils - -LOG = logging.getLogger(__name__) - - -class SnapshotOps(object): - def __init__(self): - self._pathutils = pathutils.PathUtils() - self._vmutils = utilsfactory.get_vmutils() - self._vhdutils = utilsfactory.get_vhdutils() - - def _save_glance_image(self, context, image_id, image_vhd_path): - image_format = self._vhdutils.get_vhd_format(image_vhd_path).lower() - - (glance_image_service, - image_id) = glance.get_remote_image_service(context, image_id) - image_metadata = {"disk_format": image_format, - "container_format": "bare"} - with self._pathutils.open(image_vhd_path, 'rb') as f: - with compute_utils.disk_ops_semaphore: - glance_image_service.update(context, image_id, - image_metadata, f, - purge_props=False) - - def snapshot(self, context, instance, image_id, update_task_state): - # This operation is not fully preemptive at the moment. We're locking - # it as well as the destroy operation (if configured to do so). 
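The comment above explains the point of the decorator that follows: snapshot and destroy share a single per-instance lock name, so neither can run while the other is touching the instance's files. A rough standalone sketch of the pattern using plain threading (the removed code relies on nova's utils.synchronized; all names here are illustrative):

    import functools
    import threading

    _registry_lock = threading.Lock()
    _named_locks = {}

    def _get_lock(name):
        # Create at most one lock object per name.
        with _registry_lock:
            return _named_locks.setdefault(name, threading.Lock())

    def synchronized_on(lock_name):
        # Operations decorated with the same lock name (e.g.
        # 'snapshot-<instance_uuid>') are serialized against each other.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                with _get_lock(lock_name):
                    return func(*args, **kwargs)
            return wrapper
        return decorator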
- @utils.synchronized(constants.SNAPSHOT_LOCK_TEMPLATE % - dict(instance_uuid=instance.uuid)) - def instance_synchronized_snapshot(): - self._snapshot(context, instance, image_id, update_task_state) - - try: - instance_synchronized_snapshot() - except os_win_exc.HyperVVMNotFoundException: - # the instance might disappear before starting the operation. - raise exception.InstanceNotFound(instance_id=instance.uuid) - - def _snapshot(self, context, instance, image_id, update_task_state): - """Create snapshot from a running VM instance.""" - instance_name = instance.name - - LOG.debug("Creating snapshot for instance %s", instance_name) - snapshot_path = self._vmutils.take_vm_snapshot(instance_name) - update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) - - export_dir = None - - try: - src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) - - LOG.debug("Getting info for VHD %s", src_vhd_path) - src_base_disk_path = self._vhdutils.get_vhd_parent_path( - src_vhd_path) - - export_dir = self._pathutils.get_export_dir( - instance_name, create_dir=True, remove_dir=True) - - dest_vhd_path = os.path.join(export_dir, os.path.basename( - src_vhd_path)) - LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s', - {'src_vhd_path': src_vhd_path, - 'dest_vhd_path': dest_vhd_path}) - self._pathutils.copyfile(src_vhd_path, dest_vhd_path) - - image_vhd_path = None - if not src_base_disk_path: - image_vhd_path = dest_vhd_path - else: - basename = os.path.basename(src_base_disk_path) - dest_base_disk_path = os.path.join(export_dir, basename) - LOG.debug('Copying base disk %(src_vhd_path)s to ' - '%(dest_base_disk_path)s', - {'src_vhd_path': src_vhd_path, - 'dest_base_disk_path': dest_base_disk_path}) - self._pathutils.copyfile(src_base_disk_path, - dest_base_disk_path) - - LOG.debug("Reconnecting copied base VHD " - "%(dest_base_disk_path)s and diff " - "VHD %(dest_vhd_path)s", - {'dest_base_disk_path': dest_base_disk_path, - 'dest_vhd_path': dest_vhd_path}) - self._vhdutils.reconnect_parent_vhd(dest_vhd_path, - dest_base_disk_path) - - LOG.debug("Merging diff disk %s into its parent.", - dest_vhd_path) - self._vhdutils.merge_vhd(dest_vhd_path) - image_vhd_path = dest_base_disk_path - - LOG.debug("Updating Glance image %(image_id)s with content from " - "merged disk %(image_vhd_path)s", - {'image_id': image_id, 'image_vhd_path': image_vhd_path}) - update_task_state(task_state=task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD) - self._save_glance_image(context, image_id, image_vhd_path) - - LOG.debug("Snapshot image %(image_id)s updated for VM " - "%(instance_name)s", - {'image_id': image_id, 'instance_name': instance_name}) - finally: - try: - LOG.debug("Removing snapshot %s", image_id) - self._vmutils.remove_vm_snapshot(snapshot_path) - except Exception: - LOG.exception('Failed to remove snapshot for VM %s', - instance_name, instance=instance) - if export_dir: - LOG.debug('Removing directory: %s', export_dir) - self._pathutils.rmtree(export_dir) diff --git a/compute_hyperv/nova/utils/__init__.py b/compute_hyperv/nova/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/nova/utils/placement.py b/compute_hyperv/nova/utils/placement.py deleted file mode 100644 index 41214f5c..00000000 --- a/compute_hyperv/nova/utils/placement.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import exception -from nova import objects -from nova.scheduler.client import report -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -CONSUMER_GENERATION_VERSION = "1.28" # Rocky - - -class PlacementUtils(object): - def __init__(self): - self.reportclient = report.SchedulerReportClient() - - def move_compute_node_allocations(self, context, instance, old_host, - new_host, merge_existing=True): - LOG.info("Moving instance allocations from compute node %s to %s.", - old_host, new_host, instance=instance) - - cn_uuid = objects.ComputeNode.get_by_host_and_nodename( - context, old_host, old_host).uuid - new_cn_uuid = objects.ComputeNode.get_by_host_and_nodename( - context, new_host, new_host).uuid - - self.move_allocations(context, instance.uuid, cn_uuid, - new_cn_uuid, - merge_existing=merge_existing) - - @report.retries - def move_allocations(self, context, consumer_uuid, old_rp_uuid, - new_rp_uuid, merge_existing=True): - allocs = self._get_allocs_for_consumer( - context, consumer_uuid, - version=CONSUMER_GENERATION_VERSION) - allocations = allocs['allocations'] - - if old_rp_uuid == new_rp_uuid: - LOG.debug("Requested to move allocations to the " - "same provider: %s.", old_rp_uuid) - return - - if old_rp_uuid not in allocations: - LOG.warning("Expected to find allocations referencing resource " - "provider %s for %s, but found none.", - old_rp_uuid, consumer_uuid) - return - - if merge_existing and new_rp_uuid in allocations: - LOG.info("Merging existing allocations for consumer %s on " - "provider %s: %s.", - consumer_uuid, new_rp_uuid, allocations) - self.merge_resources( - allocations[new_rp_uuid]['resources'], - allocations[old_rp_uuid]['resources']) - else: - if new_rp_uuid in allocations: - LOG.info("Replacing existing allocations for consumer %s " - "on provider %s: %s", - consumer_uuid, new_rp_uuid, allocations) - - allocations[new_rp_uuid] = allocations[old_rp_uuid] - - del allocations[old_rp_uuid] - self._put_allocs(context, consumer_uuid, allocs, - version=CONSUMER_GENERATION_VERSION) - - def _put_allocs(self, context, consumer_uuid, allocations, version=None): - url = '/allocations/%s' % consumer_uuid - r = self.reportclient.put(url, allocations, - version=version, - global_request_id=context.global_id) - if r.status_code != 204: - errors = r.json().get('errors') or [] - # NOTE(jaypipes): Yes, it sucks doing string comparison like this - # but we have no error codes, only error messages. 
- # TODO(gibi): Use more granular error codes when available - for err in errors: - if err.get('code') == 'placement.concurrent_update': - reason = ( - 'another process changed the resource providers ' - 'involved in our attempt to put allocations for ' - 'consumer %s' % consumer_uuid) - raise report.Retry('put_allocations', reason) - raise exception.AllocationUpdateFailed( - consumer_uuid=consumer_uuid, error=errors) - - def _get_allocs_for_consumer(self, context, consumer, version=None): - resp = self.reportclient.get('/allocations/%s' % consumer, - version=version, - global_request_id=context.global_id) - if not resp: - # TODO(efried): Use code/title/detail to make a better exception - raise exception.ConsumerAllocationRetrievalFailed( - consumer_uuid=consumer, error=resp.text) - - return resp.json() - - @staticmethod - def merge_resources(original_resources, new_resources, sign=1): - """Merge a list of new resources with existing resources. - - Either add the resources (if sign is 1) or subtract (if sign is -1). - If the resulting value is 0 do not include the resource in the results. - """ - - all_keys = set(original_resources.keys()) | set(new_resources.keys()) - for key in all_keys: - value = (original_resources.get(key, 0) + - (sign * new_resources.get(key, 0))) - if value: - original_resources[key] = value - else: - original_resources.pop(key, None) diff --git a/compute_hyperv/nova/vif.py b/compute_hyperv/nova/vif.py deleted file mode 100644 index e01e494b..00000000 --- a/compute_hyperv/nova/vif.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# Copyright 2013 Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
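Before moving on to the VIF driver code below, the merge_resources helper above deserves a worked example: it folds two resource dictionaries together, adding or subtracting per key and dropping any key whose merged value reaches zero (hypothetical amounts; the function body mirrors the removed staticmethod):

    def merge_resources(original, new, sign=1):
        # Add (sign=1) or subtract (sign=-1) per-resource amounts,
        # removing keys whose merged value becomes zero.
        for key in set(original) | set(new):
            value = original.get(key, 0) + sign * new.get(key, 0)
            if value:
                original[key] = value
            else:
                original.pop(key, None)

    allocs = {'VCPU': 2, 'MEMORY_MB': 2048}
    merge_resources(allocs, {'VCPU': 2, 'MEMORY_MB': 1024}, sign=-1)
    # allocs == {'MEMORY_MB': 1024}: VCPU reached zero and was dropped.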
- -import abc - -from nova import exception -from nova.i18n import _ -from nova.network import model -from nova.network import os_vif_util -import os_vif -from os_win import constants as os_win_const -from os_win import utilsfactory -from oslo_log import log as logging - -import compute_hyperv.nova.conf - -LOG = logging.getLogger(__name__) -CONF = compute_hyperv.nova.conf.CONF - - -class HyperVBaseVIFPlugin(object): - @abc.abstractmethod - def plug(self, instance, vif): - pass - - @abc.abstractmethod - def unplug(self, instance, vif): - pass - - -class HyperVNeutronVIFPlugin(HyperVBaseVIFPlugin): - """Neutron VIF plugin.""" - - def plug(self, instance, vif): - # Neutron takes care of plugging the port - pass - - def unplug(self, instance, vif): - # Neutron takes care of unplugging the port - pass - - -class HyperVNovaNetworkVIFPlugin(HyperVBaseVIFPlugin): - """Nova network VIF plugin.""" - - def __init__(self): - self._netutils = utilsfactory.get_networkutils() - - def plug(self, instance, vif): - self._netutils.connect_vnic_to_vswitch(CONF.hyperv.vswitch_name, - vif['id']) - - def unplug(self, instance, vif): - # TODO(alepilotti) Not implemented - pass - - -class HyperVVIFDriver(object): - def __init__(self): - self._metricsutils = utilsfactory.get_metricsutils() - self._netutils = utilsfactory.get_networkutils() - self._vmutils = utilsfactory.get_vmutils() - self._vif_plugin = HyperVNeutronVIFPlugin() - - def plug(self, instance, vif): - vif_type = vif['type'] - if vif_type == model.VIF_TYPE_HYPERV: - self._vif_plugin.plug(instance, vif) - elif vif_type == model.VIF_TYPE_OVS: - reason = _("OVS is no longer supported. Please consider using " - "the networking-hyperv agent.") - raise exception.VirtualInterfacePlugException(reason) - else: - reason = _("Failed to plug virtual interface: " - "unexpected vif_type=%s") % vif_type - raise exception.VirtualInterfacePlugException(reason) - - def unplug(self, instance, vif): - vif_type = vif['type'] - if vif_type == model.VIF_TYPE_HYPERV: - self._vif_plugin.unplug(instance, vif) - elif vif_type == model.VIF_TYPE_OVS: - vif = os_vif_util.nova_to_osvif_vif(vif) - instance = os_vif_util.nova_to_osvif_instance(instance) - os_vif.unplug(vif, instance) - else: - reason = _("unexpected vif_type=%s") % vif_type - raise exception.VirtualInterfaceUnplugException(reason=reason) - - def enable_metrics(self, instance_name, vif_id): - # Hyper-V's metric collection API is extremely inconsistent. - # As opposed to other metrics, network metrics have to be enabled - # whenever the vm starts. Attempting to do so while the vm is shut off - # will fail. Also, this option gets reset when the vm is rebooted. - # - # Note that meter ACLs must already be set on the specified port. - # For "hyperv" ports, this is handled by networking-hyperv, while - # for OVS ports, we're doing it on the Nova side. - vm_state = self._vmutils.get_vm_state(instance_name) - if vm_state in [os_win_const.HYPERV_VM_STATE_ENABLED, - os_win_const.HYPERV_VM_STATE_PAUSED]: - LOG.debug("Enabling instance port metrics. " - "Instance name: %(instance_name)s. " - "Port name: %(port_name)s.", - dict(instance_name=instance_name, - port_name=vif_id)) - self._metricsutils.enable_port_metrics_collection(vif_id) - else: - LOG.debug("Instance %s is not running. 
Port metrics will " - "be enabled when the instance starts.", - instance_name) diff --git a/compute_hyperv/nova/vmops.py b/compute_hyperv/nova/vmops.py deleted file mode 100644 index db656234..00000000 --- a/compute_hyperv/nova/vmops.py +++ /dev/null @@ -1,1500 +0,0 @@ -# Copyright (c) 2010 Cloud.com, Inc -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for basic VM operations. -""" -import contextlib -import functools -import os -import time - -from eventlet import timeout as etimeout -from nova.api.metadata import base as instance_metadata -from nova.compute import task_states -from nova.compute import vm_states -from nova import exception -from nova import objects -from nova.objects import fields -from nova import utils -from nova import version -from nova.virt import configdrive -from nova.virt import event as virtevent -from nova.virt import hardware -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import fileutils -from oslo_utils import units -from oslo_utils import uuidutils -import six - -from compute_hyperv.i18n import _ -from compute_hyperv.nova import block_device_manager -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import imagecache -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import pdk -from compute_hyperv.nova import serialconsoleops -from compute_hyperv.nova import vif as vif_utils -from compute_hyperv.nova import volumeops - -LOG = logging.getLogger(__name__) - -CONF = compute_hyperv.nova.conf.CONF - -SHUTDOWN_TIME_INCREMENT = 5 -REBOOT_TYPE_SOFT = 'SOFT' -REBOOT_TYPE_HARD = 'HARD' - -VM_GENERATIONS = { - constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1, - constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2 -} - -VM_GENERATIONS_CONTROLLER_TYPES = { - constants.VM_GEN_1: constants.CTRL_TYPE_IDE, - constants.VM_GEN_2: constants.CTRL_TYPE_SCSI -} - - -def check_admin_permissions(function): - @functools.wraps(function) - def wrapper(self, *args, **kwds): - - # Make sure the windows account has the required admin permissions. 
- self._vmutils.check_admin_permissions() - return function(self, *args, **kwds) - return wrapper - - -class VMOps(object): - _ROOT_DISK_CTRL_ADDR = 0 - - def __init__(self, virtapi=None): - self._virtapi = virtapi - self._vmutils = utilsfactory.get_vmutils() - self._metricsutils = utilsfactory.get_metricsutils() - self._vhdutils = utilsfactory.get_vhdutils() - self._hostutils = utilsfactory.get_hostutils() - self._migrutils = utilsfactory.get_migrationutils() - self._pathutils = pathutils.PathUtils() - self._volumeops = volumeops.VolumeOps() - self._imagecache = imagecache.ImageCache() - self._vif_driver_cache = {} - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._block_dev_man = ( - block_device_manager.BlockDeviceInfoManager()) - self._vif_driver = vif_utils.HyperVVIFDriver() - self._pdk = pdk.PDK() - - def list_instance_uuids(self): - instance_uuids = [] - for (instance_name, notes) in self._vmutils.list_instance_notes(): - if notes and uuidutils.is_uuid_like(notes[0]): - instance_uuids.append(str(notes[0])) - else: - LOG.debug("Notes not found or not resembling a GUID for " - "instance: %s", instance_name) - return instance_uuids - - def list_instances(self): - return self._vmutils.list_instances() - - def get_info(self, instance): - """Get information about the VM.""" - LOG.debug("get_info called for instance", instance=instance) - - instance_name = instance.name - if not self._vmutils.vm_exists(instance_name): - raise exception.InstanceNotFound(instance_id=instance.uuid) - - info = self._vmutils.get_vm_summary_info(instance_name) - - state = constants.HYPERV_POWER_STATE[info['EnabledState']] - return hardware.InstanceInfo(state=state) - - def _create_root_device(self, context, instance, root_disk_info, vm_gen): - path = None - if root_disk_info['type'] == constants.DISK: - path = self._create_root_vhd(context, instance) - self.check_vm_image_type(instance.uuid, vm_gen, path) - elif root_disk_info['type'] == constants.DVD: - path = self._create_root_iso(context, instance) - root_disk_info['path'] = path - - def _create_root_iso(self, context, instance): - root_iso_path_cached = self._imagecache.get_cached_image(context, - instance) - root_iso_path = self._pathutils.get_root_vhd_path(instance.name, 'iso') - - if not os.path.exists(root_iso_path): - self._pathutils.copyfile(root_iso_path_cached, root_iso_path) - else: - LOG.info("Root iso '%s' already exists. Reusing it.", - root_iso_path) - - return root_iso_path - - def _create_root_vhd(self, context, instance, rescue_image_id=None): - is_rescue_vhd = rescue_image_id is not None - cache_image = instance.vm_state != vm_states.SHELVED_OFFLOADED - - if cache_image: - base_vhd_path = self._imagecache.get_cached_image(context, - instance, - rescue_image_id) - format_ext = base_vhd_path.split('.')[-1] - root_vhd_path = self._pathutils.get_root_vhd_path(instance.name, - format_ext, - is_rescue_vhd) - if os.path.exists(root_vhd_path): - LOG.info("Root vhd '%s' already exists. 
Reusing it.", - root_vhd_path) - return root_vhd_path - else: - LOG.debug("Unshelving instance, avoiding image cache.") - base_vhd_path = None - root_vhd_path = self._pathutils.get_root_vhd_path( - instance.name, - None, - is_rescue_vhd) - self._imagecache.fetch( - context, instance.image_ref, root_vhd_path, - instance.trusted_certs) - glance_img_fmt = self._imagecache.get_image_format( - context, instance.image_ref, instance) - root_vhd_path = self._imagecache.append_image_format( - root_vhd_path, glance_img_fmt) - - try: - if cache_image: - # When unshelving instances, we're using temporary snapshots - # that shouln't be cached. - if CONF.use_cow_images: - LOG.debug("Creating differencing VHD. Parent: " - "%(base_vhd_path)s, Target: %(root_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'root_vhd_path': root_vhd_path}, - instance=instance) - self._vhdutils.create_differencing_vhd(root_vhd_path, - base_vhd_path) - vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) - if vhd_type == constants.DISK_FORMAT_VHD: - # The base image has already been resized. As - # differencing vhdx images support it, the root image - # will be resized instead if needed. - return root_vhd_path - else: - LOG.debug("Copying VHD image %(base_vhd_path)s to target: " - "%(root_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'root_vhd_path': root_vhd_path}, - instance=instance) - self._pathutils.copyfile(base_vhd_path, root_vhd_path) - - root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) - root_vhd_size = root_vhd_info['VirtualSize'] - flavor_size = instance.flavor.root_gb * units.Gi - flavor_internal_size = ( - self._vhdutils.get_internal_vhd_size_by_file_size( - base_vhd_path or root_vhd_path, flavor_size)) - - if not is_rescue_vhd and self._is_resize_needed( - root_vhd_path, root_vhd_size, - flavor_internal_size, instance): - self._vhdutils.resize_vhd(root_vhd_path, - flavor_internal_size, - is_file_max_size=False) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(root_vhd_path): - self._pathutils.remove(root_vhd_path) - - return root_vhd_path - - def _is_resize_needed(self, vhd_path, old_size, new_size, instance): - if new_size < old_size: - raise exception.FlavorDiskSmallerThanImage( - flavor_size=new_size, image_size=old_size) - elif new_size > old_size: - LOG.debug("Resizing VHD %(vhd_path)s to new " - "size %(new_size)s", - {'new_size': new_size, - 'vhd_path': vhd_path}, - instance=instance) - return True - return False - - def _create_ephemerals(self, instance, ephemerals): - for index, eph in enumerate(ephemerals): - eph['format'] = self._vhdutils.get_best_supported_vhd_format() - eph_name = "eph%s" % index - eph['path'] = self._pathutils.get_ephemeral_vhd_path( - instance.name, eph['format'], eph_name) - self.create_ephemeral_disk(instance.name, eph) - - def create_ephemeral_disk(self, instance_name, eph_info): - if not os.path.exists(eph_info['path']): - self._vhdutils.create_dynamic_vhd(eph_info['path'], - eph_info['size'] * units.Gi) - else: - LOG.info("Ephemeral '%s' disk already exists. 
Reusing it.", - eph_info['path']) - - def get_attached_ephemeral_disks(self, instance_name): - vm_image_disks = self._vmutils.get_vm_storage_paths( - instance_name)[0] - return [image_path for image_path in vm_image_disks - if os.path.basename(image_path).lower().startswith('eph')] - - @staticmethod - def _get_vif_metadata(context, instance_id): - vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, - instance_id) - vif_metadata = [] - for vif in vifs: - if 'tag' in vif and vif.tag: - device = objects.NetworkInterfaceMetadata( - mac=vif.address, - bus=objects.PCIDeviceBus(), - tags=[vif.tag]) - vif_metadata.append(device) - - return vif_metadata - - def update_device_metadata(self, context, instance): - """Builds a metadata object for instance devices, that maps the user - provided tag to the hypervisor assigned device address. - """ - metadata = [] - - metadata.extend(self._get_vif_metadata(context, instance.uuid)) - metadata.extend(self._block_dev_man.get_bdm_metadata(context, - instance)) - - instance.device_metadata = objects.InstanceDeviceMetadata( - devices=metadata) - instance.save() - - def set_boot_order(self, instance_name, vm_gen, block_device_info): - boot_order = self._block_dev_man.get_boot_order( - vm_gen, block_device_info) - LOG.debug("Setting boot order for instance: %(instance_name)s: " - "%(boot_order)s", {'instance_name': instance_name, - 'boot_order': boot_order}) - - self._vmutils.set_boot_order(instance_name, boot_order) - - @check_admin_permissions - def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info=None, - power_on=True): - """Create a new VM and start it.""" - LOG.info("Spawning new instance", instance=instance) - - instance_name = instance.name - if self._vmutils.vm_exists(instance_name): - raise exception.InstanceExists(name=instance_name) - - vm_gen = self.get_image_vm_generation(instance.uuid, image_meta) - - instance_dir = self._pathutils.get_instance_dir(instance.name, - create_dir=False) - if os.path.exists(instance_dir): - LOG.info("Instance directory already exists." - "Reusing existing files.") - - self._block_dev_man.validate_and_update_bdi( - instance, image_meta, vm_gen, block_device_info) - root_device = block_device_info['root_disk'] - self._create_root_device(context, instance, root_device, vm_gen) - self._create_ephemerals(instance, block_device_info['ephemerals']) - - try: - with self.wait_vif_plug_events(instance, network_info): - # waiting will occur after the instance is created. - self.create_instance(context, instance, network_info, - block_device_info, vm_gen, image_meta) - self.plug_vifs(instance, network_info) - - self.update_device_metadata(context, instance) - - if configdrive.required_by(instance): - configdrive_path = self._create_config_drive(context, - instance, - injected_files, - admin_password, - network_info) - - self.attach_config_drive(instance, configdrive_path, vm_gen) - self.set_boot_order(instance.name, vm_gen, block_device_info) - # vifs are already plugged in at this point. We waited on the vif - # plug event previously when we created the instance. 
Skip the - # plug vifs during power on in this case - if power_on: - self.power_on(instance, - network_info=network_info, - should_plug_vifs=False) - except Exception: - with excutils.save_and_reraise_exception(): - self.destroy(instance, network_info, block_device_info) - - @contextlib.contextmanager - def wait_vif_plug_events(self, instance, network_info): - timeout = CONF.vif_plugging_timeout - - try: - # NOTE(claudiub): async calls to bind the neutron ports will be - # done when network_info is being accessed. - events = self._get_neutron_events(network_info) - with self._virtapi.wait_for_instance_event( - instance, events, deadline=timeout, - error_callback=self._neutron_failed_callback): - yield - except etimeout.Timeout: - # We never heard from Neutron - LOG.warning('Timeout waiting for vif plugging callback for ' - 'instance.', instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - except exception.PortBindingFailed: - LOG.warning( - "Neutron failed to bind a port to this host. Make sure that " - "an L2 agent is registered on this node and alive (Neutron " - "Open vSwitch agent or Hyper-V agent), or that Neutron is " - "configured with a mechanism driver that is able to bind " - "ports without requiring an L2 agent on this host (e.g. OVN " - "mechanism driver). If you're using Neutron Hyper-V agent, " - "make sure that networking-hyperv is installed on the " - "Neutron controller and that the neutron-server service was " - "configured to use the 'hyperv' mechanism_driver.") - raise - - def _neutron_failed_callback(self, event_name, instance): - LOG.error('Neutron Reported failure on event %s', - event_name, instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - - def _get_neutron_events(self, network_info): - # NOTE(danms): We need to collect any VIFs that are currently - # down that we expect a down->up event for. Anything that is - # already up will not undergo that transition, and for - # anything that might be stale (cache-wise) assume it's - # already up so we don't block on it. 
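Concretely, the filter implemented just below only waits on ports that Neutron reports as explicitly down; anything already up, or of unknown (possibly stale) state, produces no event to wait for. A worked example with hypothetical port data:

    network_info = [
        {'id': 'port-a', 'active': False},  # down: expect a plug event
        {'id': 'port-b', 'active': True},   # already up: no event
        {'id': 'port-c'},                   # unknown state: assumed up
    ]
    events = [('network-vif-plugged', vif['id'])
              for vif in network_info if vif.get('active') is False]
    # events == [('network-vif-plugged', 'port-a')]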
- if CONF.vif_plugging_timeout: - return [('network-vif-plugged', vif['id']) - for vif in network_info if vif.get('active') is False] - return [] - - def create_instance(self, context, instance, network_info, - block_device_info, vm_gen, image_meta): - root_device = block_device_info['root_disk'] - instance_name = instance.name - instance_path = os.path.join(CONF.instances_path, instance_name) - secure_boot_enabled = self._requires_secure_boot(instance, image_meta, - vm_gen) - - memory_per_numa_node, cpus_per_numa_node = ( - self._get_instance_vnuma_config(instance, image_meta)) - vnuma_enabled = bool(memory_per_numa_node) - - self._vmutils.create_vm(instance_name, - vnuma_enabled, - vm_gen, - instance_path, - [instance.uuid]) - - self.configure_remotefx(instance, vm_gen) - - self._vmutils.create_scsi_controller(instance_name) - self._attach_root_device(context, instance, root_device) - self.attach_ephemerals(instance_name, block_device_info['ephemerals']) - self._volumeops.attach_volumes( - context, block_device_info['block_device_mapping'], instance) - - serial_ports = self._get_image_serial_port_settings(image_meta) - self._create_vm_com_port_pipes(instance, serial_ports) - - for vif in network_info: - LOG.debug('Creating nic for instance', instance=instance) - self._vmutils.create_nic(instance_name, - vif['id'], - vif['address']) - - self.configure_instance_metrics(instance_name) - - if secure_boot_enabled: - certificate_required = self._requires_certificate(image_meta) - self._vmutils.enable_secure_boot( - instance.name, msft_ca_required=certificate_required) - - self._configure_secure_vm(context, instance, image_meta, - secure_boot_enabled) - - self.update_vm_resources(instance, vm_gen, image_meta) - - def update_vm_resources(self, instance, vm_gen, image_meta, - instance_path=None, is_resize=False): - """Updates the VM's reconfigurable resources.""" - memory_per_numa_node, cpus_per_numa_node = ( - self._get_instance_vnuma_config(instance, image_meta)) - vnuma_enabled = bool(memory_per_numa_node) - nested_virt_enabled = self._requires_nested_virt(instance, image_meta) - - dynamic_memory_ratio = self._get_instance_dynamic_memory_ratio( - instance, vnuma_enabled, nested_virt_enabled) - - if (instance.pci_requests.requests and not - CONF.hyperv.instance_automatic_shutdown): - # NOTE(claudiub): if the instance requires PCI devices, its - # host shutdown action MUST be shutdown. - LOG.info("Instance automatic shutdown is disabled but " - "passthrough PCI devices were requested. " - "Setting instance automatic shutdown.") - - automatic_shutdown = (CONF.hyperv.instance_automatic_shutdown or - instance.pci_requests.requests) - host_shutdown_action = ( - os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN - if automatic_shutdown - else None) - - self._vmutils.update_vm(instance.name, - instance.flavor.memory_mb, - memory_per_numa_node, - instance.flavor.vcpus, - cpus_per_numa_node, - CONF.hyperv.limit_cpu_features, - dynamic_memory_ratio, - configuration_root_dir=instance_path, - host_shutdown_action=host_shutdown_action, - vnuma_enabled=vnuma_enabled, - chassis_asset_tag=version.product_string()) - - self._set_instance_disk_qos_specs(instance, is_resize) - self._attach_pci_devices(instance, is_resize) - if nested_virt_enabled: - # NOTE(claudiub): We might not want to disable nested - # virtualization. If it was enabled, the guest will most probably - # have Hyper-V enabled + nested VMs, which will break if nested - # virtualization is disabled. 
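The note above is the reason the call that follows is guarded: enabling nested virtualization is safe, but switching it off under a guest that may already run nested VMs is not, so the feature is never flipped back off. A tiny sketch of that one-way transition (hypothetical state values):

    def next_nested_virt_state(currently_enabled, requested):
        # One-way switch: a request enables the feature, but the
        # absence of a request never disables it for a running guest.
        return True if requested else currently_enabled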
- self._vmutils.set_nested_virtualization(instance.name, - state=nested_virt_enabled) - - def _attach_pci_devices(self, instance, is_resize): - if is_resize: - # NOTE(claudiub): there is no way to tell which devices to add when - # considering the old flavor. We need to remove all the PCI devices - # and then reattach them according to the new flavor. - self._vmutils.remove_all_pci_devices(instance.name) - - for pci_request in instance.pci_requests.requests: - spec = pci_request.spec[0] - for counter in range(pci_request.count): - self._vmutils.add_pci_device(instance.name, - spec['vendor_id'], - spec['product_id']) - - def _get_instance_vnuma_config(self, instance, image_meta): - """Returns the appropriate NUMA configuration for Hyper-V instances, - given the desired instance NUMA topology. - - :param instance: instance containing the flavor and its extra_specs, - where the NUMA topology is defined. - :param image_meta: image's metadata, containing properties related to - the instance's NUMA topology. - :returns: memory amount and number of vCPUs per NUMA node or - (None, None), if instance NUMA topology was not requested. - :raises exception.InstanceUnacceptable: - If the given instance NUMA topology is not possible on Hyper-V. - """ - image_meta = objects.ImageMeta.from_dict(image_meta) - instance_topology = hardware.numa_get_constraints(instance.flavor, - image_meta) - if not instance_topology: - # instance NUMA topology was not requested. - return None, None - - memory_per_numa_node = instance_topology.cells[0].memory - cpus_per_numa_node = len(instance_topology.cells[0].cpuset) - - # validate that the requested NUMA topology is not asymmetric. - # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y. - # same with memory. - for cell in instance_topology.cells: - if len(cell.cpuset) != cpus_per_numa_node: - reason = _("Hyper-V does not support NUMA topologies with " - "uneven number of processors. (%(a)s != %(b)s)") % { - 'a': len(cell.cpuset), 'b': cpus_per_numa_node} - raise exception.InstanceUnacceptable(reason=reason, - instance_id=instance.uuid) - if cell.memory != memory_per_numa_node: - reason = _("Hyper-V does not support NUMA topologies with " - "uneven amounts of memory. (%(a)s != %(b)s)") % { - 'a': cell.memory, 'b': memory_per_numa_node} - raise exception.InstanceUnacceptable(reason=reason, - instance_id=instance.uuid) - - return memory_per_numa_node, cpus_per_numa_node - - def _get_instance_dynamic_memory_ratio(self, instance, vnuma_enabled, - nested_virt_enabled): - dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio - if vnuma_enabled: - LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning " - "has to be disabled in order for the instance to " - "benefit from it.", instance=instance) - if CONF.hyperv.dynamic_memory_ratio > 1.0: - LOG.warning( - "Instance vNUMA topology requested, but dynamic memory " - "ratio is higher than 1.0 in nova.conf. Ignoring dynamic " - "memory ratio option.", instance=instance) - dynamic_memory_ratio = 1.0 - - if nested_virt_enabled and dynamic_memory_ratio != 1: - # NOTE(claudiub): instances requiring nested virtualization cannot - # have dynamic memory. Set dynamic memory ratio to 1 for the - # instance. (disabled) - LOG.warning("Instance %s requires nested virtualization, but " - "host is configured with dynamic memory " - "allocation. 
Creating instance without dynamic " - "memory allocation.", instance.uuid) - dynamic_memory_ratio = 1.0 - - return dynamic_memory_ratio - - def configure_remotefx(self, instance, vm_gen, is_resize=False): - """Configures RemoteFX for the given instance. - - The given instance must be a realized VM before changing any RemoteFX - configurations. - """ - extra_specs = instance.flavor.extra_specs - remotefx_max_resolution = extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_RES) - if not remotefx_max_resolution: - # RemoteFX not required. - if is_resize and instance.old_flavor.extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_RES): - # the instance was resized from a RemoteFX flavor to one - # without RemoteFX. We need to disable RemoteFX on the - # instance. - self._vmutils.disable_remotefx_video_adapter(instance.name) - return - - if not CONF.hyperv.enable_remotefx: - raise exception.InstanceUnacceptable( - _("enable_remotefx configuration option needs to be set to " - "True in order to use RemoteFX.")) - - if not self._hostutils.check_server_feature( - self._hostutils.FEATURE_RDS_VIRTUALIZATION): - raise exception.InstanceUnacceptable( - _("The RDS-Virtualization feature must be installed in order " - "to use RemoteFX.")) - - if not self._vmutils.vm_gen_supports_remotefx(vm_gen): - raise exception.InstanceUnacceptable( - _("RemoteFX is not supported on generation %s virtual " - "machines on this version of Windows.") % vm_gen) - - instance_name = instance.name - LOG.debug('Configuring RemoteFX for instance: %s', instance_name) - - remotefx_monitor_count = int(extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1) - remotefx_vram = extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_VRAM) - vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None - - self._vmutils.enable_remotefx_video_adapter( - instance_name, - remotefx_monitor_count, - remotefx_max_resolution, - vram_bytes) - - def _attach_root_device(self, context, instance, root_dev_info): - if root_dev_info['type'] == constants.VOLUME: - self._volumeops.attach_volume(context, - root_dev_info['connection_info'], - instance, - disk_bus=root_dev_info['disk_bus']) - else: - self._attach_drive(instance.name, root_dev_info['path'], - root_dev_info['drive_addr'], - root_dev_info['ctrl_disk_addr'], - root_dev_info['disk_bus'], - root_dev_info['type']) - - def attach_ephemerals(self, instance_name, ephemerals): - for eph in ephemerals: - # if an ephemeral doesn't have a path, it might have been removed - # during resize. - if eph.get('path'): - self._attach_drive( - instance_name, eph['path'], eph['drive_addr'], - eph['ctrl_disk_addr'], eph['disk_bus'], - constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[ - eph['device_type']]) - - # This may be an ephemeral added by default by us, in which - # case there won't be a bdm object. 
- bdm_obj = getattr(eph, '_bdm_obj', None) - if bdm_obj: - filename = os.path.basename(eph['path']) - self._block_dev_man.update_bdm_connection_info( - eph._bdm_obj, eph_filename=filename) - - def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr, - controller_type, drive_type=constants.DISK): - if controller_type == constants.CTRL_TYPE_SCSI: - self._vmutils.attach_scsi_drive(instance_name, path, drive_type) - else: - self._vmutils.attach_ide_drive(instance_name, path, drive_addr, - ctrl_disk_addr, drive_type) - - def get_image_vm_generation(self, instance_id, image_meta): - image_props = image_meta['properties'] - default_vm_gen = self._hostutils.get_default_vm_generation() - image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN, - default_vm_gen) - if image_prop_vm not in self._hostutils.get_supported_vm_types(): - reason = _('Requested VM Generation %s is not supported on ' - 'this OS.') % image_prop_vm - raise exception.InstanceUnacceptable(instance_id=instance_id, - reason=reason) - - return VM_GENERATIONS[image_prop_vm] - - def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path): - if (vm_gen != constants.VM_GEN_1 and root_vhd_path and - self._vhdutils.get_vhd_format( - root_vhd_path) == constants.DISK_FORMAT_VHD): - reason = _('Requested VM Generation %s, but provided VHD ' - 'instead of VHDX.') % vm_gen - raise exception.InstanceUnacceptable(instance_id=instance_id, - reason=reason) - - def _requires_certificate(self, image_meta): - os_type = image_meta.get('properties', {}).get('os_type', None) - if os_type == fields.OSType.WINDOWS: - return False - return True - - def _requires_secure_boot(self, instance, image_meta, vm_gen): - """Checks whether the given instance requires Secure Boot. - - Secure Boot feature will be enabled by setting the "os_secure_boot" - image property or the "os:secure_boot" flavor extra spec to required. - - :raises exception.InstanceUnacceptable: if the given image_meta has - no os_type property set, or if the image property value and the - flavor extra spec value are conflicting, or if Secure Boot is - required, but the instance's VM generation is 1. 
- """ - img_secure_boot = image_meta['properties'].get('os_secure_boot') - flavor_secure_boot = instance.flavor.extra_specs.get( - constants.FLAVOR_SPEC_SECURE_BOOT) - - requires_sb = False - conflicting_values = False - - if flavor_secure_boot == fields.SecureBoot.REQUIRED: - requires_sb = True - if img_secure_boot == fields.SecureBoot.DISABLED: - conflicting_values = True - elif img_secure_boot == fields.SecureBoot.REQUIRED: - requires_sb = True - if flavor_secure_boot == fields.SecureBoot.DISABLED: - conflicting_values = True - - if conflicting_values: - reason = _( - "Conflicting image metadata property and flavor extra_specs " - "values: os_secure_boot (%(image_secure_boot)s) / " - "os:secure_boot (%(flavor_secure_boot)s)") % { - 'image_secure_boot': img_secure_boot, - 'flavor_secure_boot': flavor_secure_boot} - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - if requires_sb: - if vm_gen != constants.VM_GEN_2: - reason = _('Secure boot requires generation 2 VM.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - os_type = image_meta['properties'].get('os_type') - if not os_type: - reason = _('For secure boot, os_type must be specified in ' - 'image properties.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - return requires_sb - - def _requires_nested_virt(self, instance, image_meta): - flavor_cpu_features = instance.flavor.extra_specs.get( - 'hw:cpu_features', '') - flavor_cpu_features = flavor_cpu_features.lower().split(',') - image_cpu_features = image_meta['properties'].get('hw_cpu_features', - '') - image_cpu_features = image_cpu_features.lower().split(',') - - if 'vmx' in flavor_cpu_features or 'vmx' in image_cpu_features: - if self._hostutils.supports_nested_virtualization(): - return True - - reason = _('Host does not support nested virtualization.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - return False - - def _create_config_drive(self, context, instance, injected_files, - admin_password, network_info, rescue=False): - if CONF.config_drive_format != 'iso9660': - raise exception.ConfigDriveUnsupportedFormat( - format=CONF.config_drive_format) - - LOG.info('Using config drive for instance', instance=instance) - - extra_md = {} - if admin_password and CONF.hyperv.config_drive_inject_password: - extra_md['admin_pass'] = admin_password - - inst_md = instance_metadata.InstanceMetadata( - instance, content=injected_files, extra_md=extra_md, - network_info=network_info) - - configdrive_path_iso = self._pathutils.get_configdrive_path( - instance.name, constants.DVD_FORMAT, rescue=rescue) - LOG.info('Creating config drive at %(path)s', - {'path': configdrive_path_iso}, instance=instance) - - with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: - try: - cdb.make_drive(configdrive_path_iso) - except processutils.ProcessExecutionError as e: - with excutils.save_and_reraise_exception(): - LOG.error('Creating config drive failed with error: %s', - e, instance=instance) - - if not CONF.hyperv.config_drive_cdrom: - configdrive_path = self._pathutils.get_configdrive_path( - instance.name, constants.DISK_FORMAT_VHD, rescue=rescue) - processutils.execute( - CONF.hyperv.qemu_img_cmd, - 'convert', - '-f', - 'raw', - '-O', - 'vpc', - configdrive_path_iso, - configdrive_path, - attempts=1) - self._pathutils.remove(configdrive_path_iso) - else: - configdrive_path = configdrive_path_iso - - return configdrive_path - - def 
attach_config_drive(self, instance, configdrive_path, vm_gen): - configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):] - # Do the attach here and if there is a certain file format that isn't - # supported in constants.DISK_FORMAT_MAP then bomb out. - try: - drive_type = constants.DISK_FORMAT_MAP[configdrive_ext] - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - self._attach_drive(instance.name, configdrive_path, 1, 0, - controller_type, drive_type) - except KeyError: - raise exception.InvalidDiskFormat(disk_format=configdrive_ext) - - def _detach_config_drive(self, instance_name, rescue=False, delete=False): - configdrive_path = self._pathutils.lookup_configdrive_path( - instance_name, rescue=rescue) - - if configdrive_path: - self._vmutils.detach_vm_disk(instance_name, - configdrive_path, - is_physical=False) - if delete: - self._pathutils.remove(configdrive_path) - - @serialconsoleops.instance_synchronized - def _delete_disk_files(self, instance, instance_path=None, - cleanup_migration_files=True): - # We want to avoid the situation in which serial console workers - # are started while we perform this operation, preventing us from - # deleting the instance log files (bug #1556189). This can happen - # due to delayed instance lifecycle events. - # - # The unsynchronized method is being used to avoid a deadlock. - self._serial_console_ops.stop_console_handler_unsync(instance.name) - - # This may be a 'non-default' location. - if not instance_path: - instance_path = self._pathutils.get_instance_dir(instance.name) - - self._pathutils.check_remove_dir(instance_path) - - if cleanup_migration_files: - self._pathutils.get_instance_migr_revert_dir( - instance_path, remove_dir=True) - - backup_location = instance.system_metadata.get('backup_location') - if backup_location: - self._pathutils.check_remove_dir(backup_location) - - def destroy(self, instance, *args, **kwargs): - # Nova allows destroying instances regardless of pending tasks. - # In some cases, we may not be able to properly delete instances - # while having a pending task (e.g. when snapshotting, due to file - # locks). - # - # We may append other locks for operations that are non preemptive. - # We should not rely on instance task states, which may be hanging. - @utils.synchronized(constants.SNAPSHOT_LOCK_TEMPLATE % - dict(instance_uuid=instance.uuid)) - def synchronized_destroy(): - self._destroy(instance, *args, **kwargs) - - if CONF.hyperv.force_destroy_instances: - self._destroy(instance, *args, **kwargs) - else: - synchronized_destroy() - - def _destroy(self, instance, network_info, block_device_info, - destroy_disks=True, cleanup_migration_files=True): - instance_name = instance.name - LOG.info("Got request to destroy instance", instance=instance) - - # Get the instance folder before destroying it. In some cases, - # we won't be able to retrieve it otherwise. - instance_path = self._pathutils.get_instance_dir(instance.name, - create_dir=False) - - # When reverting resizes, the manager will request instance files - # cleanup to be skipped if the hosts use shared storage, in which - # case we'd leak files if multiple CSVs are used. It's safe to - # cleanup the instance files when reverting resizes as long as we - # preserve the "*_revert" dir. - if instance.task_state == task_states.RESIZE_REVERTING: - destroy_disks = True - cleanup_migration_files = False - - try: - if self._vmutils.vm_exists(instance_name): - - # Stop the VM first. 
- self._vmutils.stop_vm_jobs(instance_name) - self.power_off(instance) - self._vmutils.destroy_vm(instance_name) - elif self._migrutils.planned_vm_exists(instance_name): - self._migrutils.destroy_existing_planned_vm(instance_name) - else: - LOG.debug("Instance not found", instance=instance) - - # NOTE(claudiub): The vifs should be unplugged and the volumes - # should be disconnected even if the VM doesn't exist anymore, - # so they are not leaked. - self.unplug_vifs(instance, network_info) - self._volumeops.disconnect_volumes(block_device_info) - - if destroy_disks: - self._delete_disk_files(instance, instance_path, - cleanup_migration_files) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to destroy instance: %s', instance_name) - - def reboot(self, instance, network_info, reboot_type): - """Reboot the specified instance.""" - LOG.debug("Rebooting instance", instance=instance) - - if reboot_type == REBOOT_TYPE_SOFT: - if self._soft_shutdown(instance): - self.power_on(instance, network_info=network_info) - return - - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_REBOOT) - - def _soft_shutdown(self, instance, - timeout=CONF.hyperv.wait_soft_reboot_seconds, - retry_interval=SHUTDOWN_TIME_INCREMENT): - """Perform a soft shutdown on the VM. - - :return: True if the instance was shutdown within time limit, - False otherwise. - """ - LOG.debug("Performing Soft shutdown on instance", instance=instance) - - while timeout > 0: - # Perform a soft shutdown on the instance. - # Wait maximum timeout for the instance to be shutdown. - # If it was not shutdown, retry until it succeeds or a maximum of - # time waited is equal to timeout. - wait_time = min(retry_interval, timeout) - try: - LOG.debug("Soft shutdown instance, timeout remaining: %d", - timeout, instance=instance) - self._vmutils.soft_shutdown_vm(instance.name) - if self._wait_for_power_off(instance.name, wait_time): - LOG.info("Soft shutdown succeeded.", - instance=instance) - return True - except os_win_exc.HyperVException as e: - # Exception is raised when trying to shutdown the instance - # while it is still booting. - LOG.debug("Soft shutdown failed: %s", e, instance=instance) - time.sleep(wait_time) - - timeout -= retry_interval - - LOG.warning("Timed out while waiting for soft shutdown.", - instance=instance) - return False - - def pause(self, instance): - """Pause VM instance.""" - LOG.debug("Pause instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_PAUSED) - - def unpause(self, instance): - """Unpause paused VM instance.""" - LOG.debug("Unpause instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_ENABLED) - - def suspend(self, instance): - """Suspend the specified instance.""" - LOG.debug("Suspend instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_SUSPENDED) - - def resume(self, instance): - """Resume the suspended VM instance.""" - LOG.debug("Resume instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_ENABLED) - - def power_off(self, instance, timeout=0, retry_interval=0): - """Power off the specified instance.""" - LOG.debug("Power off instance", instance=instance) - - # We must make sure that the console log workers are stopped, - # otherwise we won't be able to delete or move the VM log files. 
- self._serial_console_ops.stop_console_handler(instance.name) - - if retry_interval <= 0: - retry_interval = SHUTDOWN_TIME_INCREMENT - - try: - if timeout and self._soft_shutdown(instance, - timeout, - retry_interval): - return - - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_DISABLED) - except os_win_exc.HyperVVMNotFoundException: - # The manager can call the stop API after receiving instance - # power off events. If this is triggered when the instance - # is being deleted, it might attempt to power off a nonexistent - # instance. We'll just pass in this case. - LOG.debug("Instance not found. Skipping power off", - instance=instance) - - def power_on(self, instance, block_device_info=None, network_info=None, - should_plug_vifs=True): - """Power on the specified instance.""" - LOG.debug("Power on instance", instance=instance) - - if block_device_info: - self._volumeops.fix_instance_volume_disk_paths(instance.name, - block_device_info) - - if should_plug_vifs: - self.plug_vifs(instance, network_info) - self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - def _set_vm_state(self, instance, req_state): - instance_name = instance.name - - try: - self._vmutils.set_vm_state(instance_name, req_state) - - LOG.debug("Successfully changed state of VM %(instance_name)s" - " to: %(req_state)s", {'instance_name': instance_name, - 'req_state': req_state}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to change VM state of %(instance_name)s" - " to %(req_state)s", - {'instance_name': instance_name, - 'req_state': req_state}) - - def _get_vm_state(self, instance_name): - summary_info = self._vmutils.get_vm_summary_info(instance_name) - return summary_info['EnabledState'] - - def _wait_for_power_off(self, instance_name, time_limit): - """Wait for a VM to reach the disabled state. - - :return: True if the instance is shut down within time_limit, - False otherwise. - """ - - desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED] - - def _check_vm_status(instance_name): - if self._get_vm_state(instance_name) in desired_vm_states: - raise loopingcall.LoopingCallDone() - - periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status, - instance_name) - - try: - # add a timeout to the periodic call. - periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT) - etimeout.with_timeout(time_limit, periodic_call.wait) - except etimeout.Timeout: - # VM did not shut down within the expected time_limit. - return False - finally: - # stop the periodic call, in case of exceptions or Timeout.
- periodic_call.stop() - - return True - - def resume_state_on_host_boot(self, context, instance, network_info, - block_device_info=None): - """Resume guest state when a host is booted.""" - self.power_on(instance, block_device_info, network_info) - - def _create_vm_com_port_pipes(self, instance, serial_ports): - for port_number, port_type in serial_ports.items(): - pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type) - self._vmutils.set_vm_serial_port_connection( - instance.name, port_number, pipe_path) - - def copy_vm_dvd_disks(self, vm_name, dest_host): - dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name) - dest_path = self._pathutils.get_instance_dir( - vm_name, remote_server=dest_host) - for path in dvd_disk_paths: - self._pathutils.copyfile(path, dest_path) - - def plug_vifs(self, instance, network_info): - if network_info: - for vif in network_info: - try: - self._vif_driver.plug(instance, vif) - except Exception as exc: - LOG.exception("Failed to plug vif: '%s'.", - vif, instance=instance) - raise exception.VirtualInterfacePlugException( - six.text_type(exc)) - - def unplug_vifs(self, instance, network_info): - if network_info: - for vif in network_info: - self._vif_driver.unplug(instance, vif) - - def configure_instance_metrics(self, instance_name, - enable_network_metrics=False): - if not CONF.hyperv.enable_instance_metrics_collection: - LOG.debug("Instance metrics collection is not enabled.") - return - - LOG.debug("Enabling instance %s metrics.", instance_name) - # Some metrics (cpu, memory and network) are disabled when the vms are - # powered off. This looks like a Hyper-V bug that we'll have to - # mitigate at the Nova driver level. - self._metricsutils.enable_vm_metrics_collection(instance_name) - - # Network metrics are handled separately. The reason is that the vm - # must be running and the ports must be already attached in order to - # be able to enable those metrics. - if enable_network_metrics: - vif_ids = self._vmutils.get_vm_nic_names(instance_name) - for vif_id in vif_ids: - self._vif_driver.enable_metrics(instance_name, vif_id) - - def _get_image_serial_port_settings(self, image_meta): - image_props = image_meta['properties'] - serial_ports = {} - - for img_prop, port_type in six.iteritems(constants.SERIAL_PORT_TYPES): - port_number = int(image_props.get( - img_prop, - constants.DEFAULT_SERIAL_CONSOLE_PORT)) - - if port_number not in [1, 2]: - err_msg = _("Invalid serial port number: %(port_number)s. " - "Only COM 1 and COM 2 are available.") % dict( - port_number=port_number) - raise exception.ImageSerialPortNumberInvalid(err_msg) - - existing_type = serial_ports.get(port_number) - if (not existing_type or - existing_type == constants.SERIAL_PORT_TYPE_RO): - serial_ports[port_number] = port_type - - return serial_ports - - def _check_hotplug_available(self, instance): - """Check whether attaching an interface is possible for the given - instance. - - :returns: True if attaching / detaching interfaces is possible for the - given instance. - """ - vm_state = self._get_vm_state(instance.name) - if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED: - # can attach / detach interface to stopped VMs. 
- return True - - if not self._hostutils.check_min_windows_version(10, 0): - LOG.error("vNIC hot plugging is only supported on versions " - "newer than Windows / Hyper-V Server 2012 R2.") - return False - - if (self._vmutils.get_vm_generation(instance.name) == - constants.VM_GEN_1): - LOG.error("Cannot hot plug a vNIC into a first generation VM.", - instance=instance) - return False - - return True - - def attach_interface(self, context, instance, vif): - if not self._check_hotplug_available(instance): - raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid) - - LOG.debug('Attaching vif: %s', vif['id'], instance=instance) - self._vmutils.create_nic(instance.name, vif['id'], vif['address']) - self._vif_driver.plug(instance, vif) - - self.update_device_metadata(context, instance) - - def detach_interface(self, instance, vif): - try: - if not self._check_hotplug_available(instance): - raise exception.InterfaceDetachFailed( - instance_uuid=instance.uuid) - - LOG.debug('Detaching vif: %s', vif['id'], instance=instance) - self._vif_driver.unplug(instance, vif) - self._vmutils.destroy_nic(instance.name, vif['id']) - except os_win_exc.HyperVVMNotFoundException: - LOG.error("Instance not found during interface detach. It " - "might have been destroyed beforehand.", - instance=instance) - raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid) - - def rescue_instance(self, context, instance, network_info, image_meta, - rescue_password): - try: - self._rescue_instance(context, instance, network_info, - image_meta, rescue_password) - except Exception as exc: - with excutils.save_and_reraise_exception(): - err_msg = ("Instance rescue failed. Exception: %(exc)s. " - "Attempting to unrescue the instance.") - LOG.error(err_msg, {'exc': exc}, instance=instance) - self.unrescue_instance(instance) - - def _rescue_instance(self, context, instance, network_info, image_meta, - rescue_password): - rescue_image_id = image_meta.get('id') or instance.image_ref - rescue_vhd_path = self._create_root_vhd( - context, instance, rescue_image_id=rescue_image_id) - - rescue_vm_gen = self.get_image_vm_generation(instance.uuid, - image_meta) - vm_gen = self._vmutils.get_vm_generation(instance.name) - if rescue_vm_gen != vm_gen: - err_msg = _('The requested rescue image requires a different VM ' - 'generation than the actual rescued instance. ' - 'Rescue image VM generation: %(rescue_vm_gen)s. ' - 'Rescued instance VM generation: %(vm_gen)s.') % dict( - rescue_vm_gen=rescue_vm_gen, - vm_gen=vm_gen) - raise exception.ImageUnacceptable(reason=err_msg, - image_id=rescue_image_id) - - self.check_vm_image_type(instance.uuid, rescue_vm_gen, rescue_vhd_path) - - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) - if not root_vhd_path: - err_msg = _('Instance root disk image could not be found.
' - 'Rescuing instances booted from volume is ' - 'not supported.') - raise exception.InstanceNotRescuable(reason=err_msg, - instance_id=instance.uuid) - - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - - self._vmutils.detach_vm_disk(instance.name, root_vhd_path, - is_physical=False) - self._attach_drive(instance.name, rescue_vhd_path, 0, - self._ROOT_DISK_CTRL_ADDR, controller_type) - self._vmutils.attach_scsi_drive(instance.name, root_vhd_path, - drive_type=constants.DISK) - - if configdrive.required_by(instance): - self._detach_config_drive(instance.name) - rescue_configdrive_path = self._create_config_drive( - context, - instance, - injected_files=None, - admin_password=rescue_password, - network_info=network_info, - rescue=True) - self.attach_config_drive(instance, rescue_configdrive_path, - vm_gen) - - self.power_on(instance) - - def unrescue_instance(self, instance): - self.power_off(instance) - - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) - rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name, - rescue=True) - - if (instance.vm_state == vm_states.RESCUED and - not (rescue_vhd_path and root_vhd_path)): - err_msg = _('Missing instance root and/or rescue image. ' - 'The instance cannot be unrescued.') - raise exception.InstanceNotRescuable(reason=err_msg, - instance_id=instance.uuid) - - vm_gen = self._vmutils.get_vm_generation(instance.name) - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - - self._vmutils.detach_vm_disk(instance.name, root_vhd_path, - is_physical=False) - if rescue_vhd_path: - self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path, - is_physical=False) - fileutils.delete_if_exists(rescue_vhd_path) - self._attach_drive(instance.name, root_vhd_path, 0, - self._ROOT_DISK_CTRL_ADDR, controller_type) - - self._detach_config_drive(instance.name, rescue=True, delete=True) - - # Reattach the configdrive, if exists and not already attached. - configdrive_path = self._pathutils.lookup_configdrive_path( - instance.name) - if configdrive_path and not self._vmutils.is_disk_attached( - configdrive_path, is_physical=False): - self.attach_config_drive(instance, configdrive_path, vm_gen) - - self.power_on(instance) - - def _set_instance_disk_qos_specs(self, instance, is_resize): - quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota') - - disk_total_bytes_sec = int( - quota_specs.get('disk_total_bytes_sec') or 0) - disk_total_iops_sec = int( - quota_specs.get('disk_total_iops_sec') or - self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec)) - - if disk_total_iops_sec or is_resize: - # NOTE(claudiub): the instance might have been "resized" to a - # flavor with no QoS specs. We need to set them to 0 in this case. 
- local_disks = self._get_instance_local_disks(instance.name) - for disk_path in local_disks: - self._vmutils.set_disk_qos_specs(disk_path, - disk_total_iops_sec) - - def _get_instance_local_disks(self, instance_name): - instance_path = self._pathutils.get_instance_dir(instance_name) - instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0] - local_disks = [disk_path for disk_path in instance_disks - if instance_path in disk_path] - return local_disks - - def _get_scoped_flavor_extra_specs(self, instance, scope): - extra_specs = instance.flavor.extra_specs or {} - filtered_specs = {} - for spec, value in extra_specs.items(): - if ':' in spec: - _scope, key = spec.split(':') - if _scope == scope: - filtered_specs[key] = value - return filtered_specs - - def _configure_secure_vm(self, context, instance, image_meta, - secure_boot_enabled): - """Adds and enables a vTPM, encrypting the disks. - The shielding option implies that encryption is enabled. - """ - - requires_encryption = False - requires_shielded = self._feature_requested( - instance, - image_meta, - constants.IMAGE_PROP_VTPM_SHIELDED) - - if not requires_shielded: - requires_encryption = self._feature_requested( - instance, - image_meta, - constants.IMAGE_PROP_VTPM) - - if not (requires_shielded or requires_encryption): - return - - self._check_vtpm_requirements(instance, image_meta, - secure_boot_enabled) - - with self._pathutils.temporary_file('.fsk') as fsk_filepath, \ - self._pathutils.temporary_file('.pdk') as pdk_filepath: - self._create_fsk(instance, fsk_filepath) - - self._pdk.create_pdk(context, instance, image_meta, pdk_filepath) - self._vmutils.add_vtpm(instance.name, pdk_filepath, - shielded=requires_shielded) - LOG.info("vTPM was added.", instance=instance) - self._vmutils.provision_vm(instance.name, fsk_filepath, - pdk_filepath) - - def _feature_requested(self, instance, image_meta, image_prop): - image_props = image_meta['properties'] - image_prop_option = image_props.get(image_prop) - - feature_requested = image_prop_option == constants.REQUIRED - - return feature_requested - - def _check_vtpm_requirements(self, instance, image_meta, - secure_boot_enabled): - if not secure_boot_enabled: - reason = _("Adding a vTPM requires Secure Boot to be enabled.") - raise exception.InstanceUnacceptable( - instance_id=instance.uuid, reason=reason) - - os_type = image_meta.get('properties', {}).get('os_type') - if os_type not in os_win_const.VTPM_SUPPORTED_OS: - reason = _('vTPM is not supported for this OS type: %(os_type)s. ' - 'Supported OS types: %(supported_os_types)s') % { - 'os_type': os_type, - 'supported_os_types': - ','.join(os for os in os_win_const.VTPM_SUPPORTED_OS)} - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - if not self._hostutils.is_host_guarded(): - reason = _('This host is not guarded.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - def _create_fsk(self, instance, fsk_filepath): - """Writes to the FSK file all the substitution strings and their - values, which will populate the unattended file used when - creating the PDK. - """ - - fsk_pairs = self._get_fsk_data(instance) - self._vmutils.populate_fsk(fsk_filepath, fsk_pairs) - - def _get_fsk_data(self, instance): - """The unattended file may contain substitution strings. Those with - their corresponding values are passed as metadata and will be added - to an FSK file.
- """ - - fsk_pairs = {'@@%s@@' % key.split('fsk:')[1]: value - for key, value in instance.metadata.items() - if key.startswith('fsk:')} - - fsk_computername_key = '@@%s@@' % os_win_const.FSK_COMPUTERNAME - fsk_computer_name = fsk_pairs.get(fsk_computername_key) - - if instance.hostname != fsk_computer_name and fsk_computer_name: - err_msg = _("The FSK mappings contain ComputerName " - "%(fsk_computer_name)s, which does not match the " - "instance name %(instance_name)s.") % { - 'fsk_computer_name': fsk_computer_name, - 'instance_name': instance.hostname} - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=err_msg) - - # In case of not specifying the computer name as a FSK metadata value, - # it will be added by default in order to avoid a reboot when - # configuring the instance hostname - if not fsk_computer_name: - fsk_pairs[fsk_computername_key] = instance.hostname - return fsk_pairs - - @contextlib.contextmanager - def prepare_for_volume_snapshot(self, instance, allow_paused=False): - set_previous_state = False - - try: - curr_state = self._vmutils.get_vm_state(instance.name) - - allowed_states = [os_win_const.HYPERV_VM_STATE_DISABLED, - os_win_const.HYPERV_VM_STATE_SUSPENDED] - if allow_paused: - allowed_states.append(os_win_const.HYPERV_VM_STATE_PAUSED) - - if curr_state not in allowed_states: - if allow_paused: - self.pause(instance) - else: - self.suspend(instance) - set_previous_state = True - yield - finally: - if set_previous_state: - self._set_vm_state(instance, curr_state) - - def get_instance_uuid(self, instance_name, expect_existing=False): - # Fetch the instance UUID from the VM notes attribute. - try: - instance_uuid = self._vmutils.get_instance_uuid(instance_name) - return instance_uuid - except os_win_exc.HyperVVMNotFoundException: - with excutils.save_and_reraise_exception() as ctxt: - LOG.debug("Could not find instance %s while retrieving " - "its uuid. It may have been deleted meanwhile.", - instance_name) - ctxt.reraise = expect_existing - - def instance_state_change_callback(self, event): - if event.transition in (virtevent.EVENT_LIFECYCLE_STARTED, - virtevent.EVENT_LIFECYCLE_RESUMED): - # We can handle the following operations concurrently. - utils.spawn_n(self._serial_console_ops.start_console_handler, - event.name) - utils.spawn_n(self.configure_instance_metrics, - event.name, - enable_network_metrics=True) - else: - self._serial_console_ops.stop_console_handler(event.name) diff --git a/compute_hyperv/nova/volumeops.py b/compute_hyperv/nova/volumeops.py deleted file mode 100644 index 78e08f7b..00000000 --- a/compute_hyperv/nova/volumeops.py +++ /dev/null @@ -1,780 +0,0 @@ -# Copyright 2012 Pedro Navarro Perez -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for Storage-related functions (attach, detach, etc). 
-""" -import inspect -import os -import time - -from nova.compute import task_states -from nova import exception -from nova import objects -from nova import utils -from nova.virt import block_device as driver_block_device -from nova.virt import driver -from nova.volume import cinder -from os_brick.initiator import connector -from os_win import constants as os_win_const -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import strutils - -from compute_hyperv.i18n import _ -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils - -LOG = logging.getLogger(__name__) - -CONF = compute_hyperv.nova.conf.CONF - - -def volume_snapshot_lock(f): - """Synchronizes volume snapshot related operations. - - The locks will be applied on a per-instance basis. The decorated method - must accept an instance object. - """ - def inner(*args, **kwargs): - all_args = inspect.getcallargs(f, *args, **kwargs) - instance = all_args['instance'] - - lock_name = "volume-snapshot-%s" % instance.name - - @utils.synchronized(lock_name) - def synchronized(): - return f(*args, **kwargs) - - return synchronized() - return inner - - -class VolumeOps(object): - """Management class for Volume-related tasks - """ - - def __init__(self): - self._volume_api = cinder.API() - self._vmops_prop = None - self._block_dev_man_prop = None - - self._vmutils = utilsfactory.get_vmutils() - self._default_root_device = 'vda' - - self._load_volume_drivers() - - def _load_volume_drivers(self): - self.volume_drivers = { - constants.STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(), - constants.STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(), - constants.STORAGE_PROTOCOL_FC: FCVolumeDriver(), - constants.STORAGE_PROTOCOL_RBD: RBDVolumeDriver()} - - @property - def _vmops(self): - # We have to avoid a circular dependency. - if not self._vmops_prop: - self._vmops_prop = importutils.import_class( - 'compute_hyperv.nova.vmops.VMOps')() - return self._vmops_prop - - @property - def _block_dev_man(self): - if not self._block_dev_man_prop: - self._block_dev_man_prop = importutils.import_class( - 'compute_hyperv.nova.block_device_manager.' - 'BlockDeviceInfoManager')() - return self._block_dev_man_prop - - def _get_volume_driver(self, connection_info): - driver_type = connection_info.get('driver_volume_type') - if driver_type not in self.volume_drivers: - raise exception.VolumeDriverNotFound(driver_type=driver_type) - return self.volume_drivers[driver_type] - - def validate_host_configuration(self): - for protocol, volume_driver in self.volume_drivers.items(): - try: - volume_driver.validate_host_configuration() - except exception.ValidationError as ex: - LOG.warning( - "Volume driver %(protocol)s reported a validation " - "error. Attaching such volumes will probably fail. 
" - "Error message: %(err_msg)s.", - dict(protocol=protocol, err_msg=ex.message)) - - def attach_volumes(self, context, volumes, instance): - for vol in volumes: - self.attach_volume(context, vol['connection_info'], instance) - - def disconnect_volumes(self, block_device_info): - mapping = driver.block_device_info_get_mapping(block_device_info) - for vol in mapping: - self.disconnect_volume(vol['connection_info']) - - def attach_volume(self, context, connection_info, instance, - disk_bus=constants.CTRL_TYPE_SCSI, - update_device_metadata=False): - tries_left = CONF.hyperv.volume_attach_retry_count + 1 - - while tries_left: - try: - self._attach_volume(context, - connection_info, - instance, - disk_bus, - update_device_metadata) - break - except Exception as ex: - tries_left -= 1 - if not tries_left: - LOG.exception( - "Failed to attach volume %(connection_info)s " - "to instance %(instance_name)s. ", - {'connection_info': strutils.mask_dict_password( - connection_info), - 'instance_name': instance.name}) - - # We're requesting a detach as the disk may have - # been attached to the instance but one of the - # post-attach operations failed. - self.detach_volume(context, - connection_info, - instance, - update_device_metadata) - raise exception.VolumeAttachFailed( - volume_id=connection_info['serial'], - reason=ex) - else: - LOG.warning( - "Failed to attach volume %(connection_info)s " - "to instance %(instance_name)s. " - "Tries left: %(tries_left)s.", - {'connection_info': strutils.mask_dict_password( - connection_info), - 'instance_name': instance.name, - 'tries_left': tries_left}) - - time.sleep(CONF.hyperv.volume_attach_retry_interval) - - def _attach_volume(self, context, connection_info, instance, - disk_bus=constants.CTRL_TYPE_SCSI, - update_device_metadata=False): - LOG.debug( - "Attaching volume: %(connection_info)s to %(instance_name)s", - {'connection_info': strutils.mask_dict_password(connection_info), - 'instance_name': instance.name}) - volume_driver = self._get_volume_driver(connection_info) - volume_driver.attach_volume(connection_info, - instance.name, - disk_bus) - - if update_device_metadata: - # When attaching volumes to already existing instances, - # the connection info passed to the driver is not saved - # yet within the BDM table. - self._block_dev_man.set_volume_bdm_connection_info( - context, instance, connection_info) - self._vmops.update_device_metadata( - context, instance) - - qos_specs = connection_info['data'].get('qos_specs') or {} - if qos_specs: - volume_driver.set_disk_qos_specs(connection_info, - qos_specs) - - def disconnect_volume(self, connection_info): - volume_driver = self._get_volume_driver(connection_info) - volume_driver.disconnect_volume(connection_info) - - def detach_volume(self, context, connection_info, instance, - update_device_metadata=False): - LOG.debug("Detaching volume: %(connection_info)s " - "from %(instance_name)s", - {'connection_info': strutils.mask_dict_password( - connection_info), - 'instance_name': instance.name}) - volume_driver = self._get_volume_driver(connection_info) - volume_driver.detach_volume(connection_info, instance.name) - volume_driver.disconnect_volume(connection_info) - - if update_device_metadata: - self._vmops.update_device_metadata(context, instance) - - def fix_instance_volume_disk_paths(self, instance_name, block_device_info): - # Mapping containing the current disk paths for each volume. 
- actual_disk_mapping = self.get_disk_path_mapping(block_device_info) - if not actual_disk_mapping: - return - - # Mapping containing virtual disk resource path and the physical - # disk path for each volume serial number. The physical path - # associated with this resource may not be the right one, - # as physical disk paths can get swapped after host reboots. - vm_disk_mapping = self._vmutils.get_vm_physical_disk_mapping( - instance_name) - - for serial, vm_disk in vm_disk_mapping.items(): - actual_disk_path = actual_disk_mapping[serial] - if vm_disk['mounted_disk_path'] != actual_disk_path: - self._vmutils.set_disk_host_res(vm_disk['resource_path'], - actual_disk_path) - - def get_volume_connector(self): - # NOTE(lpetrut): the Windows os-brick connectors - # do not use a root helper. - conn = connector.get_connector_properties( - root_helper=None, - my_ip=CONF.my_block_storage_ip, - multipath=CONF.hyperv.use_multipath_io, - enforce_multipath=True, - host=CONF.host) - return conn - - def connect_volumes(self, block_device_info): - mapping = driver.block_device_info_get_mapping(block_device_info) - for vol in mapping: - connection_info = vol['connection_info'] - volume_driver = self._get_volume_driver(connection_info) - volume_driver.connect_volume(connection_info) - - def get_disk_path_mapping(self, block_device_info, block_dev_only=False): - block_mapping = driver.block_device_info_get_mapping(block_device_info) - disk_path_mapping = {} - for vol in block_mapping: - connection_info = vol['connection_info'] - disk_serial = connection_info['serial'] - - volume_driver = self._get_volume_driver(connection_info) - if block_dev_only and not volume_driver._is_block_dev: - continue - - disk_path = volume_driver.get_disk_resource_path(connection_info) - disk_path_mapping[disk_serial] = disk_path - return disk_path_mapping - - def get_disk_resource_path(self, connection_info): - volume_driver = self._get_volume_driver(connection_info) - return volume_driver.get_disk_resource_path(connection_info) - - @staticmethod - def bytes_per_sec_to_iops(no_bytes): - # Hyper-v uses normalized IOPS (8 KB increments) - # as IOPS allocation units. - return ( - (no_bytes + constants.IOPS_BASE_SIZE - 1) // - constants.IOPS_BASE_SIZE) - - @staticmethod - def validate_qos_specs(qos_specs, supported_qos_specs): - unsupported_specs = set(qos_specs.keys()).difference( - supported_qos_specs) - if unsupported_specs: - LOG.warning('Got unsupported QoS specs: ' - '%(unsupported_specs)s. 
' - 'Supported QoS specs: %(supported_qos_specs)s', - {'unsupported_specs': unsupported_specs, - 'supported_qos_specs': supported_qos_specs}) - - @volume_snapshot_lock - def volume_snapshot_create(self, context, instance, volume_id, - create_info): - LOG.debug("Creating snapshot for volume %(volume_id)s on instance " - "%(instance_name)s with create info %(create_info)s", - {"volume_id": volume_id, - "instance_name": instance.name, - "create_info": create_info}) - snapshot_id = create_info['snapshot_id'] - - snapshot_failed = False - try: - instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING - instance.save(expected_task_state=[None]) - - bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( - context, volume_id, instance.uuid) - driver_bdm = driver_block_device.convert_volume(bdm) - connection_info = driver_bdm['connection_info'] - - volume_driver = self._get_volume_driver(connection_info) - volume_driver.create_snapshot(connection_info, instance, - create_info) - - # The volume driver is expected to - # update the connection info. - driver_bdm.save() - except Exception: - with excutils.save_and_reraise_exception(): - snapshot_failed = True - - err_msg = ('Error occurred while snapshotting volume. ' - 'Sending error status to Cinder.') - LOG.exception(err_msg, - instance=instance) - finally: - instance.task_state = None - instance.save( - expected_task_state=[task_states.IMAGE_SNAPSHOT_PENDING]) - - snapshot_status = 'error' if snapshot_failed else 'creating' - self._volume_api.update_snapshot_status( - context, snapshot_id, snapshot_status) - - @volume_snapshot_lock - def volume_snapshot_delete(self, context, instance, volume_id, - snapshot_id, delete_info): - LOG.debug("Deleting snapshot for volume %(volume_id)s on instance " - "%(instance_name)s with delete info %(delete_info)s", - {"volume_id": volume_id, - "instance_name": instance.name, - "delete_info": delete_info}) - - snapshot_delete_failed = False - try: - instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING - instance.save(expected_task_state=[None]) - - bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( - context, volume_id, instance.uuid) - driver_bdm = driver_block_device.convert_volume(bdm) - connection_info = driver_bdm['connection_info'] - - volume_driver = self._get_volume_driver(connection_info) - volume_driver.delete_snapshot(connection_info, instance, - delete_info) - - # The volume driver is expected to - # update the connection info. - driver_bdm.save() - except Exception: - with excutils.save_and_reraise_exception(): - snapshot_delete_failed = True - - err_msg = ('Error occurred while deleting volume ' - 'snapshot.
Sending error status to Cinder.') - LOG.exception(err_msg, - instance=instance) - finally: - instance.task_state = None - instance.save( - expected_task_state=[task_states.IMAGE_SNAPSHOT_PENDING]) - - snapshot_status = ('error_deleting' - if snapshot_delete_failed else 'deleting') - self._volume_api.update_snapshot_status( - context, snapshot_id, snapshot_status) - - def get_disk_attachment_info(self, connection_info): - volume_driver = self._get_volume_driver(connection_info) - return volume_driver.get_disk_attachment_info(connection_info) - - def extend_volume(self, connection_info): - volume_driver = self._get_volume_driver(connection_info) - return volume_driver.extend_volume(connection_info) - - -class BaseVolumeDriver(object): - _is_block_dev = True - _protocol = None - _extra_connector_args = {} - - def __init__(self): - self._conn = None - self._diskutils = utilsfactory.get_diskutils() - self._vmutils = utilsfactory.get_vmutils() - self._migrutils = utilsfactory.get_migrationutils() - self._metricsutils = utilsfactory.get_metricsutils() - - @property - def _connector(self): - if not self._conn: - scan_attempts = CONF.hyperv.mounted_disk_query_retry_count - scan_interval = CONF.hyperv.mounted_disk_query_retry_interval - - self._conn = connector.InitiatorConnector.factory( - protocol=self._protocol, - root_helper=None, - use_multipath=CONF.hyperv.use_multipath_io, - device_scan_attempts=scan_attempts, - device_scan_interval=scan_interval, - **self._extra_connector_args) - return self._conn - - def connect_volume(self, connection_info): - return self._connector.connect_volume(connection_info['data']) - - def disconnect_volume(self, connection_info): - self._connector.disconnect_volume(connection_info['data']) - - def get_disk_resource_path(self, connection_info): - disk_paths = self._connector.get_volume_paths(connection_info['data']) - if not disk_paths: - vol_id = connection_info['serial'] - err_msg = _("Could not find disk path. Volume id: %s") - raise exception.DiskNotFound(err_msg % vol_id) - - return self._get_disk_res_path(disk_paths[0]) - - def validate_host_configuration(self): - if self._is_block_dev: - self._check_san_policy() - - def _get_disk_res_path(self, disk_path): - if self._is_block_dev: - # We need the Msvm_DiskDrive resource path as this - # will be used when the disk is attached to an instance. - disk_number = self._diskutils.get_device_number_from_device_name( - disk_path) - disk_res_path = self._vmutils.get_mounted_disk_by_drive_number( - disk_number) - else: - disk_res_path = disk_path - - if not disk_res_path: - err_msg = _("Could not find an attachable disk resource path " - "for disk: %s") % disk_path - raise exception.DiskNotFound(err_msg) - return disk_res_path - - def _check_san_policy(self): - disk_policy = self._diskutils.get_new_disk_policy() - - accepted_policies = [os_win_const.DISK_POLICY_OFFLINE_SHARED, - os_win_const.DISK_POLICY_OFFLINE_ALL] - - if disk_policy not in accepted_policies: - err_msg = _("Invalid SAN policy. 
The SAN policy " - "must be set to 'Offline Shared' or 'Offline All' " - "in order to attach passthrough disks to instances.") - raise exception.ValidationError(message=err_msg) - - def attach_volume(self, connection_info, instance_name, - disk_bus=constants.CTRL_TYPE_SCSI): - self.validate_host_configuration() - - dev_info = self.connect_volume(connection_info) - - serial = connection_info['serial'] - disk_path = self._get_disk_res_path(dev_info['path']) - ctrller_path, slot = self._get_disk_ctrl_and_slot(instance_name, - disk_bus) - if self._is_block_dev: - # We need to tag physical disk resources with the volume - # serial number, in order to be able to retrieve them - # during live migration. - self._vmutils.attach_volume_to_controller(instance_name, - ctrller_path, - slot, - disk_path, - serial=serial) - else: - self._vmutils.attach_drive(instance_name, - disk_path, - ctrller_path, - slot) - - self._configure_disk_metrics(disk_path) - - def _configure_disk_metrics(self, disk_path): - if not CONF.hyperv.enable_instance_metrics_collection: - return - - if self._is_block_dev: - LOG.warning("Hyper-V does not support collecting metrics for " - "passthrough disks (e.g. iSCSI/FC).") - return - - LOG.debug("Enabling disk metrics: %s.", disk_path) - self._metricsutils.enable_disk_metrics_collection( - disk_path, is_physical=self._is_block_dev) - - def detach_volume(self, connection_info, instance_name): - if self._migrutils.planned_vm_exists(instance_name): - LOG.warning("Instance %s is a Planned VM, cannot detach " - "volumes from it.", instance_name) - return - # Retrieving the disk path can be a time-consuming operation in - # the case of passthrough disks. Since such disk attachments are - # tagged using the volume id, we'll just use that instead. - # - # Note that Hyper-V does not allow us to attach the same passthrough - # disk to multiple instances, which means that we're safe to rely - # on this tag. - if not self._is_block_dev: - disk_path = self.get_disk_resource_path(connection_info) - # In this case, we're not tagging the disks, so we want os-win - # to use the disk path to identify the attachment. - serial = None - else: - disk_path = None - serial = connection_info['serial'] - - LOG.debug("Detaching disk from instance: %(instance_name)s. " - "Disk path: %(disk_path)s. Disk serial tag: %(serial)s.", - dict(disk_path=disk_path, - serial=serial, - instance_name=instance_name)) - self._vmutils.detach_vm_disk(instance_name, disk_path, - is_physical=self._is_block_dev, - serial=serial) - - def _get_disk_ctrl_and_slot(self, instance_name, disk_bus): - if disk_bus == constants.CTRL_TYPE_IDE: - # Find the IDE controller for the vm. - ctrller_path = self._vmutils.get_vm_ide_controller( - instance_name, 0) - # Attaching to the first slot. - slot = 0 - else: - # Find the SCSI controller for the vm. - ctrller_path = self._vmutils.get_vm_scsi_controller( - instance_name) - slot = self._vmutils.get_free_controller_slot(ctrller_path) - return ctrller_path, slot - - def set_disk_qos_specs(self, connection_info, disk_qos_specs): - LOG.info("The %(protocol)s Hyper-V volume driver " - "does not support QoS.
Ignoring QoS specs.", - dict(protocol=self._protocol)) - - def create_snapshot(self, connection_info, instance, create_info): - raise NotImplementedError() - - def delete_snapshot(self, connection_info, instance, delete_info): - raise NotImplementedError() - - def get_disk_attachment_info(self, connection_info): - if self._is_block_dev: - disk_path = None - serial = connection_info['serial'] - else: - disk_path = self.get_disk_resource_path(connection_info) - serial = None - - return self._vmutils.get_disk_attachment_info( - disk_path, - is_physical=self._is_block_dev, - serial=serial) - - def extend_volume(self, connection_info): - # We're not actually extending the volume, we're just - # refreshing cached information about an already extended volume. - self._connector.extend_volume(connection_info['data']) - - -class ISCSIVolumeDriver(BaseVolumeDriver): - _is_block_dev = True - _protocol = constants.STORAGE_PROTOCOL_ISCSI - - def __init__(self, *args, **kwargs): - self._extra_connector_args = dict( - initiator_list=CONF.hyperv.iscsi_initiator_list) - - super(ISCSIVolumeDriver, self).__init__(*args, **kwargs) - - -class SMBFSVolumeDriver(BaseVolumeDriver): - _is_block_dev = False - _protocol = constants.STORAGE_PROTOCOL_SMBFS - _extra_connector_args = dict(local_path_for_loopback=True) - - def __init__(self): - self._vmops_prop = None - self._pathutils = pathutils.PathUtils() - self._vhdutils = utilsfactory.get_vhdutils() - super(SMBFSVolumeDriver, self).__init__() - - @property - def _vmops(self): - # We have to avoid a circular dependency. - if not self._vmops_prop: - self._vmops_prop = importutils.import_class( - 'compute_hyperv.nova.vmops.VMOps')() - return self._vmops_prop - - def export_path_synchronized(f): - def wrapper(inst, connection_info, *args, **kwargs): - export_path = inst._get_export_path(connection_info) - - @utils.synchronized(export_path) - def inner(): - return f(inst, connection_info, *args, **kwargs) - return inner() - return wrapper - - def _get_export_path(self, connection_info): - return connection_info['data']['export'].replace('/', '\\') - - @export_path_synchronized - def attach_volume(self, *args, **kwargs): - super(SMBFSVolumeDriver, self).attach_volume(*args, **kwargs) - - @export_path_synchronized - def disconnect_volume(self, *args, **kwargs): - # We synchronize those operations based on the share path in order to - # avoid the situation when a SMB share is unmounted while a volume - # exported by it is about to be attached to an instance. - super(SMBFSVolumeDriver, self).disconnect_volume(*args, **kwargs) - - def set_disk_qos_specs(self, connection_info, qos_specs): - supported_qos_specs = ['total_iops_sec', 'total_bytes_sec'] - VolumeOps.validate_qos_specs(qos_specs, supported_qos_specs) - - total_bytes_sec = int(qos_specs.get('total_bytes_sec') or 0) - total_iops_sec = int(qos_specs.get('total_iops_sec') or - VolumeOps.bytes_per_sec_to_iops( - total_bytes_sec)) - - if total_iops_sec: - disk_path = self.get_disk_resource_path(connection_info) - self._vmutils.set_disk_qos_specs(disk_path, total_iops_sec) - - def create_snapshot(self, connection_info, instance, create_info): - attached_path = self.get_disk_resource_path(connection_info) - # Cinder tells us the new differencing disk file name it expects. - # The image does not exist yet, so we'll have to create it. 
- new_path = os.path.join(os.path.dirname(attached_path), - create_info['new_file']) - attachment_info = self._vmutils.get_disk_attachment_info( - attached_path, is_physical=False) - disk_ctrl_type = attachment_info['controller_type'] - - if disk_ctrl_type == constants.CTRL_TYPE_SCSI: - self._create_snapshot_scsi(instance, attachment_info, - attached_path, new_path) - else: - # IDE disks cannot be hotplugged. - self._create_snapshot_ide(instance, attached_path, new_path) - - connection_info['data']['name'] = create_info['new_file'] - - def _create_snapshot_ide(self, instance, attached_path, new_path): - with self._vmops.prepare_for_volume_snapshot(instance): - self._vhdutils.create_differencing_vhd(new_path, attached_path) - self._vmutils.update_vm_disk_path(attached_path, new_path, - is_physical=False) - - def _create_snapshot_scsi(self, instance, attachment_info, - attached_path, new_path): - with self._vmops.prepare_for_volume_snapshot(instance, - allow_paused=True): - self._vmutils.detach_vm_disk(instance.name, - attached_path, - is_physical=False) - self._vhdutils.create_differencing_vhd(new_path, attached_path) - self._vmutils.attach_drive(instance.name, - new_path, - attachment_info['controller_path'], - attachment_info['controller_slot']) - - def delete_snapshot(self, connection_info, instance, delete_info): - attached_path = self.get_disk_resource_path(connection_info) - attachment_info = self._vmutils.get_disk_attachment_info( - attached_path, is_physical=False) - disk_ctrl_type = attachment_info['controller_type'] - - base_dir = os.path.dirname(attached_path) - file_to_merge_name = delete_info['file_to_merge'] - file_to_merge = os.path.join(base_dir, file_to_merge_name) - - allow_paused = disk_ctrl_type == constants.CTRL_TYPE_SCSI - with self._vmops.prepare_for_volume_snapshot( - instance, - allow_paused=allow_paused): - curr_state = self._vmutils.get_vm_state(instance.name) - # We need to detach the image in order to alter the vhd chain - # while the instance is paused. - needs_detach = curr_state == os_win_const.HYPERV_VM_STATE_PAUSED - - if needs_detach: - self._vmutils.detach_vm_disk(instance.name, - attached_path, - is_physical=False) - new_top_img_path = self._do_delete_snapshot(attached_path, - file_to_merge) - attachment_changed = (attached_path.lower() != - new_top_img_path.lower()) - - if needs_detach: - self._vmutils.attach_drive(instance.name, - new_top_img_path, - attachment_info['controller_path'], - attachment_info['controller_slot']) - elif attachment_changed: - # When merging the latest snapshot, we have to update - # the attachment. Luckily, although we cannot detach - # IDE disks, we can swap them. - self._vmutils.update_vm_disk_path(attached_path, - new_top_img_path, - is_physical=False) - - connection_info['data']['name'] = os.path.basename( - new_top_img_path) - - def _do_delete_snapshot(self, attached_path, file_to_merge): - parent_path = self._vhdutils.get_vhd_parent_path(file_to_merge) - path_to_reconnect = None - - merging_top_image = attached_path.lower() == file_to_merge.lower() - if not merging_top_image: - path_to_reconnect = self._get_higher_image_from_chain( - file_to_merge, attached_path) - - # We'll let Cinder delete this image. At this point, Cinder may - # safely query it, considering that it will no longer be in-use. 
- self._vhdutils.merge_vhd(file_to_merge, - delete_merged_image=False) - - if path_to_reconnect: - self._vhdutils.reconnect_parent_vhd(path_to_reconnect, - parent_path) - - new_top_img_path = (parent_path if merging_top_image - else attached_path) - return new_top_img_path - - def _get_higher_image_from_chain(self, vhd_path, top_vhd_path): - # We're searching for the child image of the specified vhd. - # We start by looking at the top image, looping through the - # parent images. - current_path = top_vhd_path - parent_path = self._vhdutils.get_vhd_parent_path(current_path) - while parent_path: - if parent_path.lower() == vhd_path.lower(): - return current_path - - current_path = parent_path - parent_path = self._vhdutils.get_vhd_parent_path(current_path) - - err_msg = _("Could not find image %(vhd_path)s in the chain using " - "top level image %(top_vhd_path)s") - raise exception.ImageNotFound( - err_msg % dict(vhd_path=vhd_path, top_vhd_path=top_vhd_path)) - - -class FCVolumeDriver(BaseVolumeDriver): - _is_block_dev = True - _protocol = constants.STORAGE_PROTOCOL_FC - - -class RBDVolumeDriver(BaseVolumeDriver): - _is_block_dev = True - _protocol = constants.STORAGE_PROTOCOL_RBD - _extra_connector_args = dict(do_local_attach=True) diff --git a/compute_hyperv/tests/__init__.py b/compute_hyperv/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/tests/fake_instance.py b/compute_hyperv/tests/fake_instance.py deleted file mode 100644 index 42914cff..00000000 --- a/compute_hyperv/tests/fake_instance.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import uuid - -from nova import objects -from nova.objects import fields -import six - - -def fake_db_instance(**updates): - flavorinfo = None - db_instance = { - 'id': 1, - 'deleted': False, - 'uuid': str(uuid.uuid4()), - 'user_id': 'fake-user', - 'project_id': 'fake-project', - 'host': 'fake-host', - 'created_at': datetime.datetime(1955, 11, 5), - 'pci_devices': [], - 'security_groups': [], - 'metadata': {}, - 'system_metadata': {}, - 'root_gb': 0, - 'ephemeral_gb': 0, - 'extra': {'pci_requests': None, - 'flavor': flavorinfo, - 'numa_topology': None, - 'vcpu_model': None, - 'trusted_certs': None, - }, - 'tags': [], - 'services': [] - } - - for name, field in six.iteritems(objects.Instance.fields): - if name in db_instance: - continue - if field.nullable: - db_instance[name] = None - elif field.default != fields.UnspecifiedDefault: - db_instance[name] = field.default - elif name in ['flavor', 'ec2_ids', 'keypairs']: - pass - else: - raise Exception('fake_db_instance needs help with %s' % name) - - if updates: - db_instance.update(updates) - return db_instance - - -def fake_instance_obj(context='fake-context', **updates): - expected_attrs = updates.pop('expected_attrs', None) - flavor = objects.Flavor(id=1, name='flavor1', - memory_mb=256, vcpus=1, - root_gb=1, ephemeral_gb=1, - flavorid='1', - swap=0, rxtx_factor=1.0, - vcpu_weight=1, - disabled=False, - is_public=True, - extra_specs={}, - projects=[]) - flavor.obj_reset_changes() - inst = objects.Instance._from_db_object(context, - objects.Instance(), fake_db_instance(**updates), - expected_attrs=expected_attrs) - inst.flavor = flavor - inst.old_flavor = flavor.obj_clone() - inst.obj_reset_changes() - return inst diff --git a/compute_hyperv/tests/test.py b/compute_hyperv/tests/test.py deleted file mode 100644 index b1af08e4..00000000 --- a/compute_hyperv/tests/test.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base classes for our unit tests. - -Allows overriding of flags for use of fakes, and some black magic for -inline callbacks. 
- -""" - -import eventlet -eventlet.monkey_patch(os=False) -import inspect -from unittest import mock - -import fixtures -from nova.tests import fixtures as nova_fixtures -from oslo_log.fixture import logging_error as log_fixture -from oslo_log import log as logging -from oslotest import base -from oslotest import mock_fixture -import six - -import compute_hyperv.nova.conf - -CONF = compute_hyperv.nova.conf.CONF - -logging.register_options(CONF) -CONF.set_override('use_stderr', False) - -_TRUE_VALUES = ('True', 'true', '1', 'yes') - - -def _patch_mock_to_raise_for_invalid_assert_calls(): - def raise_for_invalid_assert_calls(wrapped): - def wrapper(_self, name): - valid_asserts = [ - 'assert_called_with', - 'assert_called_once_with', - 'assert_has_calls', - 'assert_any_calls'] - - if name.startswith('assert') and name not in valid_asserts: - raise AttributeError('%s is not a valid mock assert method' - % name) - - return wrapped(_self, name) - return wrapper - mock.Mock.__getattr__ = raise_for_invalid_assert_calls( - mock.Mock.__getattr__) - - -# NOTE(gibi): needs to be called only once at import time -# to patch the mock lib -_patch_mock_to_raise_for_invalid_assert_calls() - -# NOTE(claudiub): this needs to be called before any mock.patch calls are -# being done, and especially before any other test classes load. This fixes -# the mock.patch autospec issue: -# https://github.com/testing-cabal/mock/issues/396 -mock_fixture.patch_mock_module() - - -class NoDBTestCase(base.BaseTestCase): - """Test case base class for all unit tests. - - Due to the slowness of DB access, please consider deriving from - `NoDBTestCase` first. - """ - - TIMEOUT_SCALING_FACTOR = 1 - MOCK_TOOZ = True - - def setUp(self): - """Run before each test method to initialize test environment.""" - # Ensure BaseTestCase's ConfigureLogging fixture is disabled since - # we're using the one from Nova (StandardLogging). - with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'): - super(NoDBTestCase, self).setUp() - - self.useFixture(mock_fixture.MockAutospecFixture()) - - self.useFixture(log_fixture.get_logging_handle_error_fixture()) - - self.useFixture(nova_fixtures.StandardLogging()) - self.useFixture(nova_fixtures.ConfFixture(CONF)) - - # NOTE(blk-u): WarningsFixture must be after the Database fixture - # because sqlalchemy-migrate messes with the warnings filters. 
-        self.useFixture(nova_fixtures.WarningsFixture())
-
-        self.addCleanup(self._clear_attrs)
-        self.policy = self.useFixture(nova_fixtures.PolicyFixture())
-
-        self.useFixture(nova_fixtures.PoisonFunctions())
-
-        if self.MOCK_TOOZ:
-            self.patch('compute_hyperv.nova.coordination.Coordinator.start')
-            self.patch('compute_hyperv.nova.coordination.Coordinator.stop')
-            self.patch('compute_hyperv.nova.coordination.Coordinator.get_lock')
-
-    def _clear_attrs(self):
-        # Delete attributes that don't start with _ so they don't pin
-        # memory around unnecessarily for the duration of the test
-        # suite
-        for key in [k for k in six.iterkeys(self.__dict__) if k[0] != '_']:
-            del self.__dict__[key]
-
-    def flags(self, **kw):
-        """Override flag variables for a test."""
-        group = kw.pop('group', None)
-        for k, v in six.iteritems(kw):
-            CONF.set_override(k, v, group)
-
-    def patch(self, path, *args, **kwargs):
-        patcher = mock.patch(path, *args, **kwargs)
-        result = patcher.start()
-        return result
-
-    def assertPublicAPISignatures(self, baseinst, inst):
-        def get_public_apis(inst):
-            methods = {}
-            for (name, value) in inspect.getmembers(inst, inspect.ismethod):
-                if name.startswith("_"):
-                    continue
-                methods[name] = value
-            return methods
-
-        baseclass = baseinst.__class__.__name__
-        basemethods = get_public_apis(baseinst)
-        implmethods = get_public_apis(inst)
-
-        extranames = []
-        for name in sorted(implmethods.keys()):
-            if name not in basemethods:
-                extranames.append(name)
-
-        self.assertEqual([], extranames,
-                         "public APIs not listed in base class %s" %
-                         baseclass)
-
-        for name in sorted(implmethods.keys()):
-            baseargs = inspect.getargspec(basemethods[name])
-            implargs = inspect.getargspec(implmethods[name])
-
-            self.assertEqual(baseargs, implargs,
-                             "%s args don't match base class %s" %
-                             (name, baseclass))
diff --git a/compute_hyperv/tests/unit/__init__.py b/compute_hyperv/tests/unit/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/compute_hyperv/tests/unit/cluster/__init__.py b/compute_hyperv/tests/unit/cluster/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/compute_hyperv/tests/unit/cluster/test_clusterops.py b/compute_hyperv/tests/unit/cluster/test_clusterops.py
deleted file mode 100644
index 7921dec1..00000000
--- a/compute_hyperv/tests/unit/cluster/test_clusterops.py
+++ /dev/null
@@ -1,443 +0,0 @@
-# Copyright 2016 Cloudbase Solutions Srl
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-import ddt
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import exception
-from nova.network import neutron
-from nova import objects
-from nova.virt import event as virtevent
-from os_win import constants as os_win_const
-from os_win import exceptions as os_win_exc
-
-from compute_hyperv.nova.cluster import clusterops
-import compute_hyperv.nova.conf
-from compute_hyperv.tests import fake_instance
-from compute_hyperv.tests.unit import test_base
-
-CONF = compute_hyperv.nova.conf.CONF
-
-
-@ddt.ddt
-class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
-    """Unit tests for the Hyper-V ClusterOps class."""
-
-    _autospec_classes = [
-        clusterops.hostops.HostOps,
-        neutron.API,
-        clusterops.vmops.VMOps,
-        clusterops.serialconsoleops.SerialConsoleOps,
-        clusterops.placement_utils.PlacementUtils,
-    ]
-
-    _FAKE_INSTANCE_NAME = 'fake_instance_name'
-
-    def setUp(self):
-        super(ClusterOpsTestCase, self).setUp()
-        self.context = 'fake_context'
-
-        self.clusterops = clusterops.ClusterOps()
-        self.clusterops._context = self.context
-
-        self._clustutils = self.clusterops._clustutils
-        self._network_api = self.clusterops._network_api
-        self._placement = self.clusterops._placement
-
-    def test_get_instance_host(self):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops.get_instance_host(mock_instance)
-
-        self.clusterops._clustutils.get_vm_host.assert_called_once_with(
-            mock_instance.name)
-
-    def test_add_to_cluster(self):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops.add_to_cluster(mock_instance)
-
-        mock_add_vm = self.clusterops._clustutils.add_vm_to_cluster
-        mock_add_vm.assert_called_once_with(
-            mock_instance.name, CONF.hyperv.max_failover_count,
-            CONF.hyperv.failover_period, CONF.hyperv.auto_failback)
-        self.assertEqual(mock_instance.uuid,
-                         self.clusterops._instance_map[mock_instance.name])
-
-    @mock.patch.object(clusterops, 'LOG')
-    def test_add_to_cluster_exception(self, mock_LOG):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        mock_add_vm = self.clusterops._clustutils.add_vm_to_cluster
-        mock_add_vm.side_effect = os_win_exc.HyperVClusterException
-
-        self.clusterops.add_to_cluster(mock_instance)
-        self.assertTrue(mock_LOG.exception.called)
-
-    def test_remove_from_cluster(self):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops.remove_from_cluster(mock_instance)
-
-        self.clusterops._clustutils.vm_exists.assert_called_once_with(
-            mock_instance.name)
-        self.clusterops._clustutils.delete.assert_called_once_with(
-            mock_instance.name)
-        self.assertIsNone(self.clusterops._instance_map.get(
-            mock_instance.name))
-
-    @mock.patch.object(clusterops, 'LOG')
-    def test_remove_from_cluster_exception(self, mock_LOG):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        mock_delete = self.clusterops._clustutils.delete
-        mock_delete.side_effect = os_win_exc.HyperVClusterException
-
-        self.clusterops.remove_from_cluster(mock_instance)
-        self.assertTrue(mock_LOG.exception.called)
-
-    def test_post_migration(self):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops.post_migration(mock_instance)
-
-        self.assertEqual(
-            self.clusterops._instance_map[mock_instance.name],
-            mock_instance.uuid)
-
-    @mock.patch('nova.utils.spawn_n')
-    def test_start_failover_listener_daemon(self, mock_spawn):
-        self.clusterops.start_failover_listener_daemon()
-
-        spawn_args = mock_spawn.call_args_list[0][0]
-        self.assertEqual(
-            self._clustutils.get_vm_owner_change_listener_v2.return_value,
-            spawn_args[0])
-
-        cbk = spawn_args[1]
-        cbk()
-
-        mock_spawn.assert_called_with(self.clusterops._failover_migrate)
-
-    @mock.patch('nova.utils.spawn_n')
-    @mock.patch.object(clusterops.ClusterOps, '_failover_migrate')
-    @mock.patch.object(clusterops.ClusterOps, '_get_nova_instances')
-    def test_reclaim_failovered_instances(self, mock_get_instances,
-                                          mock_failover_migrate,
-                                          mock_spawn):
-        self.clusterops._this_node = 'fake_node'
-        mock_instance1 = mock.MagicMock(host='other_host')
-        mock_instance2 = mock.MagicMock(host=self.clusterops._this_node)
-        mock_get_instances.return_value = [mock_instance1, mock_instance2]
-
-        self.clusterops.reclaim_failovered_instances()
-
-        self.clusterops._vmops.list_instance_uuids.assert_called_once_with()
-        mock_get_instances.assert_called_once_with(
-            ['id', 'uuid', 'name', 'host'],
-            self.clusterops._vmops.list_instance_uuids.return_value)
-        mock_spawn.assert_called_once_with(
-            mock_failover_migrate,
-            mock_instance1.name,
-            self.clusterops._this_node)
-
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops, 'LOG')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_migrate_no_instance(self, mock_get_instance_by_name,
-                                          mock_LOG,
-                                          mock_wait_pending_instance):
-        mock_get_instance_by_name.return_value = None
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          mock.sentinel.new_host)
-
-        mock_LOG.debug.assert_called_once_with(
-            'Instance %s does not exist in nova. Skipping.',
-            mock.sentinel.instance_name)
-        self.assertFalse(
-            self.clusterops._network_api.get_instance_nw_info.called)
-
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops, 'LOG')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_migrate_migrating(self, mock_get_instance_by_name,
-                                        mock_LOG, mock_wait_pending_instance):
-        instance = mock_get_instance_by_name.return_value
-        instance.task_state = task_states.MIGRATING
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          'new_host')
-
-        mock_LOG.debug.assert_called_once_with(
-            'Instance %s is being migrated by Nova. This '
-            'will not be treated as a failover.',
-            mock.sentinel.instance_name)
-
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_migrate_at_source_node(self, mock_get_instance_by_name,
-                                             mock_wait_pending_instance):
-        instance = mock_get_instance_by_name.return_value
-        instance.host = 'old_host'
-        self.clusterops._this_node = instance.host
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          'new_host')
-
-        self.clusterops._vmops.unplug_vifs.assert_called_once_with(instance,
-            self.clusterops._network_api.get_instance_nw_info.return_value)
-
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops, 'LOG')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_migrate_not_this_node(self, mock_get_instance_by_name,
-                                            mock_LOG,
-                                            mock_wait_pending_instance):
-        self.clusterops._this_node = 'new_host'
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          'host')
-
-        mock_LOG.debug.assert_called_once_with(
-            'Instance %s did not failover to this node.',
-            mock.sentinel.instance_name)
-
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops.ClusterOps, '_failover_migrate_networks')
-    @mock.patch.object(clusterops.ClusterOps, '_nova_failover_server')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_migrate_changed_host(self, mock_get_instance_by_name,
-                                           mock_nova_failover_server,
-                                           mock_failover_migrate_networks,
-                                           mock_wait_pending_instance):
-        instance = mock_get_instance_by_name.return_value
-        old_host = 'old_host'
-        new_host = 'new_host'
-        instance.host = old_host
-        self.clusterops._this_node = new_host
-        self._clustutils.get_vm_host.return_value = new_host
-        # Placement exceptions shouldn't break the rest of the failover logic.
-        self._placement.move_compute_node_allocations.side_effect = (
-            exception.NovaException)
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          new_host)
-
-        mock_wait_pending_instance.assert_called_once_with(
-            mock.sentinel.instance_name)
-        self._clustutils.get_vm_host.assert_called_once_with(
-            mock.sentinel.instance_name)
-        mock_get_instance_by_name.assert_called_once_with(
-            mock.sentinel.instance_name)
-        get_inst_nw_info = self.clusterops._network_api.get_instance_nw_info
-        get_inst_nw_info.assert_called_once_with(self.clusterops._context,
-                                                 instance)
-        mock_nova_failover_server.assert_called_once_with(instance, new_host)
-        mock_failover_migrate_networks.assert_called_once_with(
-            instance, old_host)
-        self._placement.move_compute_node_allocations.assert_called_once_with(
-            self.clusterops._context, instance, old_host, new_host,
-            merge_existing=False)
-        self.clusterops._vmops.plug_vifs.assert_called_once_with(
-            instance, get_inst_nw_info.return_value)
-        c_handler = self.clusterops._serial_console_ops.start_console_handler
-        c_handler.assert_called_once_with(mock.sentinel.instance_name)
-
-    @ddt.data({},
-              {'recreate_ports_on_failover': True})
-    @ddt.unpack
-    @mock.patch.object(clusterops.ClusterOps, '_wait_for_pending_instance')
-    @mock.patch.object(clusterops.ClusterOps, '_failover_migrate_networks')
-    @mock.patch.object(clusterops.ClusterOps, '_nova_failover_server')
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_failover_same_node(self, mock_get_instance_by_name,
-                                mock_nova_failover_server,
-                                mock_failover_migrate_networks,
-                                mock_wait_pending_instance,
-                                recreate_ports_on_failover=False):
-        # In some cases, the instances may bounce between hosts. We're testing
-        # the case in which the instance is actually returning to the initial
-        # host during the time in which we're processing events.
-        self.flags(recreate_ports_on_failover=recreate_ports_on_failover,
-                   group='hyperv')
-
-        instance = mock_get_instance_by_name.return_value
-        old_host = 'old_host'
-        new_host = 'new_host'
-        instance.host = old_host
-        self.clusterops._this_node = old_host
-        self._clustutils.get_vm_host.return_value = old_host
-
-        self.clusterops._failover_migrate(mock.sentinel.instance_name,
-                                          new_host)
-
-        get_inst_nw_info = self.clusterops._network_api.get_instance_nw_info
-        get_inst_nw_info.assert_called_once_with(self.clusterops._context,
-                                                 instance)
-        mock_nova_failover_server.assert_called_once_with(instance, old_host)
-        if recreate_ports_on_failover:
-            self.clusterops._vmops.unplug_vifs.assert_called_once_with(
-                instance, get_inst_nw_info.return_value)
-        else:
-            self.clusterops._vmops.unplug_vifs.assert_not_called()
-        self.clusterops._vmops.plug_vifs.assert_called_once_with(
-            instance, get_inst_nw_info.return_value)
-        self._placement.move_compute_node_allocations.assert_not_called()
-        mock_failover_migrate_networks.assert_not_called()
-        c_handler = self.clusterops._serial_console_ops.start_console_handler
-        c_handler.assert_called_once_with(mock.sentinel.instance_name)
-
-    @mock.patch('time.sleep')
-    def test_wait_for_pending_instance(self, mock_sleep):
-        self._clustutils.get_cluster_group_state_info.side_effect = [
-            dict(state=os_win_const.CLUSTER_GROUP_PENDING),
-            dict(state=os_win_const.CLUSTER_GROUP_ONLINE)]
-
-        self.clusterops._wait_for_pending_instance(mock.sentinel.instance_name)
-
-        self._clustutils.get_cluster_group_state_info.assert_has_calls(
-            [mock.call(mock.sentinel.instance_name)] * 2)
-        mock_sleep.assert_called_once_with(2)
-
-    def test_failover_migrate_networks(self):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        fake_source = mock.MagicMock()
-        fake_migration = {'source_compute': fake_source,
-                          'dest_compute': self.clusterops._this_node}
-
-        self.clusterops._failover_migrate_networks(mock_instance,
-                                                   fake_source)
-
-        mock_network_api = self.clusterops._network_api
-        calls = [mock.call(self.clusterops._context, mock_instance,
-                           self.clusterops._this_node),
-                 mock.call(self.clusterops._context, mock_instance,
-                           self.clusterops._this_node),
-                 mock.call(self.clusterops._context, mock_instance,
-                           self.clusterops._this_node),
-                 mock.call(self.clusterops._context, mock_instance,
-                           fake_source, teardown=True)]
-        mock_network_api.setup_networks_on_host.assert_has_calls(calls)
-        mock_network_api.migrate_instance_start.assert_called_once_with(
-            self.clusterops._context, mock_instance, fake_migration)
-        mock_network_api.migrate_instance_finish.assert_called_once_with(
-            self.clusterops._context, mock_instance, fake_migration,
-            provider_mappings=None)
-
-    @mock.patch.object(objects.Instance, 'get_by_uuid')
-    def test_get_instance_by_name(self, mock_get_by_uuid):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        mock_get_by_uuid.return_value = mock_instance
-        self.clusterops._instance_map[mock_instance.name] = mock_instance.uuid
-
-        ret = self.clusterops._get_instance_by_name(mock_instance.name)
-        self.assertEqual(ret, mock_instance)
-
-    @mock.patch.object(objects.Instance, 'get_by_uuid')
-    def test_get_instance_by_name_not_in_cache(self, mock_get_by_uuid):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops._vmutils.get_instance_uuid.return_value = (
-            mock_instance.uuid)
-        mock_get_by_uuid.return_value = mock_instance
-
-        ret = self.clusterops._get_instance_by_name(mock_instance.name)
-        self.assertEqual(ret, mock_instance)
-        self.assertEqual(mock_instance.uuid,
-                         self.clusterops._instance_map[mock_instance.name])
-
-    @mock.patch.object(objects.Instance, 'get_by_uuid')
-    def test_get_instance_by_name_not_update_map(self, mock_get_by_uuid):
-        mock_instance = fake_instance.fake_instance_obj(self.context)
-        self.clusterops._vmutils.get_instance_uuid.side_effect = (
-            os_win_exc.HyperVVMNotFoundException(vm_name=mock_instance.name))
-        self.clusterops._update_instance_map = mock.MagicMock()
-        self.clusterops._instance_map = mock.MagicMock()
-        self.clusterops._instance_map.get.side_effect = [None,
-                                                         mock_instance.uuid]
-        mock_get_by_uuid.return_value = mock_instance
-
-        ret = self.clusterops._get_instance_by_name(mock_instance.name)
-        self.assertEqual(ret, mock_instance)
-        self.clusterops._update_instance_map.assert_called_with()
-
-    @mock.patch.object(clusterops.ClusterOps, '_get_nova_instances')
-    def test_update_instance_map(self, mock_get_instances):
-        mock_instance = mock.MagicMock(uuid=mock.sentinel.uuid)
-        mock_instance.configure_mock(name=mock.sentinel.name)
-        mock_get_instances.return_value = [mock_instance]
-
-        self.clusterops._update_instance_map()
-
-        self.assertEqual(mock.sentinel.uuid,
-                         self.clusterops._instance_map[mock.sentinel.name])
-
-    @ddt.data({'instance_uuids': None},
-              {'instance_uuids': []},
-              {'instance_uuids': mock.sentinel.uuid})
-    @ddt.unpack
-    @mock.patch.object(clusterops.objects.InstanceList, 'get_by_filters')
-    def test_get_nova_instances(self, mock_get_by_filters, instance_uuids):
-        instances = self.clusterops._get_nova_instances(
-            instance_uuids=instance_uuids)
-
-        self.assertEqual(mock_get_by_filters.return_value, instances)
-        expected_attrs = ['id', 'uuid', 'name']
-        expected_filters = {'deleted': False}
-        if instance_uuids is not None:
-            expected_filters['uuid'] = instance_uuids
-        mock_get_by_filters.assert_called_once_with(
-            self.clusterops._context, expected_filters,
-            expected_attrs=expected_attrs)
-
-    @mock.patch.object(clusterops.block_device, 'DriverVolumeBlockDevice')
-    @mock.patch.object(clusterops.objects.BlockDeviceMappingList,
-                       'get_by_instance_uuid')
-    def test_get_instance_block_device_mappings(self, mock_get_by_uuid,
-                                                mock_DriverVBD):
-        mock_get_by_uuid.return_value = [mock.sentinel.bdm]
-        mock_instance = mock.MagicMock()
-
-        bdms = self.clusterops._get_instance_block_device_mappings(
-            mock_instance)
-
-        self.assertEqual([mock_DriverVBD.return_value], bdms)
-        mock_get_by_uuid.assert_called_once_with(self.clusterops._context,
-                                                 mock_instance.uuid)
-        mock_DriverVBD.assert_called_once_with(mock.sentinel.bdm)
-
-    def test_nova_failover_server(self):
-        mock_instance = mock.MagicMock(vm_state=vm_states.ERROR,
-                                       power_state=power_state.NOSTATE)
-
-        self.clusterops._nova_failover_server(mock_instance,
-                                              mock.sentinel.host)
-
-        self.assertEqual(vm_states.ACTIVE, mock_instance.vm_state)
-        self.assertEqual(power_state.RUNNING, mock_instance.power_state)
-        self.assertEqual(mock.sentinel.host, mock_instance.host)
-        self.assertEqual(mock.sentinel.host, mock_instance.node)
-        mock_instance.save.assert_called_once_with(expected_task_state=[None])
-
-    @mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
-    def test_instance_state_change_callback(self, mock_get_instance_by_name):
-        event = mock.Mock(transition=virtevent.EVENT_LIFECYCLE_STARTED)
-        mock_instance = mock_get_instance_by_name.return_value
-
-        self.clusterops.instance_state_change_callback(event)
-
-        mock_get_instance_by_name.assert_called_once_with(event.name)
-
-        self._network_api.get_instance_nw_info.assert_called_once_with(
-            self.context, mock_instance)
-        self.clusterops._vmops.plug_vifs.assert_called_once_with(
-            mock_instance,
-            self._network_api.get_instance_nw_info.return_value)
diff --git a/compute_hyperv/tests/unit/cluster/test_driver.py b/compute_hyperv/tests/unit/cluster/test_driver.py
deleted file mode 100644
index 4a8981b4..00000000
--- a/compute_hyperv/tests/unit/cluster/test_driver.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2016 Cloudbase Solutions SRL
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for the Hyper-V Cluster Driver."""
-
-from unittest import mock
-
-from nova import safe_utils
-from nova.virt import driver as nova_base_driver
-
-from compute_hyperv.nova.cluster import driver
-from compute_hyperv.nova import driver as base_driver
-from compute_hyperv.tests.unit import test_base
-
-
-class HyperVClusterTestCase(test_base.HyperVBaseTestCase):
-
-    _autospec_classes = [
-        driver.clusterops.ClusterOps,
-        base_driver.eventhandler.InstanceEventHandler,
-        base_driver.hostops.api.API,
-        driver.livemigrationops.ClusterLiveMigrationOps,
-    ]
-
-    @mock.patch.object(base_driver.HyperVDriver,
-                       '_check_minimum_windows_version')
-    def setUp(self, mock_check_minimum_windows_version):
-        super(HyperVClusterTestCase, self).setUp()
-
-        self.context = 'context'
-        self.driver = driver.HyperVClusterDriver(mock.sentinel.virtapi)
-
-    def test_public_api_signatures(self):
-        driver_methods = dict(driver.HyperVClusterDriver.__dict__,
-                              **base_driver.HyperVDriver.__dict__)
-
-        for attr in driver_methods:
-            class_member = getattr(driver.HyperVClusterDriver, attr)
-            if callable(class_member):
-                mocked_method = mock.patch.object(
-                    driver.HyperVClusterDriver, attr,
-                    safe_utils.get_wrapped_function(class_member))
-                mocked_method.start()
-                self.addCleanup(mocked_method.stop)
-
-        self.assertPublicAPISignatures(nova_base_driver.ComputeDriver,
-                                       driver.HyperVClusterDriver)
-
-    def test_set_event_handler_callbacks(self):
-        self.driver._set_event_handler_callbacks()
-
-        self.driver._event_handler.add_callback.assert_has_calls(
-            [mock.call(self.driver.emit_event),
-             mock.call(self.driver._vmops.instance_state_change_callback),
-             mock.call(self.driver._clops.instance_state_change_callback)])
-
-    @mock.patch.object(base_driver.HyperVDriver, 'spawn')
-    def test_spawn(self, mock_superclass_spawn):
-        self.driver.spawn(self.context, mock.sentinel.fake_instance,
-                          mock.sentinel.image_meta,
-                          mock.sentinel.injected_files,
-                          mock.sentinel.admin_pass,
-                          mock.sentinel.allocations,
-                          mock.sentinel.network_info,
-                          mock.sentinel.block_dev_info,
-                          mock.sentinel.power_on,
-                          mock.sentinel.accel_info)
-
-        mock_superclass_spawn.assert_called_once_with(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.image_meta, mock.sentinel.injected_files,
-            mock.sentinel.admin_pass, mock.sentinel.allocations,
-            mock.sentinel.network_info, mock.sentinel.block_dev_info,
-            mock.sentinel.power_on)
-
-        self.driver._clops.add_to_cluster.assert_called_once_with(
-            mock.sentinel.fake_instance)
-
-    @mock.patch.object(base_driver.HyperVDriver, 'destroy')
-    def test_destroy(self, mock_superclass_destroy):
-        self.driver.destroy(self.context, mock.sentinel.fake_instance,
-                            mock.sentinel.network_info,
-                            mock.sentinel.block_dev_info,
-                            mock.sentinel.destroy_disks)
-
-        mock_superclass_destroy.assert_called_once_with(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.network_info, mock.sentinel.block_dev_info,
-            mock.sentinel.destroy_disks)
-
-    @mock.patch.object(base_driver.HyperVDriver, 'migrate_disk_and_power_off')
-    def test_migrate_disk_and_power_off(self, mock_superclass_migrate):
-        disk_info = self.driver.migrate_disk_and_power_off(
-            self.context,
-            mock.sentinel.fake_instance,
-            mock.sentinel.destination,
-            mock.sentinel.flavor,
-            mock.sentinel.network_info,
-            mock.sentinel.block_dev_info,
-            mock.sentinel.timeout,
-            mock.sentinel.retry_interval)
-
-        self.assertEqual(mock_superclass_migrate.return_value, disk_info)
-        self.driver._clops.remove_from_cluster.assert_called_once_with(
-            mock.sentinel.fake_instance)
-        mock_superclass_migrate.assert_called_once_with(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.destination, mock.sentinel.flavor,
-            mock.sentinel.network_info, mock.sentinel.block_dev_info,
-            mock.sentinel.timeout, mock.sentinel.retry_interval)
-
-    @mock.patch.object(base_driver.HyperVDriver, 'finish_migration')
-    def test_finish_migration(self, mock_superclass_finish_migration):
-        self.driver.finish_migration(self.context,
-                                     mock.sentinel.migration,
-                                     mock.sentinel.fake_instance,
-                                     mock.sentinel.disk_info,
-                                     mock.sentinel.network_info,
-                                     mock.sentinel.image_meta,
-                                     mock.sentinel.resize_instance,
-                                     mock.sentinel.allocations,
-                                     mock.sentinel.block_dev_info,
-                                     mock.sentinel.power_on)
-        mock_superclass_finish_migration.assert_called_once_with(
-            self.context, mock.sentinel.migration, mock.sentinel.fake_instance,
-            mock.sentinel.disk_info, mock.sentinel.network_info,
-            mock.sentinel.image_meta, mock.sentinel.resize_instance,
-            mock.sentinel.allocations, mock.sentinel.block_dev_info,
-            mock.sentinel.power_on)
-        self.driver._clops.add_to_cluster.assert_called_once_with(
-            mock.sentinel.fake_instance)
-
-    @mock.patch.object(base_driver.HyperVDriver, 'finish_revert_migration')
-    def test_finish_revert_migration(self, mock_superclass_finish_rev_migr):
-        self.driver.finish_revert_migration(self.context,
-                                            mock.sentinel.fake_instance,
-                                            mock.sentinel.network_info,
-                                            mock.sentinel.migration,
-                                            mock.sentinel.block_dev_info,
-                                            mock.sentinel.power_on)
-        mock_superclass_finish_rev_migr.assert_called_once_with(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.network_info, mock.sentinel.migration,
-            mock.sentinel.block_dev_info, mock.sentinel.power_on)
-        self.driver._clops.add_to_cluster.assert_called_once_with(
-            mock.sentinel.fake_instance)
-
-    @mock.patch.object(driver.HyperVClusterDriver, 'unplug_vifs')
-    def test_rollback_live_migration_at_destination_clustered(
-            self, mock_unplug_vifs):
-        mock_is_clustered = self.driver._livemigrationops.is_instance_clustered
-        mock_instance = mock.Mock()
-        self.driver.rollback_live_migration_at_destination(
-            self.context, mock_instance, mock.sentinel.network_info,
-            mock.sentinel.block_dev_info, mock.sentinel.destroy_disks,
-            mock.sentinel.migrate_data)
-
-        mock_is_clustered.assert_called_once_with(mock_instance.name)
-        mock_unplug_vifs.assert_called_once_with(
-            mock_instance, mock.sentinel.network_info)
-
-    @mock.patch.object(base_driver.HyperVDriver,
-                       'rollback_live_migration_at_destination')
-    def test_rollback_live_migration_at_destination(self,
-                                                    mock_superclass_rollback):
-        mock_is_clustered = self.driver._livemigrationops.is_instance_clustered
-        mock_is_clustered.return_value = False
-        mock_instance = mock.Mock()
-        self.driver.rollback_live_migration_at_destination(
-            self.context, mock_instance, mock.sentinel.network_info,
-            mock.sentinel.block_dev_info, mock.sentinel.destroy_disks,
-            mock.sentinel.migrate_data)
-
-        mock_is_clustered.assert_called_once_with(mock_instance.name)
-        mock_superclass_rollback.assert_called_once_with(
-            self.context, mock_instance, mock.sentinel.network_info,
-            mock.sentinel.block_dev_info, mock.sentinel.destroy_disks,
-            mock.sentinel.migrate_data)
-
-    @mock.patch.object(base_driver.HyperVDriver,
-                       'post_live_migration_at_destination')
-    def test_post_live_migration_at_destination(self, mock_superclass_post):
-        self.driver.post_live_migration_at_destination(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.network_info, mock.sentinel.block_migration,
-            mock.sentinel.block_dev_info)
-
-        self.driver._clops.post_migration.assert_called_once_with(
-            mock.sentinel.fake_instance)
-        mock_superclass_post.assert_called_once_with(
-            self.context, mock.sentinel.fake_instance,
-            mock.sentinel.network_info, mock.sentinel.block_migration,
-            mock.sentinel.block_dev_info)
diff --git a/compute_hyperv/tests/unit/cluster/test_livemigrationops.py b/compute_hyperv/tests/unit/cluster/test_livemigrationops.py
deleted file mode 100644
index 7022a5ce..00000000
--- a/compute_hyperv/tests/unit/cluster/test_livemigrationops.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2016 Cloudbase Solutions Srl
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-import ddt
-from nova.compute import vm_states
-from nova import exception
-from nova import test as nova_test
-from os_win import constants as os_win_const
-
-from compute_hyperv.nova.cluster import livemigrationops
-from compute_hyperv.nova import livemigrationops as base_livemigrationops
-from compute_hyperv.tests import fake_instance
-from compute_hyperv.tests.unit import test_base
-
-
-@ddt.ddt
-class ClusterLiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
-    """Unit tests for the Hyper-V Cluster LivemigrationOps class."""
-
-    _autospec_classes = [
-        base_livemigrationops.volumeops.VolumeOps,
-    ]
-
-    def setUp(self):
-        super(ClusterLiveMigrationOpsTestCase, self).setUp()
-        self._fake_context = 'fake_context'
-        self.livemigrops = livemigrationops.ClusterLiveMigrationOps()
-        self._clustutils = self.livemigrops._clustutils
-
-    def test_is_instance_clustered(self):
-        ret = self.livemigrops.is_instance_clustered(
-            mock.sentinel.instance)
-
-        self.assertEqual(
-            self.livemigrops._clustutils.vm_exists.return_value, ret)
-
-    def test_live_migration_in_cluster(self):
-        migr_timeout = 10
-        self.flags(instance_live_migration_timeout=migr_timeout,
-                   group='hyperv')
-
-        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
-        self.livemigrops._clustutils.vm_exists.return_value = True
-        post_method = mock.MagicMock()
-        dest = 'fake_dest'
-        node_names = [dest, 'fake_node2']
-        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
-        get_nodes.return_value = node_names
-
-        self.livemigrops.live_migration(
-            self._fake_context, mock_instance, dest, post_method,
-            mock.sentinel.recover_method,
-            block_migration=mock.sentinel.block_migration,
-            migrate_data=mock.sentinel.migrate_data)
-
-        clustutils = self.livemigrops._clustutils
-        clustutils.live_migrate_vm.assert_called_once_with(
-            mock_instance.name, dest, migr_timeout)
-        post_method.assert_called_once_with(
-            self._fake_context, mock_instance, dest,
-            mock.sentinel.block_migration, mock.sentinel.migrate_data)
-
-    @mock.patch.object(livemigrationops.ClusterLiveMigrationOps,
-                       '_check_failed_instance_migration')
-    def test_live_migration_in_cluster_exception(self, mock_check_migr):
-        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
-        self.livemigrops._clustutils.vm_exists.return_value = True
-        recover_method = mock.MagicMock()
-        dest = 'fake_dest'
-        node_names = [dest, 'fake_node2']
-        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
-        get_nodes.return_value = node_names
-        clustutils = self.livemigrops._clustutils
-        clustutils.live_migrate_vm.side_effect = nova_test.TestingException
-
-        self.livemigrops.live_migration(
-            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
-            recover_method,
-            block_migration=mock.sentinel.block_migration,
-            migrate_data=mock.sentinel.migrate_data)
-
-        mock_check_migr.assert_called_once_with(
-            mock_instance,
-            expected_state=os_win_const.CLUSTER_GROUP_ONLINE)
-
-        recover_method.assert_called_once_with(
-            self._fake_context, mock_instance, dest,
-            mock.sentinel.migrate_data)
-
-    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
-                       'live_migration')
-    def test_live_migration_outside_cluster(self, mock_super_live_migration):
-        mock_instance = fake_instance.fake_instance_obj(self._fake_context)
-        self.livemigrops._clustutils.vm_exists.return_value = True
-        dest = 'fake_dest'
-        node_names = ['fake_node1', 'fake_node2']
-        get_nodes = self.livemigrops._clustutils.get_cluster_node_names
-        get_nodes.return_value = node_names
-
-        self.livemigrops.live_migration(
-            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
-            mock.sentinel.recover_method, block_migration=False,
-            migrate_data=None)
-
-        mock_super_live_migration.assert_called_once_with(
-            self._fake_context, mock_instance, dest, mock.sentinel.post_method,
-            mock.sentinel.recover_method, False, None)
-
-    @ddt.data({},
-              {'state': os_win_const.CLUSTER_GROUP_PENDING,
-               'expected_invalid_state': True},
-              {'migration_queued': True,
-               'expected_invalid_state': True},
-              {'owner_node': 'some_other_node',
-               'expected_invalid_state': True})
-    @ddt.unpack
-    def test_check_failed_instance_migration(
-            self, state=os_win_const.CLUSTER_GROUP_ONLINE,
-            owner_node='source_node', migration_queued=False,
-            expected_invalid_state=False):
-        state_info = dict(owner_node=owner_node.upper(),
-                          state=state,
-                          migration_queued=migration_queued)
-        self._clustutils.get_cluster_group_state_info.return_value = (
-            state_info)
-        self._clustutils.get_node_name.return_value = 'source_node'
-
-        mock_instance = mock.Mock()
-
-        if expected_invalid_state:
-            self.assertRaises(
-                exception.InstanceInvalidState,
-                self.livemigrops._check_failed_instance_migration,
-                mock_instance,
-                os_win_const.CLUSTER_GROUP_ONLINE)
-            self.assertEqual(vm_states.ERROR, mock_instance.vm_state)
-        else:
-            self.livemigrops._check_failed_instance_migration(
-                mock_instance, os_win_const.CLUSTER_GROUP_ONLINE)
-
-        self._clustutils.get_cluster_group_state_info.assert_called_once_with(
-            mock_instance.name)
-        self._clustutils.get_node_name.assert_called_once_with()
-
-    def test_pre_live_migration_clustered(self):
-        self.livemigrops.pre_live_migration(self._fake_context,
-                                            mock.sentinel.fake_instance,
-                                            mock.sentinel.bdi,
-                                            mock.sentinel.network_info)
-
-        fake_conn_vol = self.livemigrops._volumeops.connect_volumes
-        fake_conn_vol.assert_called_once_with(mock.sentinel.bdi)
-
-    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
-                       'pre_live_migration')
-    def test_pre_live_migration_not_clustered(self, mock_pre_live_migration):
-        self.livemigrops._clustutils.vm_exists.return_value = False
-        self.livemigrops.pre_live_migration(self._fake_context,
-                                            mock.sentinel.fake_instance,
-                                            mock.sentinel.bdi,
-                                            mock.sentinel.network_info)
-
-        mock_pre_live_migration.assert_called_once_with(
-            self._fake_context, mock.sentinel.fake_instance,
-            mock.sentinel.bdi, mock.sentinel.network_info)
-
-    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
-                       'post_live_migration')
-    def test_post_live_migration_clustered(self, mock_post_live_migration):
-        self.livemigrops.post_live_migration(self._fake_context,
-                                             mock.sentinel.fake_instance,
-                                             mock.sentinel.bdi,
-                                             mock.sentinel.migrate_data)
-
-        self.assertFalse(mock_post_live_migration.called)
-
-    @mock.patch.object(base_livemigrationops.LiveMigrationOps,
-                       'post_live_migration')
-    def test_post_live_migration_not_clustered(self, mock_post_live_migration):
-        self.livemigrops._clustutils.vm_exists.return_value = False
-        self.livemigrops.post_live_migration(self._fake_context,
-                                             mock.sentinel.fake_instance,
-                                             mock.sentinel.bdi,
-                                             mock.sentinel.migrate_data)
-
-        mock_post_live_migration.assert_called_once_with(
-            self._fake_context, mock.sentinel.fake_instance,
-            mock.sentinel.bdi,
-            mock.sentinel.migrate_data)
diff --git a/compute_hyperv/tests/unit/cluster/test_volumeops.py b/compute_hyperv/tests/unit/cluster/test_volumeops.py
deleted file mode 100644
index 5db4e10f..00000000
--- a/compute_hyperv/tests/unit/cluster/test_volumeops.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2018 Cloudbase Solutions Srl
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-
-from compute_hyperv.nova.cluster import volumeops
-from compute_hyperv.nova import constants
-from compute_hyperv.nova import volumeops as base_volumeops
-from compute_hyperv.tests.unit import test_base
-
-
-class ClusterVolumeOpsTestCase(test_base.HyperVBaseTestCase):
-    _autospec_classes = [
-        base_volumeops.cinder.API,
-    ]
-
-    def setUp(self):
-        super(ClusterVolumeOpsTestCase, self).setUp()
-        self._volumeops = volumeops.ClusterVolumeOps()
-
-    def test_loaded_volume_drivers(self):
-        self.assertEqual(set([constants.STORAGE_PROTOCOL_SMBFS]),
-                         set(self._volumeops.volume_drivers.keys()))
-
-    def test_get_blacklisted_volume_driver(self):
-        conn_info = dict(driver_volume_type=constants.STORAGE_PROTOCOL_ISCSI)
-
-        self.assertRaises(
-            exception.VolumeDriverNotFound,
-            self._volumeops._get_volume_driver,
-            conn_info)
-
-    def test_get_supported_volume_driver(self):
-        conn_info = dict(driver_volume_type=constants.STORAGE_PROTOCOL_SMBFS)
-        drv = self._volumeops._get_volume_driver(conn_info)
-
-        self.assertIsInstance(drv, base_volumeops.SMBFSVolumeDriver)
diff --git a/compute_hyperv/tests/unit/test_base.py b/compute_hyperv/tests/unit/test_base.py
deleted file mode 100644
index 779d6c60..00000000
--- a/compute_hyperv/tests/unit/test_base.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2014 Cloudbase Solutions Srl
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from unittest import mock
-
-from os_win import utilsfactory
-from oslo_utils import importutils
-
-from compute_hyperv.tests import test
-
-
-class HyperVBaseTestCase(test.NoDBTestCase):
-    _autospec_classes = []
-
-    def setUp(self):
-        super(HyperVBaseTestCase, self).setUp()
-
-        utilsfactory_patcher = mock.patch.object(
-            utilsfactory, '_get_class', HyperVBaseTestCase._mock_get_class)
-        utilsfactory_patcher.start()
-        self.addCleanup(mock.patch.stopall)
-
-        self._patch_autospec_classes()
-
-    @staticmethod
-    def _mock_get_class(class_type, *args, **kwargs):
-        existing_classes = utilsfactory.utils_map[class_type]
-        class_info = existing_classes[0]
-        imported_class = importutils.import_class(class_info['path'])
-
-        return mock.Mock(autospec=imported_class)
-
-    def _patch_autospec_classes(self):
-        for class_type in self._autospec_classes:
-            mocked_class = mock.MagicMock(autospec=class_type)
-            patcher = mock.patch(
-                '.'.join([class_type.__module__, class_type.__name__]),
-                mocked_class)
-            patcher.start()
-            self.addCleanup(patcher.stop)
diff --git a/compute_hyperv/tests/unit/test_block_device_manager.py b/compute_hyperv/tests/unit/test_block_device_manager.py
deleted file mode 100644
index 751552c2..00000000
--- a/compute_hyperv/tests/unit/test_block_device_manager.py
+++ /dev/null
@@ -1,631 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-from unittest import mock
-
-import ddt
-from nova import block_device
-from nova import exception
-from nova import objects
-from nova.virt import block_device as driver_block_device
-from os_win import constants as os_win_const
-from os_win import exceptions as os_win_exc
-from oslo_serialization import jsonutils
-
-from compute_hyperv.nova import block_device_manager
-from compute_hyperv.nova import constants
-from compute_hyperv.tests.unit import test_base
-
-
-@ddt.ddt
-class BlockDeviceManagerTestCase(test_base.HyperVBaseTestCase):
-    """Unit tests for the Hyper-V BlockDeviceInfoManager class."""
-
-    _autospec_classes = [
-        block_device_manager.volumeops.VolumeOps,
-        block_device_manager.pathutils.PathUtils,
-    ]
-
-    _FAKE_CONN_INFO = {
-        'serial': 'fake_volume_id'
-    }
-
-    _FAKE_ATTACH_INFO = {
-        'controller_type': constants.CTRL_TYPE_SCSI,
-        'controller_addr': 0,
-        'controller_slot': 1
-    }
-
-    def setUp(self):
-        super(BlockDeviceManagerTestCase, self).setUp()
-        self._bdman = block_device_manager.BlockDeviceInfoManager()
-
-        self._volops = self._bdman._volops
-        self._pathutils = self._bdman._pathutils
-
-    @ddt.data(constants.CTRL_TYPE_SCSI, constants.CTRL_TYPE_IDE)
-    def test_get_device_bus(self, controller_type):
-        fake_ctrl_addr = self._FAKE_ATTACH_INFO['controller_addr']
-        fake_ctrl_slot = self._FAKE_ATTACH_INFO['controller_slot']
-
-        bus = self._bdman._get_device_bus(
-            controller_type, fake_ctrl_addr, fake_ctrl_slot)
-
-        if controller_type == constants.CTRL_TYPE_SCSI:
-            exp_addr = '0:0:%s:%s' % (fake_ctrl_addr, fake_ctrl_slot)
-            exp_cls = objects.SCSIDeviceBus
-        else:
-            exp_addr = '%s:%s' % (fake_ctrl_addr, fake_ctrl_slot)
-            exp_cls = objects.IDEDeviceBus
-
-        self.assertIsInstance(bus, exp_cls)
-        self.assertEqual(exp_addr, bus.address)
-
-    @ddt.data({},
-              {'bdm_is_vol': False},
-              {'conn_info_set': False})
-    @ddt.unpack
-    @mock.patch.object(driver_block_device, 'convert_volume')
-    def test_get_vol_bdm_att_info(self, mock_convert_vol,
-                                  bdm_is_vol=True,
-                                  conn_info_set=True):
-        mock_drv_bdm = (dict(connection_info=self._FAKE_CONN_INFO)
-                        if conn_info_set else {})
-        mock_convert_vol.return_value = (mock_drv_bdm
-                                         if bdm_is_vol
-                                         else None)
-
-        self._volops.get_disk_attachment_info.return_value = (
-            self._FAKE_ATTACH_INFO.copy())
-
-        attach_info = self._bdman._get_vol_bdm_attachment_info(
-            mock.sentinel.bdm)
-
-        mock_convert_vol.assert_called_once_with(
-            mock.sentinel.bdm)
-
-        if bdm_is_vol and conn_info_set:
-            exp_attach_info = self._FAKE_ATTACH_INFO.copy()
-            exp_attach_info['serial'] = self._FAKE_CONN_INFO['serial']
-
-            self._volops.get_disk_attachment_info.assert_called_once_with(
-                self._FAKE_CONN_INFO)
-        else:
-            exp_attach_info = None
-
-            self._volops.get_disk_attachment_info.assert_not_called()
-
-        self.assertEqual(exp_attach_info, attach_info)
-
-    @ddt.data({},
-              {'eph_name_set': False},
-              {'eph_disk_exists': False})
-    @ddt.unpack
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       'get_bdm_connection_info')
-    @mock.patch('os.path.exists')
-    def test_get_eph_bdm_attachment_info(self, mock_exists,
-                                         mock_get_bdm_conn_info,
-                                         eph_name_set=True,
-                                         eph_disk_exists=True):
-        fake_instance_dir = 'fake_instance_dir'
-        fake_eph_name = 'eph0.vhdx'
-        mock_instance = mock.Mock()
-
-        fake_conn_info = self._FAKE_CONN_INFO.copy()
-        if eph_name_set:
-            fake_conn_info['eph_filename'] = fake_eph_name
-
-        mock_get_bdm_conn_info.return_value = fake_conn_info
-        mock_exists.return_value = eph_disk_exists
-        mock_get_attach_info = self._bdman._vmutils.get_disk_attachment_info
-
-        self._pathutils.get_instance_dir.return_value = fake_instance_dir
-
-        attach_info = self._bdman._get_eph_bdm_attachment_info(
-            mock_instance, mock.sentinel.bdm)
-
-        if eph_name_set and eph_disk_exists:
-            exp_attach_info = mock_get_attach_info.return_value
-            exp_eph_path = os.path.join(fake_instance_dir, fake_eph_name)
-
-            mock_exists.assert_called_once_with(exp_eph_path)
-            mock_get_attach_info.assert_called_once_with(
-                exp_eph_path,
-                is_physical=False)
-        else:
-            exp_attach_info = None
-
-            mock_get_attach_info.assert_not_called()
-
-        self.assertEqual(exp_attach_info, attach_info)
-
-        mock_get_bdm_conn_info.assert_called_once_with(
-            mock.sentinel.bdm)
-
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_get_vol_bdm_attachment_info')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_get_eph_bdm_attachment_info')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_get_device_bus')
-    @mock.patch.object(block_device, 'new_format_is_ephemeral')
-    @mock.patch.object(objects, 'DiskMetadata')
-    def test_get_disk_metadata(self, mock_diskmetadata_cls,
-                               mock_is_eph,
-                               mock_get_device_bus,
-                               mock_get_vol_attach_info,
-                               mock_get_eph_attach_info,
-                               bdm_is_eph=False,
-                               bdm_is_vol=False,
-                               attach_info_retrieved=True):
-        mock_instance = mock.Mock()
-        mock_bdm = mock.Mock()
-        mock_bdm.is_volume = bdm_is_vol
-
-        if attach_info_retrieved:
-            attach_info = self._FAKE_ATTACH_INFO.copy()
-            attach_info['serial'] = mock.sentinel.serial
-        else:
-            attach_info = None
-
-        mock_get_eph_attach_info.return_value = attach_info
-        mock_get_vol_attach_info.return_value = attach_info
-        mock_is_eph.return_value = bdm_is_eph
-
-        disk_metadata = self._bdman._get_disk_metadata(
-            mock_instance, mock_bdm)
-
-        if (bdm_is_vol or bdm_is_eph) and attach_info_retrieved:
-            exp_disk_meta = mock_diskmetadata_cls.return_value
-
-            mock_get_device_bus.assert_called_once_with(
-                self._FAKE_ATTACH_INFO['controller_type'],
-                self._FAKE_ATTACH_INFO['controller_addr'],
-                self._FAKE_ATTACH_INFO['controller_slot'])
-            mock_diskmetadata_cls.assert_called_once_with(
-                bus=mock_get_device_bus.return_value,
-                tags=[mock_bdm.tag],
-                serial=mock.sentinel.serial)
-        else:
-            exp_disk_meta = None
-
-            mock_get_device_bus.assert_not_called()
-
-        self.assertEqual(exp_disk_meta, disk_metadata)
-
-        if bdm_is_vol:
-            mock_get_vol_attach_info.assert_called_once_with(mock_bdm)
-        elif bdm_is_eph:
-            mock_get_eph_attach_info.assert_called_once_with(mock_instance,
-                                                             mock_bdm)
-
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_get_disk_metadata')
-    @mock.patch.object(objects.BlockDeviceMappingList,
-                       'get_by_instance_uuid')
-    def test_get_bdm_metadata(self, mock_get_bdm_list,
-                              mock_get_disk_meta):
-        bdms = [mock.Mock()] * 4
-        disk_meta = mock.Mock()
-        mock_instance = mock.Mock()
-
-        mock_get_bdm_list.return_value = bdms
-        mock_get_disk_meta.side_effect = [
-            None,
-            exception.DiskNotFound(message='fake_err'),
-            os_win_exc.DiskNotFound(message='fake_err'),
-            disk_meta]
-
-        bdm_meta = self._bdman.get_bdm_metadata(mock.sentinel.context,
-                                                mock_instance)
-
-        self.assertEqual([disk_meta], bdm_meta)
-
-        mock_get_bdm_list.assert_called_once_with(mock.sentinel.context,
-                                                  mock_instance.uuid)
-        mock_get_disk_meta.assert_has_calls(
-            [mock.call(mock_instance, bdm) for bdm in bdms])
-
-    @mock.patch.object(objects.BlockDeviceMapping,
-                       'get_by_volume_and_instance')
-    def test_set_vol_bdm_conn_info(self, mock_get_bdm):
-        mock_instance = mock.Mock()
-        mock_bdm = mock_get_bdm.return_value
-
-        self._bdman.set_volume_bdm_connection_info(
-            mock.sentinel.context, mock_instance, self._FAKE_CONN_INFO)
-
-        mock_get_bdm.assert_called_once_with(
-            mock.sentinel.context,
-            self._FAKE_CONN_INFO['serial'],
-            mock_instance.uuid)
-
-        self.assertEqual(self._FAKE_CONN_INFO,
-                         jsonutils.loads(mock_bdm.connection_info))
-        mock_bdm.save.assert_called_once_with()
-
-    def test_get_bdm_connection_info(self):
-        bdm = mock.Mock(connection_info=None)
-        self.assertEqual({}, self._bdman.get_bdm_connection_info(bdm))
-
-        bdm = mock.Mock()
-        bdm.connection_info = jsonutils.dumps(self._FAKE_CONN_INFO)
-        self.assertEqual(self._FAKE_CONN_INFO,
-                         self._bdman.get_bdm_connection_info(bdm))
-
-    def test_update_bdm_conn_info(self):
-        connection_info = self._FAKE_CONN_INFO.copy()
-
-        mock_bdm = mock.Mock()
-        mock_bdm.connection_info = jsonutils.dumps(connection_info)
-
-        updates = dict(some_key='some_val',
-                       some_other_key='some_other_val')
-
-        self._bdman.update_bdm_connection_info(
-            mock_bdm, **updates)
-
-        exp_connection_info = connection_info.copy()
-        exp_connection_info.update(**updates)
-
-        self.assertEqual(exp_connection_info,
-                         jsonutils.loads(mock_bdm.connection_info))
-        mock_bdm.save.assert_called_once_with()
-
-    @mock.patch('nova.virt.configdrive.required_by')
-    def test_init_controller_slot_counter_gen1_no_configdrive(
-            self, mock_cfg_drive_req):
-        mock_cfg_drive_req.return_value = False
-        slot_map = self._bdman._initialize_controller_slot_counter(
-            mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1)
-
-        self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][0],
-                         os_win_const.IDE_CONTROLLER_SLOTS_NUMBER)
-        self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1],
-                         os_win_const.IDE_CONTROLLER_SLOTS_NUMBER)
-        self.assertEqual(slot_map[constants.CTRL_TYPE_SCSI][0],
-                         os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER)
-
-    @mock.patch('nova.virt.configdrive.required_by')
-    def test_init_controller_slot_counter_gen1(self, mock_cfg_drive_req):
-        slot_map = self._bdman._initialize_controller_slot_counter(
-            mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1)
-
-        self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1],
-                         os_win_const.IDE_CONTROLLER_SLOTS_NUMBER - 1)
-
-    @mock.patch.object(block_device_manager.configdrive, 'required_by')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_initialize_controller_slot_counter')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_check_and_update_root_device')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_check_and_update_ephemerals')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_check_and_update_volumes')
-    def _check_validate_and_update_bdi(self, mock_check_and_update_vol,
-                                       mock_check_and_update_eph,
-                                       mock_check_and_update_root,
-                                       mock_init_ctrl_cntr,
-                                       mock_required_by, available_slots=1):
-        mock_required_by.return_value = True
-        slot_map = {constants.CTRL_TYPE_SCSI: [available_slots]}
-        mock_init_ctrl_cntr.return_value = slot_map
-
-        if available_slots:
-            self._bdman.validate_and_update_bdi(mock.sentinel.FAKE_INSTANCE,
-                                                mock.sentinel.IMAGE_META,
-                                                constants.VM_GEN_2,
-                                                mock.sentinel.BLOCK_DEV_INFO)
-        else:
-            self.assertRaises(exception.InvalidBDMFormat,
-                              self._bdman.validate_and_update_bdi,
-                              mock.sentinel.FAKE_INSTANCE,
-                              mock.sentinel.IMAGE_META,
-                              constants.VM_GEN_2,
-                              mock.sentinel.BLOCK_DEV_INFO)
-
-        mock_init_ctrl_cntr.assert_called_once_with(
-            mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_2)
-        mock_check_and_update_root.assert_called_once_with(
-            constants.VM_GEN_2, mock.sentinel.IMAGE_META,
-            mock.sentinel.BLOCK_DEV_INFO, slot_map)
-        mock_check_and_update_eph.assert_called_once_with(
-            constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map)
-        mock_check_and_update_vol.assert_called_once_with(
-            constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map)
-        mock_required_by.assert_called_once_with(mock.sentinel.FAKE_INSTANCE)
-
-    def test_validate_and_update_bdi(self):
-        self._check_validate_and_update_bdi()
-
-    def test_validate_and_update_bdi_insufficient_slots(self):
-        self._check_validate_and_update_bdi(available_slots=0)
-
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       '_get_available_controller_slot')
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       'is_boot_from_volume')
-    def _test_check_and_update_root_device(self, mock_is_boot_from_vol,
-                                           mock_get_avail_ctrl_slot,
-                                           disk_format,
-                                           vm_gen=constants.VM_GEN_1,
-                                           boot_from_volume=False):
-        image_meta = {'disk_format': disk_format}
-        bdi = {'root_device': '/dev/sda',
-               'block_device_mapping': [
-                   {'mount_device': '/dev/sda',
-                    'connection_info': mock.sentinel.FAKE_CONN_INFO}]}
-
-        mock_is_boot_from_vol.return_value = boot_from_volume
-        mock_get_avail_ctrl_slot.return_value = (0, 0)
-
-        self._bdman._check_and_update_root_device(vm_gen, image_meta, bdi,
-                                                  mock.sentinel.SLOT_MAP)
-
-        root_disk = bdi['root_disk']
-        if boot_from_volume:
-            self.assertEqual(root_disk['type'], constants.VOLUME)
-            self.assertIsNone(root_disk['path'])
-            self.assertEqual(root_disk['connection_info'],
-                             mock.sentinel.FAKE_CONN_INFO)
-        else:
-            image_type = self._bdman._TYPE_FOR_DISK_FORMAT.get(
-                image_meta['disk_format'])
-            self.assertEqual(root_disk['type'], image_type)
-            self.assertIsNone(root_disk['path'])
-            self.assertIsNone(root_disk['connection_info'])
-
-        disk_bus = (constants.CTRL_TYPE_IDE if
-                    vm_gen == constants.VM_GEN_1 else constants.CTRL_TYPE_SCSI)
-        self.assertEqual(root_disk['disk_bus'], disk_bus)
-        self.assertEqual(root_disk['drive_addr'], 0)
-        self.assertEqual(root_disk['ctrl_disk_addr'], 0)
-        self.assertEqual(root_disk['boot_index'], 0)
-        self.assertEqual(root_disk['mount_device'], bdi['root_device'])
-        mock_get_avail_ctrl_slot.assert_called_once_with(
-            root_disk['disk_bus'], mock.sentinel.SLOT_MAP)
-
-    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
-                       'is_boot_from_volume', return_value=False)
-    def test_check_and_update_root_device_exception(self, mock_is_boot_vol):
-        bdi = {}
-        image_meta = mock.MagicMock(disk_format=mock.sentinel.fake_format)
-
-        self.assertRaises(exception.InvalidImageFormat,
-                          self._bdman._check_and_update_root_device,
-                          constants.VM_GEN_1, image_meta, bdi,
-                          mock.sentinel.SLOT_MAP)
-
-    def test_check_and_update_root_device_gen1(self):
-        self._test_check_and_update_root_device(disk_format='vhd')
-
-    def test_check_and_update_root_device_gen1_vhdx(self):
-        self._test_check_and_update_root_device(disk_format='vhdx')
-
-    def test_check_and_update_root_device_gen1_iso(self):
-        self._test_check_and_update_root_device(disk_format='iso')
-
-    def test_check_and_update_root_device_gen2(self):
-        self._test_check_and_update_root_device(disk_format='vhd',
-                                                vm_gen=constants.VM_GEN_2)
-
-    def test_check_and_update_root_device_boot_from_vol_gen1(self):
-        self._test_check_and_update_root_device(disk_format='vhd',
-                                                boot_from_volume=True)
-
-    def test_check_and_update_root_device_boot_from_vol_gen2(self):
-        self._test_check_and_update_root_device(disk_format='vhd',
-                                                vm_gen=constants.VM_GEN_2,
-                                                boot_from_volume=True)
-
- @mock.patch('nova.virt.configdrive.required_by', return_value=True) - def _test_get_available_controller_slot(self, mock_config_drive_req, - bus=constants.CTRL_TYPE_IDE, - fail=False): - - slot_map = self._bdman._initialize_controller_slot_counter( - mock.sentinel.FAKE_VM, constants.VM_GEN_1) - - if fail: - slot_map[constants.CTRL_TYPE_IDE][0] = 0 - slot_map[constants.CTRL_TYPE_IDE][1] = 0 - self.assertRaises(exception.InvalidBDMFormat, - self._bdman._get_available_controller_slot, - constants.CTRL_TYPE_IDE, - slot_map) - else: - (disk_addr, - ctrl_disk_addr) = self._bdman._get_available_controller_slot( - bus, slot_map) - - self.assertEqual(0, disk_addr) - self.assertEqual(0, ctrl_disk_addr) - - def test_get_available_controller_slot(self): - self._test_get_available_controller_slot() - - def test_get_available_controller_slot_scsi_ctrl(self): - self._test_get_available_controller_slot(bus=constants.CTRL_TYPE_SCSI) - - def test_get_available_controller_slot_exception(self): - self._test_get_available_controller_slot(fail=True) - - def test_is_boot_from_volume_true(self): - vol = {'mount_device': self._bdman._DEFAULT_ROOT_DEVICE} - block_device_info = {'block_device_mapping': [vol]} - ret = self._bdman.is_boot_from_volume(block_device_info) - - self.assertTrue(ret) - - def test_is_boot_from_volume_false(self): - block_device_info = {'block_device_mapping': []} - ret = self._bdman.is_boot_from_volume(block_device_info) - - self.assertFalse(ret) - - def test_get_root_device_bdm(self): - mount_device = '/dev/sda' - bdm1 = {'mount_device': None} - bdm2 = {'mount_device': mount_device} - bdi = {'block_device_mapping': [bdm1, bdm2]} - - ret = self._bdman._get_root_device_bdm(bdi, mount_device) - - self.assertEqual(bdm2, ret) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_bdm') - def test_check_and_update_ephemerals(self, mock_check_and_update_bdm): - fake_ephemerals = [mock.sentinel.eph1, mock.sentinel.eph2, - mock.sentinel.eph3] - fake_bdi = {'ephemerals': fake_ephemerals} - expected_calls = [] - for eph in fake_ephemerals: - expected_calls.append(mock.call(mock.sentinel.fake_slot_map, - mock.sentinel.fake_vm_gen, - eph)) - self._bdman._check_and_update_ephemerals(mock.sentinel.fake_vm_gen, - fake_bdi, - mock.sentinel.fake_slot_map) - mock_check_and_update_bdm.assert_has_calls(expected_calls) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_bdm') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_root_device_bdm') - def test_check_and_update_volumes(self, mock_get_root_dev_bdm, - mock_check_and_update_bdm): - fake_vol1 = {'mount_device': '/dev/sda'} - fake_vol2 = {'mount_device': '/dev/sdb'} - fake_volumes = [fake_vol1, fake_vol2] - fake_bdi = {'block_device_mapping': fake_volumes, - 'root_disk': {'mount_device': '/dev/sda'}} - mock_get_root_dev_bdm.return_value = fake_vol1 - - self._bdman._check_and_update_volumes(mock.sentinel.fake_vm_gen, - fake_bdi, - mock.sentinel.fake_slot_map) - - mock_get_root_dev_bdm.assert_called_once_with(fake_bdi, '/dev/sda') - mock_check_and_update_bdm.assert_called_once_with( - mock.sentinel.fake_slot_map, mock.sentinel.fake_vm_gen, fake_vol2) - self.assertNotIn(fake_vol1, fake_bdi) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_available_controller_slot') - def test_check_and_update_bdm_with_defaults(self, mock_get_ctrl_slot): - mock_get_ctrl_slot.return_value = ((mock.sentinel.DRIVE_ADDR, - mock.sentinel.CTRL_DISK_ADDR)) - bdm = 
{'device_type': None, - 'disk_bus': None, - 'boot_index': None} - - self._bdman._check_and_update_bdm(mock.sentinel.FAKE_SLOT_MAP, - constants.VM_GEN_1, bdm) - - mock_get_ctrl_slot.assert_called_once_with( - bdm['disk_bus'], mock.sentinel.FAKE_SLOT_MAP) - self.assertEqual(mock.sentinel.DRIVE_ADDR, bdm['drive_addr']) - self.assertEqual(mock.sentinel.CTRL_DISK_ADDR, bdm['ctrl_disk_addr']) - self.assertEqual('disk', bdm['device_type']) - self.assertEqual(self._bdman._DEFAULT_BUS, bdm['disk_bus']) - self.assertIsNone(bdm['boot_index']) - - def test_check_and_update_bdm_exception_device_type(self): - bdm = {'device_type': 'cdrom', - 'disk_bus': 'IDE'} - - self.assertRaises(exception.InvalidDiskInfo, - self._bdman._check_and_update_bdm, - mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm) - - def test_check_and_update_bdm_exception_disk_bus(self): - bdm = {'device_type': 'disk', - 'disk_bus': 'fake_bus'} - - self.assertRaises(exception.InvalidDiskInfo, - self._bdman._check_and_update_bdm, - mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm) - - def test_sort_by_boot_order(self): - original = [{'boot_index': 2}, {'boot_index': None}, {'boot_index': 1}] - expected = [original[2], original[0], original[1]] - - self._bdman._sort_by_boot_order(original) - self.assertEqual(expected, original) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_boot_order_gen1') - def test_get_boot_order_gen1_vm(self, mock_get_boot_order): - self._bdman.get_boot_order(constants.VM_GEN_1, - mock.sentinel.BLOCK_DEV_INFO) - mock_get_boot_order.assert_called_once_with( - mock.sentinel.BLOCK_DEV_INFO) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_boot_order_gen2') - def test_get_boot_order_gen2_vm(self, mock_get_boot_order): - self._bdman.get_boot_order(constants.VM_GEN_2, - mock.sentinel.BLOCK_DEV_INFO) - mock_get_boot_order.assert_called_once_with( - mock.sentinel.BLOCK_DEV_INFO) - - def test_get_boot_order_gen1_iso(self): - fake_bdi = {'root_disk': {'type': 'iso'}} - expected = [os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - res = self._bdman._get_boot_order_gen1(fake_bdi) - self.assertEqual(expected, res) - - def test_get_boot_order_gen1_vhd(self): - fake_bdi = {'root_disk': {'type': 'vhd'}} - expected = [os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - res = self._bdman._get_boot_order_gen1(fake_bdi) - self.assertEqual(expected, res) - - def test_get_boot_order_gen2(self): - fake_root_disk = {'boot_index': 0, - 'path': mock.sentinel.FAKE_ROOT_PATH} - fake_eph1 = {'boot_index': 2, - 'path': mock.sentinel.FAKE_EPH_PATH1} - fake_eph2 = {'boot_index': 3, - 'path': mock.sentinel.FAKE_EPH_PATH2} - fake_bdm = {'boot_index': 1, - 'connection_info': mock.sentinel.FAKE_CONN_INFO} - fake_bdi = {'root_disk': fake_root_disk, - 'ephemerals': [fake_eph1, - fake_eph2], - 'block_device_mapping': [fake_bdm]} - - self._bdman._volops.get_disk_resource_path = ( - mock.MagicMock(return_value=fake_bdm['connection_info'])) - - expected_res = [mock.sentinel.FAKE_ROOT_PATH, - mock.sentinel.FAKE_CONN_INFO, - mock.sentinel.FAKE_EPH_PATH1, - mock.sentinel.FAKE_EPH_PATH2] - - res = self._bdman._get_boot_order_gen2(fake_bdi) - - self.assertEqual(expected_res, res) diff --git a/compute_hyperv/tests/unit/test_coordination.py b/compute_hyperv/tests/unit/test_coordination.py deleted file mode 100644 index 
3477909b..00000000 --- a/compute_hyperv/tests/unit/test_coordination.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -from unittest import mock - -import tooz.coordination -import tooz.locking - -from compute_hyperv.nova import coordination -from compute_hyperv.tests.unit import test_base - -if hasattr(inspect, 'getfullargspec'): - getargspec = inspect.getfullargspec -else: - getargspec = inspect.getargspec - - -class Locked(Exception): - pass - - -class MockToozLock(tooz.locking.Lock): - active_locks = set() - - def acquire(self, blocking=True): - if self.name not in self.active_locks: - self.active_locks.add(self.name) - return True - elif not blocking: - return False - else: - raise Locked - - def release(self): - self.active_locks.remove(self.name) - - -@mock.patch('tooz.coordination.get_coordinator') -class CoordinatorTestCase(test_base.HyperVBaseTestCase): - MOCK_TOOZ = False - - def test_coordinator_start(self, get_coordinator): - crd = get_coordinator.return_value - - agent = coordination.Coordinator() - agent.start() - self.assertTrue(get_coordinator.called) - self.assertTrue(crd.start.called) - - def test_coordinator_stop(self, get_coordinator): - crd = get_coordinator.return_value - - agent = coordination.Coordinator() - agent.start() - self.assertIsNotNone(agent.coordinator) - agent.stop() - self.assertTrue(crd.stop.called) - self.assertIsNone(agent.coordinator) - - def test_coordinator_lock(self, get_coordinator): - crd = get_coordinator.return_value - crd.get_lock.side_effect = lambda n: MockToozLock(n) - - agent1 = coordination.Coordinator() - agent1.start() - agent2 = coordination.Coordinator() - agent2.start() - - lock_name = 'lock' - expected_name = lock_name.encode('ascii') - - self.assertNotIn(expected_name, MockToozLock.active_locks) - with agent1.get_lock(lock_name): - self.assertIn(expected_name, MockToozLock.active_locks) - self.assertRaises(Locked, agent1.get_lock(lock_name).acquire) - self.assertRaises(Locked, agent2.get_lock(lock_name).acquire) - self.assertNotIn(expected_name, MockToozLock.active_locks) - - def test_coordinator_offline(self, get_coordinator): - crd = get_coordinator.return_value - crd.start.side_effect = tooz.coordination.ToozConnectionError('err') - - agent = coordination.Coordinator() - self.assertRaises(tooz.coordination.ToozError, agent.start) - self.assertFalse(agent.started) - - -class CoordinationTestCase(test_base.HyperVBaseTestCase): - MOCK_TOOZ = False - - @mock.patch.object(coordination.COORDINATOR, 'get_lock') - def test_synchronized(self, get_lock): - @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') - def func(foo, bar): - pass - - foo = mock.Mock() - foo.val = 7 - bar = mock.MagicMock() - bar.__getitem__.return_value = 8 - func(foo, bar) - get_lock.assert_called_with('lock-func-7-8') - self.assertEqual(['foo', 'bar'], getargspec(func)[0]) diff --git a/compute_hyperv/tests/unit/test_driver.py 
b/compute_hyperv/tests/unit/test_driver.py deleted file mode 100644 index ae5ca242..00000000 --- a/compute_hyperv/tests/unit/test_driver.py +++ /dev/null @@ -1,672 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the Hyper-V Driver.""" - -import platform -import sys -from unittest import mock - -from nova import exception -from nova.image import glance -from nova import safe_utils -from nova.tests.unit import fake_instance -from nova.virt import driver as base_driver -from os_win import exceptions as os_win_exc - -from compute_hyperv.nova import driver -from compute_hyperv.tests.unit import test_base - - -class HyperVDriverTestCase(test_base.HyperVBaseTestCase): - - _autospec_classes = [ - driver.eventhandler.InstanceEventHandler, - driver.hostops.HostOps, - driver.volumeops.VolumeOps, - driver.vmops.VMOps, - driver.snapshotops.SnapshotOps, - driver.livemigrationops.LiveMigrationOps, - driver.migrationops.MigrationOps, - driver.rdpconsoleops.RDPConsoleOps, - driver.serialconsoleops.SerialConsoleOps, - driver.imagecache.ImageCache, - driver.pathutils.PathUtils, - glance.API, - ] - - FAKE_WIN_2008R2_VERSION = '6.0.0' - - @mock.patch.object(driver.hostops, 'api', mock.MagicMock()) - @mock.patch.object(driver.HyperVDriver, '_check_minimum_windows_version') - def setUp(self, mock_check_minimum_windows_version): - super(HyperVDriverTestCase, self).setUp() - - self.context = 'context' - self.driver = driver.HyperVDriver(mock.sentinel.virtapi) - - @mock.patch.object(driver.LOG, 'warning') - @mock.patch.object(driver.utilsfactory, 'get_hostutils') - def test_check_minimum_windows_version(self, mock_get_hostutils, - mock_warning): - mock_hostutils = mock_get_hostutils.return_value - mock_hostutils.check_min_windows_version.return_value = False - - self.assertRaises(exception.HypervisorTooOld, - self.driver._check_minimum_windows_version) - - mock_hostutils.check_min_windows_version.side_effect = [True, False] - - self.driver._check_minimum_windows_version() - self.assertTrue(mock_warning.called) - - def test_public_api_signatures(self): - # NOTE(claudiub): wrapped functions do not keep the same signature in - # Python 2.7, which causes this test to fail. Instead, we should - # compare the public API signatures of the unwrapped methods. 
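As a rough standard-library analogue of the unwrapping performed below: decorators built with functools.wraps record the original function on __wrapped__, so the undecorated signature can be recovered and compared against the base class. The logged decorator here is a made-up stand-in, not nova's safe_utils.get_wrapped_function:

    import functools
    import inspect

    def logged(fn):
        @functools.wraps(fn)  # records fn as wrapper.__wrapped__
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @logged
    def reboot(instance, network_info, reboot_type):
        pass

    # inspect.unwrap follows the __wrapped__ chain back to the original
    # function, whose true signature can then be compared.
    original = inspect.unwrap(reboot)
    print(inspect.signature(original))  # (instance, network_info, reboot_type)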
- - for attr in driver.HyperVDriver.__dict__: - class_member = getattr(driver.HyperVDriver, attr) - if callable(class_member): - mocked_method = mock.patch.object( - driver.HyperVDriver, attr, - safe_utils.get_wrapped_function(class_member)) - mocked_method.start() - self.addCleanup(mocked_method.stop) - - self.assertPublicAPISignatures(base_driver.ComputeDriver, - driver.HyperVDriver) - - def test_converted_exception(self): - self.driver._vmops.get_info.side_effect = ( - os_win_exc.OSWinException) - self.assertRaises(exception.NovaException, - self.driver.get_info, mock.sentinel.instance) - - self.driver._vmops.get_info.side_effect = os_win_exc.HyperVException - self.assertRaises(exception.NovaException, - self.driver.get_info, mock.sentinel.instance) - - self.driver._vmops.get_info.side_effect = ( - os_win_exc.HyperVVMNotFoundException(vm_name='foofoo')) - self.assertRaises(exception.InstanceNotFound, - self.driver.get_info, mock.sentinel.instance) - - def test_assert_original_traceback_maintained(self): - def bar(self): - foo = "foofoo" - raise os_win_exc.HyperVVMNotFoundException(vm_name=foo) - - self.driver._vmops.get_info.side_effect = bar - try: - self.driver.get_info(mock.sentinel.instance) - self.fail("Test expected exception, but it was not raised.") - except exception.InstanceNotFound: - # exception has been raised as expected. - _, _, trace = sys.exc_info() - while trace.tb_next: - # iterate until the original exception source, bar. - trace = trace.tb_next - - # original frame will contain the 'foo' variable. - self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) - - def test_init_host(self): - mock_get_inst_dir = self.driver._pathutils.get_instances_dir - mock_get_inst_dir.return_value = mock.sentinel.FAKE_DIR - - self.driver.init_host(mock.sentinel.host) - - mock_start_console_handlers = ( - self.driver._serialconsoleops.start_console_handlers) - mock_start_console_handlers.assert_called_once_with() - self.driver._event_handler.add_callback.assert_has_calls( - [mock.call(self.driver.emit_event), - mock.call(self.driver._vmops.instance_state_change_callback)]) - self.driver._event_handler.start_listener.assert_called_once_with() - - mock_get_inst_dir.assert_called_once_with() - self.driver._pathutils.check_create_dir.assert_called_once_with( - mock.sentinel.FAKE_DIR) - - def test_list_instance_uuids(self): - self.driver.list_instance_uuids() - self.driver._vmops.list_instance_uuids.assert_called_once_with() - - def test_list_instances(self): - self.driver.list_instances() - self.driver._vmops.list_instances.assert_called_once_with() - - @mock.patch.object(driver.HyperVDriver, '_recreate_image_meta') - def test_spawn(self, mock_recreate_img_meta): - self.driver.spawn( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta, mock.sentinel.injected_files, - mock.sentinel.admin_password, mock.sentinel.allocations, - mock.sentinel.network_info, - mock.sentinel.block_device_info, - mock.sentinel.power_on) - - mock_recreate_img_meta.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta) - self.driver._vmops.spawn.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock_recreate_img_meta.return_value, mock.sentinel.injected_files, - mock.sentinel.admin_password, mock.sentinel.network_info, - mock.sentinel.block_device_info, - mock.sentinel.power_on) - - def test_reboot(self): - self.driver.reboot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, 
mock.sentinel.reboot_type, - mock.sentinel.block_device_info, mock.sentinel.bad_vol_callback, - mock.sentinel.accel_info) - - self.driver._vmops.reboot.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info, - mock.sentinel.reboot_type) - - def test_destroy(self): - self.driver.destroy( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks) - - self.driver._vmops.destroy.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.destroy_disks) - - def test_cleanup(self): - self.driver.cleanup( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks, mock.sentinel.migrate_data, - mock.sentinel.destroy_vifs) - - self.driver._vmops.unplug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_get_info(self): - self.driver.get_info(mock.sentinel.instance) - self.driver._vmops.get_info.assert_called_once_with( - mock.sentinel.instance) - - def test_attach_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.attach_volume( - mock.sentinel.context, mock.sentinel.connection_info, - mock_instance, mock.sentinel.mountpoint, mock.sentinel.disk_bus, - mock.sentinel.device_type, mock.sentinel.encryption) - - self.driver._volumeops.attach_volume.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.connection_info, - mock_instance, - update_device_metadata=True) - - @mock.patch('nova.context.get_admin_context', - lambda: mock.sentinel.admin_context) - def test_detach_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.detach_volume( - mock.sentinel.context, mock.sentinel.connection_info, - mock_instance, mock.sentinel.mountpoint, mock.sentinel.encryption) - - self.driver._volumeops.detach_volume.assert_called_once_with( - mock.sentinel.admin_context, - mock.sentinel.connection_info, - mock_instance, - update_device_metadata=True) - - def test_extend_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.extend_volume( - mock.sentinel.connection_info, mock_instance, - mock.sentinel.requested_size) - - self.driver._volumeops.extend_volume.assert_called_once_with( - mock.sentinel.connection_info) - - def test_get_volume_connector(self): - self.driver.get_volume_connector(mock.sentinel.instance) - self.driver._volumeops.get_volume_connector.assert_called_once_with() - - def test_get_available_resource(self): - self.driver.get_available_resource(mock.sentinel.nodename) - self.driver._hostops.get_available_resource.assert_called_once_with() - - def test_get_available_nodes(self): - response = self.driver.get_available_nodes(mock.sentinel.refresh) - self.assertEqual([platform.node()], response) - - def test_host_power_action(self): - self.driver.host_power_action(mock.sentinel.action) - self.driver._hostops.host_power_action.assert_called_once_with( - mock.sentinel.action) - - def test_snapshot(self): - self.driver.snapshot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_id, mock.sentinel.update_task_state) - - self.driver._snapshotops.snapshot.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_id, mock.sentinel.update_task_state) - - def test_volume_snapshot_create(self): - mock_instance = 
fake_instance.fake_instance_obj(self.context) - self.driver.volume_snapshot_create( - self.context, mock_instance, mock.sentinel.volume_id, - mock.sentinel.create_info) - - self.driver._volumeops.volume_snapshot_create.assert_called_once_with( - self.context, mock_instance, mock.sentinel.volume_id, - mock.sentinel.create_info) - - def test_volume_snapshot_delete(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.volume_snapshot_delete( - self.context, mock_instance, mock.sentinel.volume_id, - mock.sentinel.snapshot_id, mock.sentinel.delete_info) - - self.driver._volumeops.volume_snapshot_delete.assert_called_once_with( - self.context, mock_instance, mock.sentinel.volume_id, - mock.sentinel.snapshot_id, mock.sentinel.delete_info) - - def test_pause(self): - self.driver.pause(mock.sentinel.instance) - self.driver._vmops.pause.assert_called_once_with( - mock.sentinel.instance) - - def test_unpause(self): - self.driver.unpause(mock.sentinel.instance) - self.driver._vmops.unpause.assert_called_once_with( - mock.sentinel.instance) - - def test_suspend(self): - self.driver.suspend(mock.sentinel.context, mock.sentinel.instance) - self.driver._vmops.suspend.assert_called_once_with( - mock.sentinel.instance) - - def test_resume(self): - self.driver.resume( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.resume.assert_called_once_with( - mock.sentinel.instance) - - def test_power_off(self): - self.driver.power_off( - mock.sentinel.instance, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - self.driver._vmops.power_off.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - def test_power_on(self): - self.driver.power_on( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.accel_info) - - self.driver._vmops.power_on.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.block_device_info, - mock.sentinel.network_info) - - def test_resume_state_on_host_boot(self): - self.driver.resume_state_on_host_boot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.resume_state_on_host_boot.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - def test_live_migration(self): - self.driver.live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest, mock.sentinel.post_method, - mock.sentinel.recover_method, mock.sentinel.block_migration, - mock.sentinel.migrate_data) - - self.driver._livemigrationops.live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest, mock.sentinel.post_method, - mock.sentinel.recover_method, mock.sentinel.block_migration, - mock.sentinel.migrate_data) - - @mock.patch.object(driver.HyperVDriver, 'destroy') - def test_rollback_live_migration_at_destination(self, mock_destroy): - self.driver.rollback_live_migration_at_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks, mock.sentinel.migrate_data) - - mock_destroy.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - 
destroy_disks=mock.sentinel.destroy_disks) - - def test_pre_live_migration(self): - migrate_data = self.driver.pre_live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.network_info, - mock.sentinel.disk_info, mock.sentinel.migrate_data) - - self.assertEqual(mock.sentinel.migrate_data, migrate_data) - pre_live_migration = self.driver._livemigrationops.pre_live_migration - pre_live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.network_info) - - def test_post_live_migration(self): - self.driver.post_live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.migrate_data) - - post_live_migration = self.driver._livemigrationops.post_live_migration - post_live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, - mock.sentinel.migrate_data) - - def test_post_live_migration_at_source(self): - self.driver.post_live_migration_at_source( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info) - - self.driver._vmops.unplug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_post_live_migration_at_destination(self): - self.driver.post_live_migration_at_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_migration, - mock.sentinel.block_device_info) - - mtd = self.driver._livemigrationops.post_live_migration_at_destination - mtd.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_migration) - - def test_check_can_live_migrate_destination(self): - self.driver.check_can_live_migrate_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, - mock.sentinel.block_migration, mock.sentinel.disk_over_commit) - - mtd = self.driver._livemigrationops.check_can_live_migrate_destination - mtd.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, - mock.sentinel.block_migration, mock.sentinel.disk_over_commit) - - def test_cleanup_live_migration_destination_check(self): - self.driver.cleanup_live_migration_destination_check( - mock.sentinel.context, mock.sentinel.dest_check_data) - - _livemigrops = self.driver._livemigrationops - method = _livemigrops.cleanup_live_migration_destination_check - method.assert_called_once_with( - mock.sentinel.context, mock.sentinel.dest_check_data) - - def test_check_can_live_migrate_source(self): - self.driver.check_can_live_migrate_source( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest_check_data, mock.sentinel.block_device_info) - - method = self.driver._livemigrationops.check_can_live_migrate_source - method.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest_check_data) - - def test_plug_vifs(self): - self.driver.plug_vifs( - mock.sentinel.instance, mock.sentinel.network_info) - - self.driver._vmops.plug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_unplug_vifs(self): - self.driver.unplug_vifs( - mock.sentinel.instance, mock.sentinel.network_info) - - self.driver._vmops.unplug_vifs.assert_called_once_with( - mock.sentinel.instance, 
mock.sentinel.network_info) - - def test_migrate_disk_and_power_off(self): - self.driver.migrate_disk_and_power_off( - mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, - mock.sentinel.flavor, mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - migr_power_off = self.driver._migrationops.migrate_disk_and_power_off - migr_power_off.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, - mock.sentinel.flavor, mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - def test_confirm_migration(self): - self.driver.confirm_migration( - mock.sentinel.context, - mock.sentinel.migration, mock.sentinel.instance, - mock.sentinel.network_info) - - self.driver._migrationops.confirm_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration, mock.sentinel.instance, - mock.sentinel.network_info) - - def test_finish_revert_migration(self): - self.driver.finish_revert_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.migration, - mock.sentinel.block_device_info, mock.sentinel.power_on) - - finish_revert_migr = self.driver._migrationops.finish_revert_migration - finish_revert_migr.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.power_on) - - @mock.patch.object(driver.HyperVDriver, '_recreate_image_meta') - def test_finish_migration(self, mock_recreate_img_meta): - self.driver.finish_migration( - mock.sentinel.context, mock.sentinel.migration, - mock.sentinel.instance, mock.sentinel.disk_info, - mock.sentinel.network_info, mock.sentinel.image_meta, - mock.sentinel.resize_instance, mock.sentinel.allocations, - mock.sentinel.block_device_info, mock.sentinel.power_on) - - mock_recreate_img_meta.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta) - self.driver._migrationops.finish_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.migration, - mock.sentinel.instance, mock.sentinel.disk_info, - mock.sentinel.network_info, mock_recreate_img_meta.return_value, - mock.sentinel.resize_instance, mock.sentinel.block_device_info, - mock.sentinel.power_on) - - def test_get_host_ip_addr(self): - self.driver.get_host_ip_addr() - - self.driver._hostops.get_host_ip_addr.assert_called_once_with() - - def test_get_host_uptime(self): - self.driver.get_host_uptime() - self.driver._hostops.get_host_uptime.assert_called_once_with() - - def test_get_rdp_console(self): - self.driver.get_rdp_console( - mock.sentinel.context, mock.sentinel.instance) - self.driver._rdpconsoleops.get_rdp_console.assert_called_once_with( - mock.sentinel.instance) - - def test_get_console_output(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.get_console_output(self.context, mock_instance) - - mock_get_console_output = ( - self.driver._serialconsoleops.get_console_output) - mock_get_console_output.assert_called_once_with( - mock_instance.name) - - def test_get_serial_console(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.get_serial_console(self.context, mock_instance) - - mock_get_serial_console = ( - self.driver._serialconsoleops.get_serial_console) - mock_get_serial_console.assert_called_once_with( - mock_instance.name) - - def 
test_manage_image_cache(self): - self.driver.manage_image_cache(mock.sentinel.context, - mock.sentinel.all_instances) - self.driver._imagecache.update.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances) - - def test_cache_image(self): - self.driver._imagecache.cache_image.return_value = ( - mock.sentinel.image_path, mock.sentinel.fetched) - - fetched = self.driver.cache_image( - mock.sentinel.context, mock.sentinel.image_id) - - self.assertEqual(mock.sentinel.fetched, fetched) - self.driver._imagecache.cache_image.assert_called_once_with( - mock.sentinel.context, mock.sentinel.image_id) - - def test_attach_interface(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.attach_interface( - self.context, mock_instance, mock.sentinel.image_meta, - mock.sentinel.vif) - - self.driver._vmops.attach_interface.assert_called_once_with( - self.context, mock_instance, mock.sentinel.vif) - - @mock.patch.object(driver.HyperVDriver, '_recreate_image_meta') - def test_rescue(self, mock_recreate_img_meta): - self.driver.rescue( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.image_meta, - mock.sentinel.rescue_password, mock.sentinel.block_device_info) - - mock_recreate_img_meta.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta) - self.driver._vmops.rescue_instance.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, - mock_recreate_img_meta.return_value, - mock.sentinel.rescue_password) - - def test_unrescue(self): - self.driver.unrescue( - mock.sentinel.context, mock.sentinel.instance) - - self.driver._vmops.unrescue_instance.assert_called_once_with( - mock.sentinel.instance) - - def _check_recreate_image_meta(self, mock_image_meta, image_ref='', - instance_img_ref=''): - system_meta = {'image_base_image_ref': instance_img_ref} - mock_instance = mock.MagicMock(system_metadata=system_meta) - self.driver._image_api.get.return_value = {} - - image_meta = self.driver._recreate_image_meta( - mock.sentinel.context, mock_instance, mock_image_meta) - - if image_ref: - self.driver._image_api.get.assert_called_once_with( - mock.sentinel.context, image_ref) - else: - mock_image_meta.obj_to_primitive.assert_called_once_with() - self.assertEqual({'base_image_ref': image_ref}, - image_meta['properties']) - - self.assertEqual(image_ref, image_meta['id']) - - def test_recreate_image_meta_has_id(self): - mock_image_meta = mock.MagicMock(id=mock.sentinel.image_meta_id) - self._check_recreate_image_meta( - mock_image_meta, mock.sentinel.image_meta_id) - - def test_recreate_image_meta_instance(self): - mock_image_meta = mock.MagicMock() - mock_image_meta.obj_attr_is_set.return_value = False - self._check_recreate_image_meta( - mock_image_meta, mock.sentinel.instance_img_ref, - mock.sentinel.instance_img_ref) - - def test_recreate_image_meta_boot_from_volume(self): - mock_image_meta = mock.MagicMock() - mock_image_meta.obj_attr_is_set.return_value = False - mock_image_meta.obj_to_primitive.return_value = { - 'nova_object.data': {}} - - self._check_recreate_image_meta(mock_image_meta) - - def test_check_instance_shared_storage_local(self): - check_local = ( - self.driver._pathutils.check_instance_shared_storage_local) - - ret_val = self.driver.check_instance_shared_storage_local( - mock.sentinel.context, mock.sentinel.instance) - - self.assertEqual(check_local.return_value, ret_val) - 
check_local.assert_called_once_with(mock.sentinel.instance) - - def test_check_instance_shared_storage_remote(self): - check_remote = ( - self.driver._pathutils.check_instance_shared_storage_remote) - - ret_val = self.driver.check_instance_shared_storage_remote( - mock.sentinel.context, mock.sentinel.data) - - self.assertEqual(check_remote.return_value, ret_val) - check_remote.assert_called_once_with(mock.sentinel.data) - - def test_check_instance_shared_storage_cleanup(self): - check_cleanup = ( - self.driver._pathutils.check_instance_shared_storage_cleanup) - - ret_val = self.driver.check_instance_shared_storage_cleanup( - mock.sentinel.context, mock.sentinel.data) - - self.assertEqual(check_cleanup.return_value, ret_val) - check_cleanup.assert_called_once_with(mock.sentinel.data) - - @mock.patch.object(driver.HyperVDriver, '_get_allocation_ratios') - def test_update_provider_tree(self, mock_get_alloc_ratios): - mock_ptree = mock.Mock() - mock_inventory = mock_ptree.data.return_value.inventory - - self.driver.update_provider_tree( - mock_ptree, mock.sentinel.nodename, mock.sentinel.allocations) - - mock_get_alloc_ratios.assert_called_once_with(mock_inventory) - self.driver._hostops.update_provider_tree.assert_called_once_with( - mock_ptree, mock.sentinel.nodename, - mock_get_alloc_ratios.return_value, - mock.sentinel.allocations) diff --git a/compute_hyperv/tests/unit/test_eventhandler.py b/compute_hyperv/tests/unit/test_eventhandler.py deleted file mode 100644 index 82de4146..00000000 --- a/compute_hyperv/tests/unit/test_eventhandler.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
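The event handler tests that follow rely on the ddt package, which expands one decorated test method into a separate test case per datum. A minimal sketch of the mechanism, with a made-up test class:

    import unittest

    import ddt

    @ddt.ddt
    class ExampleTestCase(unittest.TestCase):
        @ddt.data(True, False)
        def test_flag(self, missing_uuid):
            # ddt generates one named test per datum, each receiving
            # that datum as an extra positional argument.
            self.assertIsInstance(missing_uuid, bool)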
-from unittest import mock - -import ddt -from nova import utils -from os_win import constants - -from compute_hyperv.nova import eventhandler -from compute_hyperv.nova import vmops -from compute_hyperv.tests.unit import test_base - - -@ddt.ddt -class EventHandlerTestCase(test_base.HyperVBaseTestCase): - _FAKE_POLLING_INTERVAL = 3 - _FAKE_EVENT_CHECK_TIMEFRAME = 15 - - def setUp(self): - super(EventHandlerTestCase, self).setUp() - - self.flags( - power_state_check_timeframe=self._FAKE_EVENT_CHECK_TIMEFRAME, - group='hyperv') - self.flags( - power_state_event_polling_interval=self._FAKE_POLLING_INTERVAL, - group='hyperv') - - self._event_handler = eventhandler.InstanceEventHandler() - - @ddt.data(True, False) - @mock.patch.object(vmops.VMOps, 'get_instance_uuid') - @mock.patch.object(eventhandler.InstanceEventHandler, '_emit_event') - def test_handle_event(self, missing_uuid, mock_emit_event, mock_get_uuid): - mock_get_uuid.return_value = ( - mock.sentinel.instance_uuid if not missing_uuid else None) - self._event_handler._vmutils.get_vm_power_state.return_value = ( - mock.sentinel.power_state) - - self._event_handler._handle_event(mock.sentinel.instance_name, - mock.sentinel.power_state) - - if not missing_uuid: - mock_emit_event.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.instance_uuid, - mock.sentinel.power_state) - else: - self.assertFalse(mock_emit_event.called) - - @mock.patch.object(eventhandler.InstanceEventHandler, '_get_virt_event') - @mock.patch.object(utils, 'spawn_n', - lambda f, *args, **kwargs: f(*args, **kwargs)) - def test_emit_event(self, mock_get_event): - state = constants.HYPERV_VM_STATE_ENABLED - callbacks = [mock.Mock(), mock.Mock()] - - for cbk in callbacks: - self._event_handler.add_callback(cbk) - - self._event_handler._emit_event(mock.sentinel.instance_name, - mock.sentinel.instance_uuid, - state) - - for cbk in callbacks: - cbk.assert_called_once_with(mock_get_event.return_value) - - def test_get_virt_event(self): - instance_state = constants.HYPERV_VM_STATE_ENABLED - expected_transition = self._event_handler._TRANSITION_MAP[ - instance_state] - - virt_event = self._event_handler._get_virt_event( - mock.sentinel.instance_uuid, - mock.sentinel.instance_name, - instance_state) - - self.assertEqual(mock.sentinel.instance_name, virt_event.name) - self.assertEqual(mock.sentinel.instance_uuid, virt_event.uuid) - self.assertEqual(expected_transition, virt_event.transition) diff --git a/compute_hyperv/tests/unit/test_hostops.py b/compute_hyperv/tests/unit/test_hostops.py deleted file mode 100644 index 21430db3..00000000 --- a/compute_hyperv/tests/unit/test_hostops.py +++ /dev/null @@ -1,436 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
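test_emit_event above neutralizes the thread pool by patching the spawn helper with a pass-through lambda, so callbacks run inline and can be asserted on immediately. The same idea in isolation, with hypothetical names:

    from unittest import mock

    def emit(spawn, callbacks, event):
        # Production code would pass an asynchronous spawner here.
        for cbk in callbacks:
            spawn(cbk, event)

    cbk = mock.Mock()
    # A pass-through spawner runs the callback synchronously, so the
    # assertion below cannot race against a worker thread.
    emit(lambda f, *args, **kwargs: f(*args, **kwargs), [cbk], 'event')
    cbk.assert_called_once_with('event')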
- -import datetime -from unittest import mock - -from nova import context as nova_context -from nova import exception -from nova import objects -from nova.objects import fields as obj_fields -import os_resource_classes as orc -from os_win import constants as os_win_const -from oslo_serialization import jsonutils -from oslo_utils import units - -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import hostops -from compute_hyperv.tests.unit import test_base - -CONF = compute_hyperv.nova.conf.CONF - - -class HostOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V HostOps class.""" - - _autospec_classes = [ - hostops.pathutils.PathUtils, - hostops.vmops.VMOps, - hostops.api.API, - ] - - FAKE_ARCHITECTURE = 0 - FAKE_NAME = 'fake_name' - FAKE_MANUFACTURER = 'FAKE_MANUFACTURER' - FAKE_NUM_CPUS = 1 - FAKE_INSTANCE_DIR = "C:/fake/dir" - FAKE_LOCAL_IP = '10.11.12.13' - FAKE_TICK_COUNT = 1000000 - - def setUp(self): - super(HostOpsTestCase, self).setUp() - self._hostops = hostops.HostOps() - - def test_get_cpu_info(self): - mock_processors = mock.MagicMock() - info = {'Architecture': self.FAKE_ARCHITECTURE, - 'Name': self.FAKE_NAME, - 'Manufacturer': self.FAKE_MANUFACTURER, - 'NumberOfCores': self.FAKE_NUM_CPUS, - 'NumberOfLogicalProcessors': self.FAKE_NUM_CPUS} - - def getitem(key): - return info[key] - mock_processors.__getitem__.side_effect = getitem - self._hostops._hostutils.get_cpus_info.return_value = [mock_processors] - - response = self._hostops._get_cpu_info() - - self._hostops._hostutils.get_cpus_info.assert_called_once_with() - - expected = [mock.call(fkey) - for fkey in os_win_const.PROCESSOR_FEATURE.keys()] - self._hostops._hostutils.is_cpu_feature_present.assert_has_calls( - expected) - expected_response = self._get_mock_cpu_info() - self.assertEqual(expected_response, response) - - def _get_mock_cpu_info(self): - return {'vendor': self.FAKE_MANUFACTURER, - 'model': self.FAKE_NAME, - 'arch': constants.WMI_WIN32_PROCESSOR_ARCHITECTURE[ - self.FAKE_ARCHITECTURE], - 'features': list(os_win_const.PROCESSOR_FEATURE.values()), - 'topology': {'cores': self.FAKE_NUM_CPUS, - 'threads': self.FAKE_NUM_CPUS, - 'sockets': self.FAKE_NUM_CPUS}} - - def _get_mock_gpu_info(self): - return {'remotefx_total_video_ram': 4096, - 'remotefx_available_video_ram': 2048, - 'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO} - - def test_get_memory_info(self): - self._hostops._hostutils.get_memory_info.return_value = (2 * units.Ki, - 1 * units.Ki) - response = self._hostops._get_memory_info() - self._hostops._hostutils.get_memory_info.assert_called_once_with() - self.assertEqual((2, 1, 1), response) - - def test_get_storage_info_gb(self): - self._hostops._pathutils.get_instances_dir.return_value = '' - self._hostops._diskutils.get_disk_capacity.return_value = ( - 2 * units.Gi, 1 * units.Gi) - - response = self._hostops._get_storage_info_gb() - self._hostops._pathutils.get_instances_dir.assert_called_once_with() - self._hostops._diskutils.get_disk_capacity.assert_called_once_with('') - self.assertEqual((2, 1, 1), response) - - def test_get_hypervisor_version(self): - self._hostops._hostutils.get_windows_version.return_value = '6.3.9600' - response_lower = self._hostops._get_hypervisor_version() - - self._hostops._hostutils.get_windows_version.return_value = '10.1.0' - response_higher = self._hostops._get_hypervisor_version() - - self.assertEqual(6003, response_lower) - self.assertEqual(10001, response_higher) - - def test_get_remotefx_gpu_info(self): 
- self.flags(enable_remotefx=True, group='hyperv') - fake_gpus = [{'total_video_ram': '2048', - 'available_video_ram': '1024'}, - {'total_video_ram': '1024', - 'available_video_ram': '1024'}] - self._hostops._hostutils.get_remotefx_gpu_info.return_value = fake_gpus - - ret_val = self._hostops._get_remotefx_gpu_info() - - self.assertEqual(3072, ret_val['total_video_ram']) - self.assertEqual(1024, ret_val['used_video_ram']) - - def test_get_remotefx_gpu_info_disabled(self): - self.flags(enable_remotefx=False, group='hyperv') - - ret_val = self._hostops._get_remotefx_gpu_info() - - self.assertEqual(0, ret_val['total_video_ram']) - self.assertEqual(0, ret_val['used_video_ram']) - self._hostops._hostutils.get_remotefx_gpu_info.assert_not_called() - - @mock.patch.object(hostops.objects, 'NUMACell') - @mock.patch.object(hostops.objects, 'NUMATopology') - def test_get_host_numa_topology(self, mock_NUMATopology, mock_NUMACell): - numa_node = {'id': mock.sentinel.id, 'memory': mock.sentinel.memory, - 'memory_usage': mock.sentinel.memory_usage, - 'cpuset': mock.sentinel.cpuset, - 'cpu_usage': mock.sentinel.cpu_usage} - self._hostops._hostutils.get_numa_nodes.return_value = [ - numa_node.copy()] - - result = self._hostops._get_host_numa_topology() - - self.assertEqual(mock_NUMATopology.return_value, result) - mock_NUMACell.assert_called_once_with( - pinned_cpus=set([]), mempages=[], siblings=[], **numa_node) - mock_NUMATopology.assert_called_once_with( - cells=[mock_NUMACell.return_value]) - - @mock.patch.object(hostops.HostOps, '_get_pci_passthrough_devices') - @mock.patch.object(hostops.HostOps, '_get_host_numa_topology') - @mock.patch.object(hostops.HostOps, '_get_remotefx_gpu_info') - @mock.patch.object(hostops.HostOps, '_get_cpu_info') - @mock.patch.object(hostops.HostOps, '_get_memory_info') - @mock.patch.object(hostops.HostOps, '_get_hypervisor_version') - @mock.patch.object(hostops.HostOps, '_get_storage_info_gb') - @mock.patch('platform.node') - def test_get_available_resource(self, mock_node, - mock_get_storage_info_gb, - mock_get_hypervisor_version, - mock_get_memory_info, mock_get_cpu_info, - mock_get_gpu_info, mock_get_numa_topology, - mock_get_pci_devices): - mock_get_storage_info_gb.return_value = (mock.sentinel.LOCAL_GB, - mock.sentinel.LOCAL_GB_FREE, - mock.sentinel.LOCAL_GB_USED) - mock_get_memory_info.return_value = (mock.sentinel.MEMORY_MB, - mock.sentinel.MEMORY_MB_FREE, - mock.sentinel.MEMORY_MB_USED) - mock_cpu_info = self._get_mock_cpu_info() - mock_get_cpu_info.return_value = mock_cpu_info - mock_get_hypervisor_version.return_value = mock.sentinel.VERSION - mock_get_numa_topology.return_value._to_json.return_value = ( - mock.sentinel.numa_topology_json) - mock_get_pci_devices.return_value = mock.sentinel.pcis - - mock_gpu_info = self._get_mock_gpu_info() - mock_get_gpu_info.return_value = mock_gpu_info - - response = self._hostops.get_available_resource() - - mock_get_memory_info.assert_called_once_with() - mock_get_cpu_info.assert_called_once_with() - mock_get_hypervisor_version.assert_called_once_with() - mock_get_pci_devices.assert_called_once_with() - expected = {'supported_instances': [("i686", "hyperv", "hvm"), - ("x86_64", "hyperv", "hvm")], - 'hypervisor_hostname': mock_node(), - 'cpu_info': jsonutils.dumps(mock_cpu_info), - 'hypervisor_version': mock.sentinel.VERSION, - 'memory_mb': mock.sentinel.MEMORY_MB, - 'memory_mb_used': mock.sentinel.MEMORY_MB_USED, - 'local_gb': mock.sentinel.LOCAL_GB, - 'local_gb_used': mock.sentinel.LOCAL_GB_USED, - 'disk_available_least': 
mock.sentinel.LOCAL_GB_FREE, - 'vcpus': self.FAKE_NUM_CPUS, - 'vcpus_used': 0, - 'hypervisor_type': 'hyperv', - 'numa_topology': mock.sentinel.numa_topology_json, - 'remotefx_available_video_ram': 2048, - 'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO, - 'remotefx_total_video_ram': 4096, - 'pci_passthrough_devices': mock.sentinel.pcis, - } - self.assertEqual(expected, response) - - @mock.patch.object(hostops.jsonutils, 'dumps') - def test_get_pci_passthrough_devices(self, mock_jsonutils_dumps): - mock_pci_dev = {'vendor_id': 'fake_vendor_id', - 'product_id': 'fake_product_id', - 'dev_id': 'fake_dev_id', - 'address': 'fake_address'} - mock_get_pcis = self._hostops._hostutils.get_pci_passthrough_devices - mock_get_pcis.return_value = [mock_pci_dev] - - expected_label = 'label_%(vendor_id)s_%(product_id)s' % { - 'vendor_id': mock_pci_dev['vendor_id'], - 'product_id': mock_pci_dev['product_id']} - expected_pci_dev = mock_pci_dev.copy() - expected_pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD, - label=expected_label, - numa_node=None) - - result = self._hostops._get_pci_passthrough_devices() - - self.assertEqual(mock_jsonutils_dumps.return_value, result) - mock_jsonutils_dumps.assert_called_once_with([expected_pci_dev]) - - def _test_host_power_action(self, action): - self._hostops._hostutils.host_power_action = mock.Mock() - - self._hostops.host_power_action(action) - self._hostops._hostutils.host_power_action.assert_called_with( - action) - - def test_host_power_action_shutdown(self): - self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN) - - def test_host_power_action_reboot(self): - self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT) - - def test_host_power_action_exception(self): - self.assertRaises(NotImplementedError, - self._hostops.host_power_action, - constants.HOST_POWER_ACTION_STARTUP) - - def test_get_host_ip_addr(self): - CONF.set_override('my_ip', None) - self._hostops._hostutils.get_local_ips.return_value = [ - self.FAKE_LOCAL_IP] - response = self._hostops.get_host_ip_addr() - self._hostops._hostutils.get_local_ips.assert_called_once_with() - self.assertEqual(self.FAKE_LOCAL_IP, response) - - @mock.patch('time.strftime') - def test_get_host_uptime(self, mock_time): - self._hostops._hostutils.get_host_tick_count64.return_value = ( - self.FAKE_TICK_COUNT) - - response = self._hostops.get_host_uptime() - tdelta = datetime.timedelta(milliseconds=int(self.FAKE_TICK_COUNT)) - expected = "%s up %s, 0 users, load average: 0, 0, 0" % ( - str(mock_time()), str(tdelta)) - - self.assertEqual(expected, response) - - @mock.patch.object(hostops.HostOps, '_wait_for_instance_pending_task') - @mock.patch.object(hostops.HostOps, '_set_service_state') - @mock.patch.object(hostops.HostOps, '_migrate_vm') - @mock.patch.object(nova_context, 'get_admin_context') - def _test_host_maintenance_mode(self, mock_get_admin_context, - mock_migrate_vm, - mock_set_service_state, - mock_wait_for_instance_pending_task, - vm_counter): - context = mock_get_admin_context.return_value - self._hostops._vmutils.list_instances.return_value = [ - mock.sentinel.VM_NAME] - self._hostops._vmops.list_instance_uuids.return_value = [ - mock.sentinel.UUID] * vm_counter - if vm_counter == 0: - result = self._hostops.host_maintenance_mode( - host=mock.sentinel.HOST, mode=True) - self.assertEqual('on_maintenance', result) - else: - self.assertRaises(exception.MigrationError, - self._hostops.host_maintenance_mode, - host=mock.sentinel.HOST, - mode=True) - - 
mock_set_service_state.assert_called_once_with( - host=mock.sentinel.HOST, binary='nova-compute', is_disabled=True) - - mock_migrate_vm.assert_called_with( - context, mock.sentinel.VM_NAME, mock.sentinel.HOST) - - @mock.patch.object(hostops.HostOps, '_set_service_state') - @mock.patch.object(nova_context, 'get_admin_context') - def test_host_maintenance_mode_disabled(self, mock_get_admin_context, - mock_set_service_state): - result = self._hostops.host_maintenance_mode( - host=mock.sentinel.HOST, mode=False) - mock_set_service_state.assert_called_once_with( - host=mock.sentinel.HOST, binary='nova-compute', is_disabled=False) - self.assertEqual('off_maintenance', result) - - def test_host_maintenance_mode_enabled(self): - self._test_host_maintenance_mode(vm_counter=0) - - def test_host_maintenance_mode_exception(self): - self._test_host_maintenance_mode(vm_counter=2) - - @mock.patch.object(hostops.HostOps, '_wait_for_instance_pending_task') - @mock.patch.object(objects.Instance, 'get_by_uuid') - def _test_migrate_vm(self, mock_get_by_uuid, - mock_wait_for_instance_pending_task, - instance_uuid=None, vm_state='active'): - self._hostops._vmutils.get_instance_uuid.return_value = instance_uuid - instance = mock_get_by_uuid.return_value - type(instance).vm_state = mock.PropertyMock( - side_effect=[vm_state]) - self._hostops._migrate_vm(ctxt=mock.sentinel.CONTEXT, - vm_name=mock.sentinel.VM_NAME, - host=mock.sentinel.HOST) - if not instance_uuid: - self.assertFalse(self._hostops._api.live_migrate.called) - return - if vm_state == 'active': - self._hostops._api.live_migrate.assert_called_once_with( - mock.sentinel.CONTEXT, instance, block_migration=False, - disk_over_commit=False, host_name=None) - else: - self._hostops._api.resize.assert_called_once_with( - mock.sentinel.CONTEXT, instance, flavor_id=None, - clean_shutdown=True) - mock_wait_for_instance_pending_task.assert_called_once_with( - mock.sentinel.CONTEXT, instance_uuid) - - def test_migrate_vm_not_found(self): - self._test_migrate_vm() - - def test_livemigrate_vm(self): - self._test_migrate_vm(instance_uuid=mock.sentinel.INSTANCE_UUID) - - def test_resize_vm(self): - self._test_migrate_vm(instance_uuid=mock.sentinel.INSTANCE_UUID, - vm_state='shutoff') - - def test_migrate_vm_exception(self): - self.assertRaises(exception.MigrationError, self._hostops._migrate_vm, - ctxt=mock.sentinel.CONTEXT, - vm_name=mock.sentinel.VM_NAME, - host=mock.sentinel.HOST) - - @mock.patch("time.sleep") - @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_wait_for_instance_pending_task(self, mock_get_by_uuid, - mock_sleep): - instance = mock_get_by_uuid.return_value - type(instance).task_state = mock.PropertyMock( - side_effect=['migrating', 'migrating', None]) - - self._hostops._wait_for_instance_pending_task( - context=mock.sentinel.CONTEXT, vm_uuid=mock.sentinel.VM_UUID) - - instance.refresh.assert_called_once_with() - - @mock.patch("time.sleep") - @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_wait_for_instance_pending_task_timeout(self, mock_get_by_uuid, - mock_sleep): - instance = mock_get_by_uuid.return_value - self.flags(evacuate_task_state_timeout=2, group='hyperv') - instance.task_state = 'migrating' - - self.assertRaises(exception.InternalError, - self._hostops._wait_for_instance_pending_task, - context=mock.sentinel.CONTEXT, - vm_uuid=mock.sentinel.VM_UUID) - - @mock.patch.object(hostops.HostOps, 'get_available_resource') - def test_update_provider_tree(self, mock_get_avail_res): - resources = mock.MagicMock() - 
allocation_ratios = mock.MagicMock() - provider_tree = mock.Mock() - - mock_get_avail_res.return_value = resources - - self.flags(reserved_host_disk_mb=1) - - exp_inventory = { - orc.VCPU: { - 'total': resources['vcpus'], - 'min_unit': 1, - 'max_unit': resources['vcpus'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.VCPU], - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': resources['memory_mb'], - 'min_unit': 1, - 'max_unit': resources['memory_mb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.MEMORY_MB], - 'reserved': CONF.reserved_host_memory_mb, - }, - orc.DISK_GB: { - 'total': resources['local_gb'], - 'min_unit': 1, - 'max_unit': resources['local_gb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.DISK_GB], - 'reserved': 1, - }, - } - - self._hostops.update_provider_tree( - provider_tree, mock.sentinel.node_name, allocation_ratios, - mock.sentinel.allocations) - - provider_tree.update_inventory.assert_called_once_with( - mock.sentinel.node_name, - exp_inventory) diff --git a/compute_hyperv/tests/unit/test_imagecache.py b/compute_hyperv/tests/unit/test_imagecache.py deleted file mode 100644 index da4d0547..00000000 --- a/compute_hyperv/tests/unit/test_imagecache.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
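The migration tests above script successive attribute reads by attaching a mock.PropertyMock with a side_effect list to the instance's type; each read consumes the next value, simulating an instance moving through task states. A stand-alone illustration:

    from unittest import mock

    instance = mock.MagicMock()
    # PropertyMock must live on the type; every read of
    # instance.task_state then yields the next side_effect item.
    type(instance).task_state = mock.PropertyMock(
        side_effect=['migrating', 'migrating', None])

    states = [instance.task_state for _ in range(3)]
    assert states == ['migrating', 'migrating', None]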
- -import os -from unittest import mock - -import ddt -import fixtures -from nova import exception -from nova import objects -from nova.tests.unit.objects import test_flavor -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import units - -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import imagecache -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - -CONF = compute_hyperv.nova.conf.CONF - - -@ddt.ddt -class ImageCacheTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V ImageCache class.""" - - _autospec_classes = [ - imagecache.pathutils.PathUtils, - ] - - FAKE_FORMAT = 'fake_format' - FAKE_IMAGE_REF = 'fake_image_ref' - FAKE_VHD_SIZE_GB = 1 - - def setUp(self): - super(ImageCacheTestCase, self).setUp() - - self.context = 'fake-context' - self.instance = fake_instance.fake_instance_obj( - self.context, - expected_attrs=['trusted_certs']) - - self.imagecache = imagecache.ImageCache() - self.tmpdir = self.useFixture(fixtures.TempDir()).path - - def _test_get_root_vhd_size_gb(self, old_flavor=True): - if old_flavor: - mock_flavor = objects.Flavor(**test_flavor.fake_flavor) - self.instance.old_flavor = mock_flavor - else: - self.instance.old_flavor = None - return self.imagecache._get_root_vhd_size_gb(self.instance) - - def test_get_root_vhd_size_gb_old_flavor(self): - ret_val = self._test_get_root_vhd_size_gb() - self.assertEqual(test_flavor.fake_flavor['root_gb'], ret_val) - - def test_get_root_vhd_size_gb(self): - ret_val = self._test_get_root_vhd_size_gb(old_flavor=False) - self.assertEqual(self.instance.flavor.root_gb, ret_val) - - @mock.patch.object(imagecache.ImageCache, '_get_root_vhd_size_gb') - def test_resize_and_cache_vhd_smaller(self, mock_get_vhd_size_gb): - self.imagecache._vhdutils.get_vhd_size.return_value = { - 'VirtualSize': (self.FAKE_VHD_SIZE_GB + 1) * units.Gi - } - mock_get_vhd_size_gb.return_value = self.FAKE_VHD_SIZE_GB - mock_internal_vhd_size = ( - self.imagecache._vhdutils.get_internal_vhd_size_by_file_size) - mock_internal_vhd_size.return_value = self.FAKE_VHD_SIZE_GB * units.Gi - - self.assertRaises(exception.FlavorDiskSmallerThanImage, - self.imagecache._resize_and_cache_vhd, - mock.sentinel.instance, - mock.sentinel.vhd_path) - - self.imagecache._vhdutils.get_vhd_size.assert_called_once_with( - mock.sentinel.vhd_path) - mock_get_vhd_size_gb.assert_called_once_with(mock.sentinel.instance) - mock_internal_vhd_size.assert_called_once_with( - mock.sentinel.vhd_path, self.FAKE_VHD_SIZE_GB * units.Gi) - - def _prepare_get_cached_image(self, path_exists=False, use_cow=False, - rescue_image_id=None, - image_format=constants.DISK_FORMAT_VHD): - self.instance.image_ref = self.FAKE_IMAGE_REF - self.instance.system_metadata = {'image_disk_format': image_format} - self.imagecache._pathutils.get_base_vhd_dir.return_value = ( - self.tmpdir) - self.imagecache._pathutils.exists.return_value = path_exists - self.imagecache._vhdutils.get_vhd_format.return_value = ( - constants.DISK_FORMAT_VHD) - - mock.patch.object(imagecache.images, 'fetch').start() - mock.patch.object(imagecache.images, 'get_info').start() - - self._mock_fetch = imagecache.images.fetch - self._mock_img_info = imagecache.images.get_info - self._mock_img_info.return_value = dict(disk_format=image_format) - - CONF.set_override('use_cow_images', use_cow) - - image_file_name = rescue_image_id or self.FAKE_IMAGE_REF - expected_path = os.path.join(self.tmpdir, - 
image_file_name) - expected_vhd_path = "%s.%s" % (expected_path, - constants.DISK_FORMAT_VHD.lower()) - return (expected_path, expected_vhd_path) - - @ddt.data({}, - {'exists': False, 'provide_img_type': False}) - @ddt.unpack - def test_cache_image(self, exists=True, provide_img_type=True): - (expected_path, - expected_image_path) = self._prepare_get_cached_image( - path_exists=exists) - img_type = constants.DISK_FORMAT_VHD if provide_img_type else None - - ret_path, fetched = self.imagecache.cache_image( - self.context, self.FAKE_IMAGE_REF, img_type) - - self.assertEqual(expected_image_path, ret_path) - self.assertEqual(not exists, fetched) - - if not provide_img_type: - self._mock_img_info.assert_called_once_with( - self.context, self.FAKE_IMAGE_REF) - - def test_get_cached_image_with_fetch(self): - (expected_path, - expected_image_path) = self._prepare_get_cached_image( - path_exists=False, - use_cow=False) - - result = self.imagecache.get_cached_image(self.context, self.instance) - self.assertEqual(expected_image_path, result) - - self._mock_fetch.assert_called_once_with( - self.context, self.FAKE_IMAGE_REF, - expected_path, - self.instance.trusted_certs) - self.imagecache._vhdutils.get_vhd_format.assert_called_once_with( - expected_path) - self.imagecache._pathutils.rename.assert_called_once_with( - expected_path, expected_image_path) - - def test_get_cached_image_with_fetch_exception(self): - (expected_path, - expected_image_path) = self._prepare_get_cached_image(False, False) - - # path doesn't exist until fetched. - self.imagecache._pathutils.exists.side_effect = [False, False, False, - True] - self._mock_fetch.side_effect = exception.InvalidImageRef( - image_href=self.FAKE_IMAGE_REF) - - self.assertRaises(exception.InvalidImageRef, - self.imagecache.get_cached_image, - self.context, self.instance) - - self.imagecache._pathutils.remove.assert_called_once_with( - expected_path) - - @mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd') - @mock.patch.object(imagecache.ImageCache, '_update_image_timestamp') - def test_get_cached_image_use_cow(self, mock_update_img_timestamp, - mock_resize): - (expected_path, - expected_image_path) = self._prepare_get_cached_image(True, True) - - expected_resized_image_path = expected_image_path + 'x' - mock_resize.return_value = expected_resized_image_path - - result = self.imagecache.get_cached_image(self.context, self.instance) - self.assertEqual(expected_resized_image_path, result) - - mock_resize.assert_called_once_with(self.instance, expected_image_path) - mock_update_img_timestamp.assert_called_once_with( - self.instance.image_ref) - - def test_cache_rescue_image_bigger_than_flavor(self): - fake_rescue_image_id = 'fake_rescue_image_id' - - self.imagecache._vhdutils.get_vhd_info.return_value = { - 'VirtualSize': (self.instance.flavor.root_gb + 1) * units.Gi} - (expected_path, - expected_vhd_path) = self._prepare_get_cached_image( - rescue_image_id=fake_rescue_image_id) - - self.assertRaises(exception.ImageUnacceptable, - self.imagecache.get_cached_image, - self.context, self.instance, - fake_rescue_image_id) - - self._mock_fetch.assert_called_once_with( - self.context, fake_rescue_image_id, expected_path, - self.instance.trusted_certs) - self.imagecache._vhdutils.get_vhd_info.assert_called_once_with( - expected_vhd_path) - - @ddt.data(True, False) - def test_age_and_verify_cached_images(self, remove_unused_base_images): - self.flags(remove_unused_base_images=remove_unused_base_images, - group='image_cache') - - fake_images = 
[mock.sentinel.FAKE_IMG1, mock.sentinel.FAKE_IMG2] - fake_used_images = [mock.sentinel.FAKE_IMG1] - - self.imagecache.originals = fake_images - self.imagecache.used_images = fake_used_images - - self.imagecache._update_image_timestamp = mock.Mock() - self.imagecache._remove_if_old_image = mock.Mock() - - self.imagecache._age_and_verify_cached_images( - mock.sentinel.FAKE_CONTEXT, - mock.sentinel.all_instances, - self.tmpdir) - - self.imagecache._update_image_timestamp.assert_called_once_with( - mock.sentinel.FAKE_IMG1) - - if remove_unused_base_images: - self.imagecache._remove_if_old_image.assert_called_once_with( - mock.sentinel.FAKE_IMG2) - else: - self.imagecache._remove_if_old_image.assert_not_called() - - @mock.patch.object(imagecache.os, 'utime') - @mock.patch.object(imagecache.ImageCache, '_get_image_backing_files') - def test_update_image_timestamp(self, mock_get_backing_files, mock_utime): - mock_get_backing_files.return_value = [mock.sentinel.backing_file, - mock.sentinel.resized_file] - - self.imagecache._update_image_timestamp(mock.sentinel.image) - - mock_get_backing_files.assert_called_once_with(mock.sentinel.image) - mock_utime.assert_has_calls([ - mock.call(mock.sentinel.backing_file, None), - mock.call(mock.sentinel.resized_file, None)]) - - def test_get_image_backing_files(self): - image = 'fake-img' - self.imagecache.unexplained_images = ['%s_42' % image, - 'unexplained-img'] - self.imagecache._pathutils.get_image_path.side_effect = [ - mock.sentinel.base_file, mock.sentinel.resized_file] - - backing_files = self.imagecache._get_image_backing_files(image) - - self.assertEqual([mock.sentinel.base_file, mock.sentinel.resized_file], - backing_files) - self.imagecache._pathutils.get_image_path.assert_has_calls( - [mock.call(image), mock.call('%s_42' % image)]) - - @mock.patch.object(imagecache.ImageCache, '_get_image_backing_files') - def test_remove_if_old_image(self, mock_get_backing_files): - mock_get_backing_files.return_value = [mock.sentinel.backing_file, - mock.sentinel.resized_file] - self.imagecache._pathutils.get_age_of_file.return_value = 3600 - - self.imagecache._remove_if_old_image(mock.sentinel.image) - - calls = [mock.call(mock.sentinel.backing_file), - mock.call(mock.sentinel.resized_file)] - self.imagecache._pathutils.get_age_of_file.assert_has_calls(calls) - mock_get_backing_files.assert_called_once_with(mock.sentinel.image) - - def test_remove_old_image(self): - fake_img_path = os.path.join(self.tmpdir, - self.FAKE_IMAGE_REF) - self.imagecache._remove_old_image(fake_img_path) - self.imagecache._pathutils.remove.assert_called_once_with( - fake_img_path) - - @mock.patch.object(imagecache.ImageCache, '_age_and_verify_cached_images') - @mock.patch.object(imagecache.ImageCache, '_list_base_images') - @mock.patch.object(imagecache.ImageCache, '_list_running_instances') - def test_update(self, mock_list_instances, mock_list_images, - mock_age_cached_images): - base_vhd_dir = self.imagecache._pathutils.get_base_vhd_dir.return_value - mock_list_instances.return_value = { - 'used_images': {mock.sentinel.image: mock.sentinel.instances}} - mock_list_images.return_value = { - 'originals': [mock.sentinel.original_image], - 'unexplained_images': [mock.sentinel.unexplained_image]} - - self.imagecache.update(mock.sentinel.context, - mock.sentinel.all_instances) - - self.assertEqual([mock.sentinel.image], - list(self.imagecache.used_images)) - self.assertEqual([mock.sentinel.original_image], - self.imagecache.originals) - self.assertEqual([mock.sentinel.unexplained_image], 
- self.imagecache.unexplained_images) - mock_list_instances.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances) - mock_list_images.assert_called_once_with(base_vhd_dir) - mock_age_cached_images.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances, base_vhd_dir) - - @mock.patch.object(imagecache.os, 'listdir') - def test_list_base_images(self, mock_listdir): - original_image = uuids.fake - unexplained_image = 'just-an-image' - ignored_file = 'foo.bar' - mock_listdir.return_value = ['%s.VHD' % original_image, - '%s.vhdx' % unexplained_image, - ignored_file] - - images = self.imagecache._list_base_images(mock.sentinel.base_dir) - - self.assertEqual([original_image], images['originals']) - self.assertEqual([unexplained_image], images['unexplained_images']) - mock_listdir.assert_called_once_with(mock.sentinel.base_dir) diff --git a/compute_hyperv/tests/unit/test_livemigrationops.py b/compute_hyperv/tests/unit/test_livemigrationops.py deleted file mode 100644 index 23646495..00000000 --- a/compute_hyperv/tests/unit/test_livemigrationops.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -import ddt -from nova import exception -from nova.objects import migrate_data as migrate_data_obj -from os_win import exceptions as os_win_exc - -import compute_hyperv.nova.conf -from compute_hyperv.nova import livemigrationops -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - -CONF = compute_hyperv.nova.conf.CONF - - -@ddt.ddt -class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V LiveMigrationOps class.""" - - _autospec_classes = [ - livemigrationops.pathutils.PathUtils, - livemigrationops.vmops.VMOps, - livemigrationops.volumeops.VolumeOps, - livemigrationops.serialconsoleops.SerialConsoleOps, - livemigrationops.imagecache.ImageCache, - livemigrationops.block_device_manager.BlockDeviceInfoManager, - ] - - def setUp(self): - super(LiveMigrationOpsTestCase, self).setUp() - self.context = 'fake_context' - self._livemigrops = livemigrationops.LiveMigrationOps() - self._pathutils = self._livemigrops._pathutils - self._vmops = self._livemigrops._vmops - - def _test_live_migration(self, side_effect=None, - shared_storage=False, - migrate_data_received=True, - migrate_data_version='1.1'): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_post = mock.MagicMock() - mock_recover = mock.MagicMock() - - mock_copy_dvd_disks = self._livemigrops._vmops.copy_vm_dvd_disks - mock_stop_console_handler = ( - self._livemigrops._serial_console_ops.stop_console_handler) - mock_copy_logs = self._livemigrops._pathutils.copy_vm_console_logs - fake_dest = mock.sentinel.DESTINATION - mock_check_shared_inst_dir = ( - self._pathutils.check_remote_instances_dir_shared) - mock_check_shared_inst_dir.return_value = shared_storage - 
self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [ - side_effect] - - if migrate_data_received: - migrate_data = migrate_data_obj.HyperVLiveMigrateData() - if migrate_data_version != '1.0': - migrate_data.is_shared_instance_path = shared_storage - else: - migrate_data = None - - self._livemigrops.live_migration(context=self.context, - instance_ref=mock_instance, - dest=fake_dest, - post_method=mock_post, - recover_method=mock_recover, - block_migration=( - mock.sentinel.block_migr), - migrate_data=migrate_data) - - if side_effect is os_win_exc.HyperVException: - mock_recover.assert_called_once_with(self.context, mock_instance, - fake_dest, - migrate_data) - mock_post.assert_not_called() - else: - post_call_args = mock_post.call_args_list - self.assertEqual(1, len(post_call_args)) - - post_call_args_list = post_call_args[0][0] - self.assertEqual((self.context, mock_instance, - fake_dest, mock.sentinel.block_migr), - post_call_args_list[:-1]) - # The last argument, the migrate_data object, should be created - # by the callee if not received. - migrate_data_arg = post_call_args_list[-1] - self.assertIsInstance( - migrate_data_arg, - migrate_data_obj.HyperVLiveMigrateData) - self.assertEqual(shared_storage, - migrate_data_arg.is_shared_instance_path) - - if not migrate_data_received or migrate_data_version == '1.0': - mock_check_shared_inst_dir.assert_called_once_with(fake_dest) - else: - self.assertFalse(mock_check_shared_inst_dir.called) - - mock_stop_console_handler.assert_called_once_with(mock_instance.name) - - if not shared_storage: - mock_copy_logs.assert_called_once_with(mock_instance.name, - fake_dest) - mock_copy_dvd_disks.assert_called_once_with(mock_instance.name, - fake_dest) - else: - self.assertFalse(mock_copy_logs.called) - self.assertFalse(mock_copy_dvd_disks.called) - - mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm - mock_live_migr.assert_called_once_with( - mock_instance.name, - fake_dest, - migrate_disks=not shared_storage) - - def test_live_migration(self): - self._test_live_migration(migrate_data_received=False) - - def test_live_migration_old_migrate_data_version(self): - self._test_live_migration(migrate_data_version='1.0') - - def test_live_migration_exception(self): - self._test_live_migration(side_effect=os_win_exc.HyperVException) - - def test_live_migration_shared_storage(self): - self._test_live_migration(shared_storage=True) - - def _test_pre_live_migration(self, phys_disks_attached=True): - mock_get_disk_path_mapping = ( - self._livemigrops._volumeops.get_disk_path_mapping) - mock_get_cached_image = self._livemigrops._imagecache.get_cached_image - - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.image_ref = "fake_image_ref" - mock_get_disk_path_mapping.return_value = ( - mock.sentinel.disk_path_mapping if phys_disks_attached - else None) - bdman = self._livemigrops._block_dev_man - mock_is_boot_from_vol = bdman.is_boot_from_volume - mock_is_boot_from_vol.return_value = None - CONF.set_override('use_cow_images', True) - self._livemigrops.pre_live_migration( - self.context, mock_instance, - block_device_info=mock.sentinel.BLOCK_INFO, - network_info=mock.sentinel.NET_INFO) - - check_config = ( - self._livemigrops._livemigrutils.check_live_migration_config) - check_config.assert_called_once_with() - mock_is_boot_from_vol.assert_called_once_with( - mock.sentinel.BLOCK_INFO) - mock_get_cached_image.assert_called_once_with(self.context, - mock_instance) - 
self._livemigrops._volumeops.connect_volumes.assert_called_once_with( - mock.sentinel.BLOCK_INFO) - mock_get_disk_path_mapping.assert_called_once_with( - mock.sentinel.BLOCK_INFO, block_dev_only=True) - if phys_disks_attached: - livemigrutils = self._livemigrops._livemigrutils - livemigrutils.create_planned_vm.assert_called_once_with( - mock_instance.name, - mock_instance.host, - mock.sentinel.disk_path_mapping) - - def test_pre_live_migration(self): - self._test_pre_live_migration() - - def test_pre_live_migration_invalid_disk_mapping(self): - self._test_pre_live_migration(phys_disks_attached=False) - - def _test_post_live_migration(self, shared_storage=False): - migrate_data = migrate_data_obj.HyperVLiveMigrateData( - is_shared_instance_path=shared_storage) - - self._livemigrops.post_live_migration( - self.context, mock.sentinel.instance, - mock.sentinel.block_device_info, - migrate_data) - mock_disconnect_volumes = ( - self._livemigrops._volumeops.disconnect_volumes) - mock_disconnect_volumes.assert_called_once_with( - mock.sentinel.block_device_info) - mock_get_inst_dir = self._pathutils.get_instance_dir - - if not shared_storage: - mock_get_inst_dir.assert_called_once_with( - mock.sentinel.instance.name, - create_dir=False, remove_dir=True) - else: - self.assertFalse(mock_get_inst_dir.called) - - def test_post_block_migration(self): - self._test_post_live_migration() - - def test_post_live_migration_shared_storage(self): - self._test_post_live_migration(shared_storage=True) - - @mock.patch.object(migrate_data_obj, 'HyperVLiveMigrateData') - def test_check_can_live_migrate_destination(self, mock_migr_data_cls): - mock_instance = fake_instance.fake_instance_obj(self.context) - migr_data = self._livemigrops.check_can_live_migrate_destination( - mock.sentinel.context, mock_instance, mock.sentinel.src_comp_info, - mock.sentinel.dest_comp_info) - - mock_check_shared_inst_dir = ( - self._pathutils.check_remote_instances_dir_shared) - mock_check_shared_inst_dir.assert_called_once_with(mock_instance.host) - - self.assertEqual(mock_migr_data_cls.return_value, migr_data) - self.assertEqual(mock_check_shared_inst_dir.return_value, - migr_data.is_shared_instance_path) - - def test_check_can_live_migrate_destination_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_check_shared_inst_dir = ( - self._pathutils.check_remote_instances_dir_shared) - mock_check_shared_inst_dir.side_effect = OSError - - self.assertRaises( - exception.MigrationPreCheckError, - self._livemigrops.check_can_live_migrate_destination, - mock.sentinel.context, mock_instance, mock.sentinel.src_comp_info, - mock.sentinel.dest_comp_info) - - def test_post_live_migration_at_destination(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._livemigrops.post_live_migration_at_destination( - self.context, mock_instance, - network_info=mock.sentinel.NET_INFO, - block_migration=mock.sentinel.BLOCK_INFO) - self._livemigrops._vmops.plug_vifs.assert_called_once_with( - mock_instance, mock.sentinel.NET_INFO) - self._vmops.configure_instance_metrics.assert_called_once_with( - mock_instance.name) diff --git a/compute_hyperv/tests/unit/test_migrationops.py b/compute_hyperv/tests/unit/test_migrationops.py deleted file mode 100644 index d8d6e6fb..00000000 --- a/compute_hyperv/tests/unit/test_migrationops.py +++ /dev/null @@ -1,647 +0,0 @@ -# Copyright 2014 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock - -import ddt -from nova import block_device -from nova import exception -from nova.virt import driver -from os_win import exceptions as os_win_exc -from oslo_utils import units - -from compute_hyperv.nova import constants -from compute_hyperv.nova import migrationops -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - - -@ddt.ddt -class MigrationOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V MigrationOps class.""" - - _autospec_classes = [ - migrationops.pathutils.PathUtils, - migrationops.volumeops.VolumeOps, - migrationops.vmops.VMOps, - migrationops.imagecache.ImageCache, - migrationops.block_device_manager.BlockDeviceInfoManager, - ] - - _FAKE_DISK = 'fake_disk' - _FAKE_TIMEOUT = 10 - _FAKE_RETRY_INTERVAL = 5 - - def setUp(self): - super(MigrationOpsTestCase, self).setUp() - self.context = 'fake-context' - - self._migrationops = migrationops.MigrationOps() - self._vmops = self._migrationops._vmops - self._vmutils = self._migrationops._vmutils - self._pathutils = self._migrationops._pathutils - self._vhdutils = self._migrationops._vhdutils - self._volumeops = self._migrationops._volumeops - self._imagecache = self._migrationops._imagecache - self._block_dev_man = self._migrationops._block_dev_man - - def test_move_vm_files(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - - vm_files_path = self._migrationops._move_vm_files(mock_instance) - - mock_get_inst_dir = self._migrationops._pathutils.get_instance_dir - mock_get_inst_dir.assert_called_once_with(mock_instance.name) - mock_get_revert_dir = ( - self._migrationops._pathutils.get_instance_migr_revert_dir) - mock_get_revert_dir.assert_called_once_with( - mock_get_inst_dir.return_value, remove_dir=True, create_dir=True) - mock_get_export_dir = self._migrationops._pathutils.get_export_dir - mock_get_export_dir.assert_called_once_with( - instance_dir=mock_get_revert_dir.return_value, create_dir=True) - - mock_move = self._migrationops._pathutils.move_folder_files - mock_move.assert_called_once_with(mock_get_inst_dir.return_value, - mock_get_revert_dir.return_value) - copy_config_files = self._migrationops._pathutils.copy_vm_config_files - copy_config_files.assert_called_once_with( - mock_instance.name, mock_get_export_dir.return_value) - self.assertEqual(mock_get_revert_dir.return_value, vm_files_path) - - @ddt.data({}, - {'ephemerals_size': 2}, - {'ephemerals_size': 3, 'flavor_eph_size': 0}, - {'ephemerals_size': 3, 'expect_invalid_flavor': True}, - {'current_root_gb': 3, 'expect_invalid_flavor': True}, - {'current_root_gb': 3, 'boot_from_vol': True}) - @ddt.unpack - @mock.patch.object(driver, 'block_device_info_get_ephemerals') - @mock.patch.object(block_device, 'get_bdm_ephemeral_disk_size') - def test_check_target_flavor(self, mock_get_eph_size, mock_get_eph, - ephemerals_size=0, - flavor_eph_size=2, - flavor_root_gb=2, - current_root_gb=1, - 
boot_from_vol=False, - expect_invalid_flavor=False): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.root_gb = current_root_gb - mock_flavor = mock.MagicMock(root_gb=flavor_root_gb, - ephemeral_gb=flavor_eph_size) - - mock_get_eph_size.return_value = ephemerals_size - self._block_dev_man.is_boot_from_volume.return_value = boot_from_vol - - if expect_invalid_flavor: - self.assertRaises(exception.InstanceFaultRollback, - self._migrationops._check_target_flavor, - mock_instance, mock_flavor, - mock.sentinel.block_device_info) - else: - self._migrationops._check_target_flavor( - mock_instance, mock_flavor, mock.sentinel.block_device_info) - - mock_get_eph.assert_called_once_with(mock.sentinel.block_device_info) - mock_get_eph_size.assert_called_once_with(mock_get_eph.return_value) - self._block_dev_man.is_boot_from_volume.assert_called_once_with( - mock.sentinel.block_device_info) - - def test_check_and_attach_config_drive(self): - mock_instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - mock_instance.config_drive = 'True' - - self._migrationops._check_and_attach_config_drive( - mock_instance, mock.sentinel.vm_gen) - - self._migrationops._vmops.attach_config_drive.assert_called_once_with( - mock_instance, - self._migrationops._pathutils.lookup_configdrive_path.return_value, - mock.sentinel.vm_gen) - - def test_check_and_attach_config_drive_unknown_path(self): - instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - instance.config_drive = 'True' - self._migrationops._pathutils.lookup_configdrive_path.return_value = ( - None) - self.assertRaises(exception.ConfigDriveNotFound, - self._migrationops._check_and_attach_config_drive, - instance, - mock.sentinel.FAKE_VM_GEN) - - @mock.patch.object(migrationops.MigrationOps, '_move_vm_files') - @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor') - def test_migrate_disk_and_power_off(self, mock_check_flavor, - mock_move_vm_files): - instance = mock.MagicMock() - instance.system_metadata = {} - flavor = mock.MagicMock() - network_info = mock.MagicMock() - - disk_info = self._migrationops.migrate_disk_and_power_off( - self.context, instance, mock.sentinel.FAKE_DEST, flavor, - network_info, mock.sentinel.bdi, - self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) - - self.assertEqual(mock_move_vm_files.return_value, disk_info) - mock_check_flavor.assert_called_once_with( - instance, flavor, mock.sentinel.bdi) - self._migrationops._vmops.power_off.assert_called_once_with( - instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) - mock_move_vm_files.assert_called_once_with(instance) - self.assertEqual(mock_move_vm_files.return_value, - instance.system_metadata['backup_location']) - instance.save.assert_called_once_with() - self._migrationops._vmops.destroy.assert_called_once_with( - instance, network_info, mock.sentinel.bdi, destroy_disks=True, - cleanup_migration_files=False) - - def test_confirm_migration(self): - mock_instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - fake_path_revert = 'fake_path_revert' - mock_instance.system_metadata['backup_location'] = fake_path_revert - - self._migrationops.confirm_migration( - context=self.context, - migration=mock.sentinel.migration, instance=mock_instance, - network_info=mock.sentinel.network_info) - - get_export_dir = self._migrationops._pathutils.get_export_dir - 
get_export_dir.assert_called_once_with(instance_dir=fake_path_revert) - self._migrationops._pathutils.check_dir.assert_has_calls([ - mock.call(get_export_dir.return_value, remove_dir=True), - mock.call(fake_path_revert, remove_dir=True)]) - - def test_revert_migration_files(self): - mock_instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - fake_path_revert = 'fake_path_revert' - mock_instance.system_metadata['backup_location'] = fake_path_revert - - instance_path = self._migrationops._revert_migration_files( - mock_instance) - - expected_instance_path = fake_path_revert.rstrip('_revert') - self.assertEqual(expected_instance_path, instance_path) - self._migrationops._pathutils.rename.assert_called_once_with( - fake_path_revert, expected_instance_path) - - @mock.patch.object(migrationops.MigrationOps, '_import_and_setup_vm') - @mock.patch.object(migrationops.MigrationOps, '_revert_migration_files') - def test_finish_revert_migration(self, mock_revert_migration_files, - mock_import_and_setup_vm): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._migrationops.finish_revert_migration( - context=self.context, instance=mock_instance, - network_info=mock.sentinel.network_info, - block_device_info=mock.sentinel.block_device_info, - power_on=True) - - mock_revert_migration_files.assert_called_once_with( - mock_instance) - image_meta = self._imagecache.get_image_details.return_value - mock_import_and_setup_vm.assert_called_once_with( - self.context, mock_instance, - mock_revert_migration_files.return_value, - image_meta, mock.sentinel.block_device_info) - self._migrationops._vmops.power_on.assert_called_once_with( - mock_instance, network_info=mock.sentinel.network_info) - - def test_merge_base_vhd(self): - fake_diff_vhd_path = 'fake/diff/path' - fake_base_vhd_path = 'fake/base/path' - base_vhd_copy_path = os.path.join( - os.path.dirname(fake_diff_vhd_path), - os.path.basename(fake_base_vhd_path)) - - self._migrationops._merge_base_vhd(diff_vhd_path=fake_diff_vhd_path, - base_vhd_path=fake_base_vhd_path) - - self._migrationops._pathutils.copyfile.assert_called_once_with( - fake_base_vhd_path, base_vhd_copy_path) - recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd - recon_parent_vhd.assert_called_once_with(fake_diff_vhd_path, - base_vhd_copy_path) - self._migrationops._vhdutils.merge_vhd.assert_called_once_with( - fake_diff_vhd_path) - self._migrationops._pathutils.rename.assert_called_once_with( - base_vhd_copy_path, fake_diff_vhd_path) - - def test_merge_base_vhd_exception(self): - fake_diff_vhd_path = 'fake/diff/path' - fake_base_vhd_path = 'fake/base/path' - base_vhd_copy_path = os.path.join( - os.path.dirname(fake_diff_vhd_path), - os.path.basename(fake_base_vhd_path)) - - self._migrationops._vhdutils.reconnect_parent_vhd.side_effect = ( - os_win_exc.HyperVException) - self._migrationops._pathutils.exists.return_value = True - - self.assertRaises(os_win_exc.HyperVException, - self._migrationops._merge_base_vhd, - fake_diff_vhd_path, fake_base_vhd_path) - self._migrationops._pathutils.exists.assert_called_once_with( - base_vhd_copy_path) - self._migrationops._pathutils.remove.assert_called_once_with( - base_vhd_copy_path) - - @mock.patch.object(migrationops.MigrationOps, '_resize_vhd') - def test_check_resize_vhd(self, mock_resize_vhd): - self._migrationops._check_resize_vhd( - vhd_path=mock.sentinel.vhd_path, vhd_info={'VirtualSize': 1}, - new_size=2) - 
mock_resize_vhd.assert_called_once_with(mock.sentinel.vhd_path, 2) - - def test_check_resize_vhd_exception(self): - self.assertRaises(exception.CannotResizeDisk, - self._migrationops._check_resize_vhd, - mock.sentinel.vhd_path, - {'VirtualSize': 1}, 0) - - @mock.patch.object(migrationops.MigrationOps, '_merge_base_vhd') - def test_resize_vhd(self, mock_merge_base_vhd): - fake_vhd_path = 'fake/path.vhd' - new_vhd_size = 2 - self._migrationops._resize_vhd(vhd_path=fake_vhd_path, - new_size=new_vhd_size) - - get_vhd_parent_path = self._migrationops._vhdutils.get_vhd_parent_path - get_vhd_parent_path.assert_called_once_with(fake_vhd_path) - mock_merge_base_vhd.assert_called_once_with( - fake_vhd_path, - self._migrationops._vhdutils.get_vhd_parent_path.return_value) - self._migrationops._vhdutils.resize_vhd.assert_called_once_with( - fake_vhd_path, new_vhd_size) - - def test_check_base_disk(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_src_vhd_path = 'fake/src/path' - fake_base_vhd = 'fake/vhd' - get_cached_image = self._migrationops._imagecache.get_cached_image - get_cached_image.return_value = fake_base_vhd - - self._migrationops._check_base_disk( - context=self.context, instance=mock_instance, - diff_vhd_path=mock.sentinel.diff_vhd_path, - src_base_disk_path=fake_src_vhd_path) - - get_cached_image.assert_called_once_with(self.context, mock_instance) - recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd - recon_parent_vhd.assert_called_once_with( - mock.sentinel.diff_vhd_path, fake_base_vhd) - - @ddt.data((False, '\\\\fake-srv\\C$\\inst_dir_0000000e_revert', True), - (False, '\\\\fake-srv\\share_path\\inst_dir_0000000e_revert'), - (True, 'C:\\fake_inst_dir_0000000e_revert')) - @ddt.unpack - def test_migrate_disks_from_source(self, move_disks_on_migration, - source_inst_dir, is_remote_path=False): - self.flags(move_disks_on_cold_migration=move_disks_on_migration, - group='hyperv') - mock_migration = mock.MagicMock() - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_get_remote_path = self._migrationops._pathutils.get_remote_path - mock_get_remote_path.return_value = source_inst_dir - - mock_get_export_dir = self._migrationops._pathutils.get_export_dir - mock_get_export_dir.side_effect = [mock.sentinel.source_export_dir, - mock.sentinel.dest_export_dir] - - instance_dir = self._migrationops._migrate_disks_from_source( - mock_migration, mock_instance, mock.sentinel.source_dir) - - mock_get_remote_path.assert_called_once_with( - mock_migration.source_compute, mock.sentinel.source_dir) - - if move_disks_on_migration or is_remote_path: - mock_get_inst_dir = self._migrationops._pathutils.get_instance_dir - mock_get_inst_dir.assert_called_once_with( - mock_instance.name, create_dir=True, remove_dir=True) - expected_inst_dir = mock_get_inst_dir.return_value - else: - expected_inst_dir = source_inst_dir[0: - len('_revert')] - self._migrationops._pathutils.check_dir.assert_called_once_with( - expected_inst_dir, create_dir=True) - - mock_get_export_dir.assert_has_calls([ - mock.call(instance_dir=mock_get_remote_path.return_value), - mock.call(instance_dir=expected_inst_dir)]) - - mock_copy = self._migrationops._pathutils.copy_folder_files - mock_copy.assert_called_once_with(mock_get_remote_path.return_value, - expected_inst_dir) - self._migrationops._pathutils.copy_dir.assert_called_once_with( - mock.sentinel.source_export_dir, mock.sentinel.dest_export_dir) - self.assertEqual(expected_inst_dir, instance_dir) - - 
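The data-driven tests above and below lean on the ddt library: @ddt.data supplies the parameter sets and @ddt.unpack spreads each tuple or dict into the test's arguments. A minimal standalone sketch of the same pattern, reusing the _revert suffix convention from the surrounding tests (the class and values here are illustrative only, not from the original tree):

import unittest

import ddt


@ddt.ddt
class SuffixStripTestCase(unittest.TestCase):

    @ddt.data({'path': 'C:\\inst_dir_0000000e_revert',
               'expected': 'C:\\inst_dir_0000000e'},
              {'path': '\\\\srv\\share\\dir_revert',
               'expected': '\\\\srv\\share\\dir'})
    @ddt.unpack
    def test_strip_revert_suffix(self, path, expected):
        # Each dict above becomes one generated test method; @ddt.unpack
        # spreads the dict keys into keyword arguments.
        self.assertEqual(expected, path[:-len('_revert')])

Each entry passed to @ddt.data becomes its own generated test method, so a failure report names the exact parameter set that broke.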
@mock.patch.object(migrationops.MigrationOps, '_import_and_setup_vm') - @mock.patch.object(migrationops.MigrationOps, '_migrate_disks_from_source') - def test_finish_migration(self, mock_migrate_disks_from_source, - mock_import_and_setup_vm): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_migration = mock.MagicMock() - - self._migrationops.finish_migration( - context=self.context, migration=mock_migration, - instance=mock_instance, disk_info=mock.sentinel.disk_info, - network_info=mock.sentinel.network_info, - image_meta=mock.sentinel.image_meta, resize_instance=False, - block_device_info=mock.sentinel.block_device_info) - - mock_migrate_disks_from_source.assert_called_once_with( - mock_migration, mock_instance, mock.sentinel.disk_info) - mock_import_and_setup_vm.assert_called_once_with( - self.context, mock_instance, - mock_migrate_disks_from_source.return_value, - mock.sentinel.image_meta, mock.sentinel.block_device_info, True) - self._vmops.power_on.assert_called_once_with( - mock_instance, network_info=mock.sentinel.network_info) - - @mock.patch.object(migrationops.MigrationOps, '_check_ephemeral_disks') - @mock.patch.object(migrationops.MigrationOps, '_check_and_update_disks') - @mock.patch.object(migrationops.MigrationOps, '_update_disk_image_paths') - @mock.patch.object(migrationops.MigrationOps, '_import_vm') - def test_import_and_setup_vm(self, mock_import_vm, - mock_update_disk_image_paths, - mock_check_and_update_disks, - mock_check_eph_disks): - block_device_info = {'ephemerals': mock.sentinel.ephemerals} - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._migrationops._import_and_setup_vm( - self.context, mock_instance, mock.sentinel.instance_dir, - mock.sentinel.image_meta, block_device_info, - resize_instance=mock.sentinel.resize_instance) - - get_image_vm_gen = self._vmops.get_image_vm_generation - get_image_vm_gen.assert_called_once_with(mock_instance.uuid, - mock.sentinel.image_meta) - mock_import_vm.assert_called_once_with(mock.sentinel.instance_dir) - self._migrationops._vmops.update_vm_resources.assert_called_once_with( - mock_instance, get_image_vm_gen.return_value, - mock.sentinel.image_meta, mock.sentinel.instance_dir, - mock.sentinel.resize_instance) - self._migrationops._volumeops.connect_volumes.assert_called_once_with( - block_device_info) - mock_update_disk_image_paths.assert_called_once_with( - mock_instance, mock.sentinel.instance_dir) - mock_check_and_update_disks.assert_called_once_with( - self.context, mock_instance, get_image_vm_gen.return_value, - mock.sentinel.image_meta, block_device_info, - resize_instance=mock.sentinel.resize_instance) - self._volumeops.fix_instance_volume_disk_paths.assert_called_once_with( - mock_instance.name, block_device_info) - self._migrationops._migrationutils.realize_vm.assert_called_once_with( - mock_instance.name) - mock_check_eph_disks.assert_called_once_with( - mock_instance, mock.sentinel.ephemerals, - mock.sentinel.resize_instance) - self._migrationops._vmops.configure_remotefx.assert_called_once_with( - mock_instance, get_image_vm_gen.return_value, - mock.sentinel.resize_instance) - self._vmops.configure_instance_metrics.assert_called_once_with( - mock_instance.name) - - def test_import_vm(self): - self._migrationops._import_vm(mock.sentinel.instance_dir) - - self._pathutils.get_instance_snapshot_dir.assert_called_once_with( - instance_dir=mock.sentinel.instance_dir) - self._pathutils.get_vm_config_file.assert_called_once_with( - 
self._migrationops._pathutils.get_export_dir.return_value) - mock_import_vm_definition = ( - self._migrationops._migrationutils.import_vm_definition) - mock_import_vm_definition.assert_called_once_with( - self._pathutils.get_vm_config_file.return_value, - self._pathutils.get_instance_snapshot_dir.return_value) - self._migrationops._pathutils.get_export_dir.assert_has_calls([ - mock.call(instance_dir=mock.sentinel.instance_dir), - mock.call(instance_dir=mock.sentinel.instance_dir, - remove_dir=True)]) - - @mock.patch('os.path.exists') - def test_update_disk_image_paths(self, mock_exists): - mock_instance = fake_instance.fake_instance_obj(self.context) - inst_dir = "instances" - expected_inst_dir = "expected_instances" - config_drive_iso = os.path.join(inst_dir, 'configdrive.iso') - expected_config_drive_iso = os.path.join(expected_inst_dir, - 'configdrive.iso') - ephemeral_disk = os.path.join(inst_dir, 'eph1.vhdx') - expected_ephemeral_disk = os.path.join(expected_inst_dir, 'eph1.vhdx') - other_disk = '//some/path/to/vol-UUID.vhdx' - disk_files = [config_drive_iso, ephemeral_disk, other_disk] - - self._vmutils.get_vm_storage_paths.return_value = ( - disk_files, mock.sentinel.volume_drives) - mock_exists.return_value = True - - self._migrationops._update_disk_image_paths(mock_instance, - expected_inst_dir) - - self._vmutils.get_vm_storage_paths.assert_called_once_with( - mock_instance.name) - expected_calls = [ - mock.call(config_drive_iso, expected_config_drive_iso, - is_physical=False), - mock.call(ephemeral_disk, expected_ephemeral_disk, - is_physical=False)] - self._vmutils.update_vm_disk_path.assert_has_calls(expected_calls) - - @mock.patch('os.path.exists') - def test_update_disk_image_paths_exception(self, mock_exists): - mock_instance = fake_instance.fake_instance_obj(self.context) - inst_dir = "instances" - disk_files = [os.path.join(inst_dir, "root.vhdx")] - - self._vmutils.get_vm_storage_paths.return_value = ( - disk_files, mock.sentinel.volume_drives) - self._pathutils.get_instance_dir.return_value = inst_dir - mock_exists.return_value = False - - self.assertRaises(exception.DiskNotFound, - self._migrationops._update_disk_image_paths, - mock_instance, inst_dir) - - self._vmutils.get_vm_storage_paths.assert_called_once_with( - mock_instance.name) - self.assertFalse(self._vmutils.update_vm_disk_path.called) - - @ddt.data(constants.DISK, mock.sentinel.root_type) - @mock.patch.object(migrationops.MigrationOps, '_check_base_disk') - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - def test_check_and_update_disks(self, root_type, - mock_check_resize_vhd, - mock_check_base_disk): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.root_gb = 1 - root_device = {'type': root_type} - block_device_info = {'root_disk': root_device, - 'ephemerals': mock.sentinel.ephemerals} - expected_check_resize = [] - expected_get_info = [] - - self._migrationops._check_and_update_disks( - self.context, mock_instance, mock.sentinel.vm_gen, - mock.sentinel.image_meta, block_device_info, resize_instance=True) - - mock_bdi = self._block_dev_man.validate_and_update_bdi - mock_bdi.assert_called_once_with( - mock_instance, mock.sentinel.image_meta, mock.sentinel.vm_gen, - block_device_info) - - if root_device['type'] == constants.DISK: - root_device_path = ( - self._pathutils.lookup_root_vhd_path.return_value) - self._pathutils.lookup_root_vhd_path.assert_called_once_with( - mock_instance.name) - expected_get_info = [mock.call(root_device_path)] - - 
mock_vhd_info = self._vhdutils.get_vhd_info.return_value - mock_vhd_info.get.assert_called_once_with("ParentPath") - mock_check_base_disk.assert_called_once_with( - self.context, mock_instance, root_device_path, - mock_vhd_info.get.return_value) - expected_check_resize.append( - mock.call(root_device_path, mock_vhd_info, - mock_instance.flavor.root_gb * units.Gi)) - else: - self.assertFalse(self._pathutils.lookup_root_vhd_path.called) - - mock_check_resize_vhd.assert_has_calls(expected_check_resize) - self._vhdutils.get_vhd_info.assert_has_calls( - expected_get_info) - - def test_check_and_update_disks_not_found(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device = {'type': constants.DISK} - block_device_info = {'root_disk': root_device} - - self._pathutils.lookup_root_vhd_path.return_value = None - - self.assertRaises(exception.DiskNotFound, - self._migrationops._check_and_update_disks, - self.context, mock_instance, mock.sentinel.vm_gen, - mock.sentinel.image_meta, block_device_info, - resize_instance=True) - - self._pathutils.get_instance_dir.assert_called_once_with( - mock_instance.name) - - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - @mock.patch.object(migrationops.LOG, 'warning') - def test_check_ephemeral_disks_multiple_eph_warn(self, mock_warn, - mock_check_resize_vhd): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.ephemeral_gb = 3 - mock_ephemerals = [{'size': 1}, {'size': 1}] - - self._migrationops._check_ephemeral_disks(mock_instance, - mock_ephemerals, - True) - - mock_warn.assert_called_once_with( - "Cannot resize multiple ephemeral disks for instance.", - instance=mock_instance) - - def test_check_ephemeral_disks_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context, - ephemeral_gb=1) - mock_ephemerals = [dict(size=1)] - - lookup_eph_path = ( - self._migrationops._pathutils.lookup_ephemeral_vhd_path) - lookup_eph_path.return_value = None - - self.assertRaises(exception.DiskNotFound, - self._migrationops._check_ephemeral_disks, - mock_instance, mock_ephemerals) - - @ddt.data({}, - {'existing_eph_path': mock.sentinel.eph_path}, - {'existing_eph_path': mock.sentinel.eph_path, - 'new_eph_size': 0}, - {'use_default_eph': True}) - @ddt.unpack - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - def test_check_ephemeral_disks(self, mock_check_resize_vhd, - existing_eph_path=None, new_eph_size=42, - use_default_eph=False): - mock_vmops = self._migrationops._vmops - - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.ephemeral_gb = new_eph_size - eph = {} - mock_ephemerals = [eph] if not use_default_eph else [] - - mock_pathutils = self._migrationops._pathutils - lookup_eph_path = mock_pathutils.lookup_ephemeral_vhd_path - lookup_eph_path.return_value = existing_eph_path - mock_get_eph_vhd_path = mock_pathutils.get_ephemeral_vhd_path - mock_get_eph_vhd_path.return_value = mock.sentinel.get_path - - mock_vhdutils = self._migrationops._vhdutils - mock_get_vhd_format = mock_vhdutils.get_best_supported_vhd_format - mock_get_vhd_format.return_value = mock.sentinel.vhd_format - - self._vmutils.get_free_controller_slot.return_value = ( - mock.sentinel.ctrl_slot) - - attached_eph_paths = [mock.sentinel.eph_path, - mock.sentinel.default_eph_path] - mock_vmops.get_attached_ephemeral_disks.return_value = ( - attached_eph_paths) - - self._migrationops._check_ephemeral_disks(mock_instance, - mock_ephemerals, - True) - - if not 
use_default_eph: - self.assertEqual(mock_instance.ephemeral_gb, eph['size']) - if not existing_eph_path: - mock_vmops.create_ephemeral_disk.assert_called_once_with( - mock_instance.name, mock.ANY) - self._vmutils.get_vm_scsi_controller.assert_called_once_with( - mock_instance.name) - self._vmutils.get_free_controller_slot.assert_called_once_with( - self._vmutils.get_vm_scsi_controller.return_value) - - create_eph_args = mock_vmops.create_ephemeral_disk.call_args_list - created_eph = create_eph_args[0][0][1] - self.assertEqual(mock.sentinel.vhd_format, created_eph['format']) - self.assertEqual(mock.sentinel.get_path, created_eph['path']) - self.assertEqual(constants.CTRL_TYPE_SCSI, - created_eph['disk_bus']) - self.assertEqual(mock.sentinel.ctrl_slot, - created_eph['ctrl_disk_addr']) - elif new_eph_size: - mock_check_resize_vhd.assert_called_once_with( - existing_eph_path, - self._migrationops._vhdutils.get_vhd_info.return_value, - mock_instance.ephemeral_gb * units.Gi) - self.assertEqual(existing_eph_path, eph['path']) - else: - self._vmutils.detach_vm_disk.assert_has_calls( - [mock.call(mock_instance.name, eph_path, - is_physical=False) - for eph_path in attached_eph_paths], - any_order=True) - self._migrationops._pathutils.remove.assert_has_calls( - [mock.call(eph_path) for eph_path in attached_eph_paths], - any_order=True) diff --git a/compute_hyperv/tests/unit/test_pathutils.py b/compute_hyperv/tests/unit/test_pathutils.py deleted file mode 100644 index 8c61c407..00000000 --- a/compute_hyperv/tests/unit/test_pathutils.py +++ /dev/null @@ -1,491 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import time -from unittest import mock - -import ddt -from nova import exception -from os_win import exceptions as os_win_exc -from oslo_utils import fileutils -from six.moves import builtins - -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils -from compute_hyperv.tests.unit import test_base - - -@ddt.ddt -class PathUtilsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V PathUtils class.""" - - def setUp(self): - super(PathUtilsTestCase, self).setUp() - self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir') - self.fake_instance_name = 'fake_instance_name' - - self._pathutils = pathutils.PathUtils() - - @mock.patch.object(pathutils.PathUtils, 'copy') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(os, 'listdir') - def test_copy_folder_files(self, mock_listdir, mock_isfile, mock_copy): - src_dir = 'src' - dest_dir = 'dest' - fname = 'tmp_file.txt' - subdir = 'tmp_folder' - src_fname = os.path.join(src_dir, fname) - dest_fname = os.path.join(dest_dir, fname) - - # making sure src_subdir is not copied. 
- mock_listdir.return_value = [fname, subdir] - mock_isfile.side_effect = [True, False] - - self._pathutils.copy_folder_files(src_dir, dest_dir) - mock_copy.assert_called_once_with(src_fname, dest_fname) - - @ddt.data({'conf_instances_path': r'c:\inst_dir', - 'expected_dir': r'c:\inst_dir'}, - {'conf_instances_path': r'c:\inst_dir', - 'remote_server': 'fake_remote', - 'expected_dir': r'\\fake_remote\c$\inst_dir'}, - {'conf_instances_path': r'\\fake_share\fake_path', - 'remote_server': 'fake_remote', - 'expected_dir': r'\\fake_share\fake_path'}, - {'conf_instances_path_share': r'inst_share', - 'remote_server': 'fake_remote', - 'expected_dir': r'\\fake_remote\inst_share'}) - @ddt.unpack - def test_get_instances_dir(self, expected_dir, remote_server=None, - conf_instances_path='', - conf_instances_path_share=''): - self.flags(instances_path=conf_instances_path) - self.flags(instances_path_share=conf_instances_path_share, - group='hyperv') - - instances_dir = self._pathutils.get_instances_dir(remote_server) - - self.assertEqual(expected_dir, instances_dir) - - def test_get_remote_path_share(self): - fake_remote_path = '\\\\fake_path' - - actual_path = self._pathutils.get_remote_path(mock.sentinel.server, - fake_remote_path) - self.assertEqual(fake_remote_path, actual_path) - - @mock.patch.object(pathutils.os, 'getenv') - def test_get_remote_path_csv(self, mock_getenv): - mock_getenv.return_value = 'C:' - fake_server = 'fake_server' - fake_remote_path = 'C:\\ClusterStorage\\Volume1\\fake_dir' - - actual_path = self._pathutils.get_remote_path(fake_server, - fake_remote_path) - - self.assertEqual(fake_remote_path, actual_path) - mock_getenv.assert_called_once_with('SYSTEMDRIVE', 'C:') - - def test_get_remote_path_normal(self): - fake_server = 'fake_server' - fake_remote_path = 'C:\\fake_path' - - actual_path = self._pathutils.get_remote_path(fake_server, - fake_remote_path) - - expected_path = ('\\\\%(remote_server)s\\%(path)s' % - dict(remote_server=fake_server, - path=fake_remote_path.replace(':', '$'))) - self.assertEqual(expected_path, actual_path) - - @mock.patch.object(pathutils.PathUtils, 'get_instances_dir') - @mock.patch.object(pathutils.PathUtils, 'check_dir') - def test_get_instances_sub_dir(self, mock_check_dir, - mock_get_instances_dir): - fake_instances_dir = 'fake_instances_dir' - mock_get_instances_dir.return_value = fake_instances_dir - - sub_dir = 'fake_subdir' - expected_path = os.path.join(fake_instances_dir, sub_dir) - - path = self._pathutils._get_instances_sub_dir( - sub_dir, - remote_server=mock.sentinel.remote_server, - create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - self.assertEqual(expected_path, path) - - mock_get_instances_dir.assert_called_once_with( - mock.sentinel.remote_server) - mock_check_dir.assert_called_once_with( - expected_path, - create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - @ddt.data({'create_dir': True, 'remove_dir': False}, - {'create_dir': False, 'remove_dir': True}) - @ddt.unpack - @mock.patch.object(pathutils.PathUtils, 'check_create_dir') - @mock.patch.object(pathutils.PathUtils, 'check_remove_dir') - def test_check_dir(self, mock_check_remove_dir, mock_check_create_dir, - create_dir, remove_dir): - self._pathutils.check_dir( - mock.sentinel.dir, create_dir=create_dir, remove_dir=remove_dir) - - if create_dir: - mock_check_create_dir.assert_called_once_with(mock.sentinel.dir) - else: - self.assertFalse(mock_check_create_dir.called) - - if remove_dir: - 
mock_check_remove_dir.assert_called_once_with(mock.sentinel.dir) - else: - self.assertFalse(mock_check_remove_dir.called) - - @mock.patch.object(pathutils.PathUtils, 'check_create_dir') - def test_check_dir_exc(self, mock_check_create_dir): - - class FakeWindowsError(Exception): - def __init__(self, winerror=None): - self.winerror = winerror - - mock_check_create_dir.side_effect = FakeWindowsError( - pathutils.ERROR_INVALID_NAME) - with mock.patch.object(builtins, 'WindowsError', - FakeWindowsError, create=True): - self.assertRaises(exception.AdminRequired, - self._pathutils.check_dir, - mock.sentinel.dir_name, - create_dir=True) - - @mock.patch.object(pathutils.PathUtils, 'check_dir') - def test_get_instance_migr_revert_dir(self, mock_check_dir): - dir_name = 'fake_dir' - expected_dir_name = '%s_revert' % dir_name - - revert_dir = self._pathutils.get_instance_migr_revert_dir( - dir_name, create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - self.assertEqual(expected_dir_name, revert_dir) - mock_check_dir.assert_called_once_with(expected_dir_name, - mock.sentinel.create_dir, - mock.sentinel.remove_dir) - - @ddt.data({}, - {'configured_dir_exists': True}, - {'vm_exists': True}, - {'vm_exists': True, - 'remote_server': mock.sentinel.remote_server}) - @ddt.unpack - @mock.patch.object(pathutils.PathUtils, '_get_instances_sub_dir') - @mock.patch.object(pathutils.PathUtils, 'get_remote_path') - @mock.patch.object(pathutils.PathUtils, 'check_dir') - @mock.patch.object(pathutils.os.path, 'exists') - @mock.patch('os_win.utilsfactory.get_vmutils') - def test_get_instance_dir(self, mock_get_vmutils, - mock_exists, - mock_check_dir, - mock_get_remote_path, - mock_get_instances_sub_dir, - configured_dir_exists=False, - remote_server=None, vm_exists=False): - mock_get_instances_sub_dir.return_value = mock.sentinel.configured_dir - mock_exists.return_value = configured_dir_exists - - expected_vmutils = (self._pathutils._vmutils - if not remote_server - else mock_get_vmutils.return_value) - mock_get_root_dir = expected_vmutils.get_vm_config_root_dir - mock_get_root_dir.side_effect = ( - (mock.sentinel.config_root_dir,) - if vm_exists - else os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.instance_name)) - - mock_get_remote_path.return_value = mock.sentinel.remote_root_dir - - instance_dir = self._pathutils.get_instance_dir( - mock.sentinel.instance_name, - remote_server=remote_server, - create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - if configured_dir_exists or not vm_exists: - expected_instance_dir = mock.sentinel.configured_dir - else: - # In this case, we expect the instance location to be - # retrieved from the vm itself. 
- mock_get_root_dir.assert_called_once_with( - mock.sentinel.instance_name) - - if remote_server: - expected_instance_dir = mock.sentinel.remote_root_dir - mock_get_remote_path.assert_called_once_with( - mock.sentinel.remote_server, - mock.sentinel.config_root_dir) - else: - expected_instance_dir = mock.sentinel.config_root_dir - - self.assertEqual(expected_instance_dir, instance_dir) - - mock_get_instances_sub_dir.assert_called_once_with( - mock.sentinel.instance_name, remote_server, - create_dir=False, remove_dir=False) - mock_check_dir.assert_called_once_with( - expected_instance_dir, - create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - def _mock_lookup_configdrive_path(self, ext, rescue=False): - self._pathutils.get_instance_dir = mock.MagicMock( - return_value=self.fake_instance_dir) - - def mock_exists(*args, **kwargs): - path = args[0] - return True if path[(path.rfind('.') + 1):] == ext else False - self._pathutils.exists = mock_exists - configdrive_path = self._pathutils.lookup_configdrive_path( - self.fake_instance_name, rescue) - return configdrive_path - - def _test_lookup_configdrive_path(self, rescue=False): - configdrive_name = 'configdrive' - if rescue: - configdrive_name += '-rescue' - - for format_ext in constants.DISK_FORMAT_MAP: - configdrive_path = self._mock_lookup_configdrive_path(format_ext, - rescue) - expected_path = os.path.join(self.fake_instance_dir, - configdrive_name + '.' + format_ext) - self.assertEqual(expected_path, configdrive_path) - - def test_lookup_configdrive_path(self): - self._test_lookup_configdrive_path() - - def test_lookup_rescue_configdrive_path(self): - self._test_lookup_configdrive_path(rescue=True) - - def test_lookup_configdrive_path_non_exist(self): - self._pathutils.get_instance_dir = mock.MagicMock( - return_value=self.fake_instance_dir) - self._pathutils.exists = mock.MagicMock(return_value=False) - configdrive_path = self._pathutils.lookup_configdrive_path( - self.fake_instance_name) - self.assertIsNone(configdrive_path) - - @mock.patch.object(pathutils.PathUtils, 'check_dir') - @mock.patch.object(pathutils.PathUtils, 'get_instance_dir') - def test_export_dir(self, mock_get_instance_dir, mock_check_dir): - mock_get_instance_dir.return_value = self.fake_instance_dir - - export_dir = self._pathutils.get_export_dir( - mock.sentinel.instance_name, create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - expected_dir = os.path.join(self.fake_instance_dir, 'export') - self.assertEqual(expected_dir, export_dir) - mock_get_instance_dir.assert_called_once_with( - mock.sentinel.instance_name, create_dir=mock.sentinel.create_dir) - mock_check_dir.assert_called_once_with( - expected_dir, create_dir=mock.sentinel.create_dir, - remove_dir=mock.sentinel.remove_dir) - - def test_copy_vm_console_logs(self): - fake_local_logs = [mock.sentinel.log_path, - mock.sentinel.archived_log_path] - fake_remote_logs = [mock.sentinel.remote_log_path, - mock.sentinel.remote_archived_log_path] - - self._pathutils.exists = mock.Mock(return_value=True) - self._pathutils.copy = mock.Mock() - self._pathutils.get_vm_console_log_paths = mock.Mock( - side_effect=[fake_local_logs, fake_remote_logs]) - - self._pathutils.copy_vm_console_logs(mock.sentinel.instance_name, - mock.sentinel.dest_host) - - self._pathutils.get_vm_console_log_paths.assert_has_calls( - [mock.call(mock.sentinel.instance_name), - mock.call(mock.sentinel.instance_name, - remote_server=mock.sentinel.dest_host)]) - self._pathutils.copy.assert_has_calls([ 
- mock.call(mock.sentinel.log_path, - mock.sentinel.remote_log_path), - mock.call(mock.sentinel.archived_log_path, - mock.sentinel.remote_archived_log_path)]) - - @mock.patch.object(pathutils.PathUtils, 'get_base_vhd_dir') - @mock.patch.object(pathutils.PathUtils, 'exists') - def _test_get_image_path(self, mock_exists, mock_get_base_vhd_dir, - found=True): - fake_image_name = 'fake_image_name' - if found: - mock_exists.side_effect = [False, True] - expected_path = os.path.join('fake_base_dir', - 'fake_image_name.vhdx') - else: - mock_exists.return_value = False - expected_path = None - mock_get_base_vhd_dir.return_value = 'fake_base_dir' - - res = self._pathutils.get_image_path(fake_image_name) - - mock_get_base_vhd_dir.assert_called_once_with() - self.assertEqual(expected_path, res) - - def test_get_image_path(self): - self._test_get_image_path() - - def test_get_image_path_not_found(self): - self._test_get_image_path(found=False) - - @mock.patch('os.path.getmtime') - @mock.patch.object(pathutils, 'time') - def test_get_age_of_file(self, mock_time, mock_getmtime): - mock_time.time.return_value = time.time() - mock_getmtime.return_value = mock_time.time.return_value - 42 - - actual_age = self._pathutils.get_age_of_file(mock.sentinel.filename) - self.assertEqual(42, actual_age) - mock_time.time.assert_called_once_with() - mock_getmtime.assert_called_once_with(mock.sentinel.filename) - - @mock.patch('os.path.exists') - @mock.patch('tempfile.NamedTemporaryFile') - def test_check_dirs_shared_storage(self, mock_named_tempfile, - mock_exists): - fake_src_dir = 'fake_src_dir' - fake_dest_dir = 'fake_dest_dir' - - mock_exists.return_value = True - mock_tmpfile = mock_named_tempfile.return_value.__enter__.return_value - mock_tmpfile.name = 'fake_tmp_fname' - expected_src_tmp_path = os.path.join(fake_src_dir, - mock_tmpfile.name) - - self._pathutils.check_dirs_shared_storage( - fake_src_dir, fake_dest_dir) - - mock_named_tempfile.assert_called_once_with(dir=fake_dest_dir) - mock_exists.assert_called_once_with(expected_src_tmp_path) - - @mock.patch.object(pathutils.PathUtils, 'check_dirs_shared_storage') - @mock.patch.object(pathutils.PathUtils, 'get_instances_dir') - def test_check_remote_instances_shared(self, mock_get_instances_dir, - mock_check_dirs_shared_storage): - mock_get_instances_dir.side_effect = [mock.sentinel.local_inst_dir, - mock.sentinel.remote_inst_dir] - - shared_storage = self._pathutils.check_remote_instances_dir_shared( - mock.sentinel.dest) - - self.assertEqual(mock_check_dirs_shared_storage.return_value, - shared_storage) - mock_get_instances_dir.assert_has_calls( - [mock.call(), mock.call(mock.sentinel.dest)]) - mock_check_dirs_shared_storage.assert_called_once_with( - mock.sentinel.local_inst_dir, mock.sentinel.remote_inst_dir) - - @mock.patch.object(os, 'close') - @mock.patch('tempfile.mkstemp') - @mock.patch.object(pathutils.PathUtils, 'get_instance_dir') - def test_check_instance_shared_storage_local(self, mock_get_instance_dir, - mock_mkstemp, mock_close): - mock_instance = mock.Mock() - mock_mkstemp.return_value = (mock.sentinel.tmp_fd, - mock.sentinel.tmp_file) - - ret_val = self._pathutils.check_instance_shared_storage_local( - mock_instance) - exp_ret_val = {'filename': mock.sentinel.tmp_file} - - self.assertEqual(exp_ret_val, ret_val) - mock_get_instance_dir.assert_called_once_with(mock_instance.name) - mock_mkstemp.assert_called_once_with( - dir=mock_get_instance_dir.return_value) - mock_close.assert_called_once_with(mock.sentinel.tmp_fd) - - 
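Throughout these removed tests, mock.sentinel supplies opaque placeholder values wherever only object identity matters. A short self-contained illustration of why that sharpens assertions (the forward() helper is hypothetical, not part of the original tree):

from unittest import mock


def forward(src, copier):
    # Trivial stand-in for code that should hand its argument through
    # unchanged to a collaborator.
    copier(src)


copier = mock.Mock()
forward(mock.sentinel.src_path, copier)

# This assertion can only pass if the very same sentinel object was
# forwarded; any substitute value or stringified copy would fail.
copier.assert_called_once_with(mock.sentinel.src_path)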
@mock.patch.object(os.path, 'exists') - def test_check_instance_shared_storage_remote(self, mock_exists): - check_data = dict(filename=mock.sentinel.filename) - ret_val = self._pathutils.check_instance_shared_storage_remote( - check_data) - - self.assertEqual(mock_exists.return_value, ret_val) - - @mock.patch.object(fileutils, 'delete_if_exists') - def test_check_instance_shared_storage_cleanup(self, - mock_delete_if_exists): - check_data = dict(filename=mock.sentinel.filename) - self._pathutils.check_instance_shared_storage_cleanup(check_data) - - mock_delete_if_exists.assert_called_once_with(mock.sentinel.filename) - - @mock.patch.object(pathutils.PathUtils, 'get_instance_dir') - def test_get_instance_snapshot_dir(self, mock_get_instance_dir): - mock_get_instance_dir.return_value = self.fake_instance_dir - response = self._pathutils.get_instance_snapshot_dir( - self.fake_instance_name) - - expected_path = os.path.join(self.fake_instance_dir, 'Snapshots') - self.assertEqual(expected_path, response) - mock_get_instance_dir.assert_called_once_with(self.fake_instance_name, - create_dir=False) - - @mock.patch.object(pathutils.PathUtils, 'get_instance_dir') - def test_get_instance_virtual_machines_dir(self, mock_get_instance_dir): - mock_get_instance_dir.return_value = self.fake_instance_dir - response = self._pathutils.get_instance_virtual_machines_dir( - self.fake_instance_name) - - expected_path = os.path.join(self.fake_instance_dir, - 'Virtual Machines') - self.assertEqual(expected_path, response) - mock_get_instance_dir.assert_called_once_with(self.fake_instance_name, - create_dir=False) - - @mock.patch.object(pathutils.PathUtils, 'copy_folder_files') - @mock.patch.object(pathutils.PathUtils, - 'get_instance_virtual_machines_dir') - def test_copy_vm_config_files(self, mock_get_inst_vm_dir, mock_copy_files): - self._pathutils.copy_vm_config_files(mock.sentinel.instance_name, - mock.sentinel.dest_dir) - - mock_get_inst_vm_dir.assert_called_once_with( - mock.sentinel.instance_name) - mock_copy_files.assert_called_once_with( - mock_get_inst_vm_dir.return_value, mock.sentinel.dest_dir) - - @mock.patch('os.listdir') - def test_get_vm_config_file(self, mock_listdir): - config_file = '81027A62-7187-4EC4-AFF5-9CA853BF7C68.vmcx' - mock_listdir.return_value = [config_file] - - response = self._pathutils.get_vm_config_file(self.fake_instance_dir) - - expected_path = os.path.join(self.fake_instance_dir, config_file) - self.assertEqual(expected_path, response) - mock_listdir.assert_called_once_with(self.fake_instance_dir) - - @mock.patch('os.listdir') - def test_get_vm_config_file_exception(self, mock_listdir): - mock_listdir.return_value = ['fake_file'] - - self.assertRaises(exception.NotFound, - self._pathutils.get_vm_config_file, - mock.sentinel.instances_path) diff --git a/compute_hyperv/tests/unit/test_pdk.py b/compute_hyperv/tests/unit/test_pdk.py deleted file mode 100644 index e432575e..00000000 --- a/compute_hyperv/tests/unit/test_pdk.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from nova import exception -from six.moves import builtins - -from compute_hyperv.nova import pdk -from compute_hyperv.tests.unit import test_base - - -class PDKTestCase(test_base.HyperVBaseTestCase): - - _FAKE_PDK_FILE_PATH = 'C:\\path\\to\\fakepdk.pdk' - - def setUp(self): - super(PDKTestCase, self).setUp() - self._pdk = pdk.PDK() - - @mock.patch.object(builtins, 'open') - @mock.patch.object(pdk.PDK, '_get_pdk_data') - @mock.patch.object(pdk.PDK, '_get_pdk_container') - @mock.patch.object(pdk.PDK, '_get_pdk_reference') - def test_create_pdk(self, mock_get_pdk_reference, mock_get_pdk_container, - mock_get_pdk_data, mock_open): - mock_instance = mock.MagicMock() - pdk_file_handle = mock_open.return_value.__enter__.return_value - - pdk_reference = mock_get_pdk_reference.return_value - pdk_container = mock_get_pdk_container.return_value - - self._pdk.create_pdk(mock.sentinel.context, - mock_instance, - mock.sentinel.image_meta, - self._FAKE_PDK_FILE_PATH) - mock_get_pdk_reference.assert_called_once_with( - mock_instance, mock.sentinel.image_meta) - mock_get_pdk_container.assert_called_once_with(mock.sentinel.context, - mock_instance, - pdk_reference) - mock_get_pdk_data.assert_called_once_with(pdk_container) - pdk_file_handle.write.assert_called_once_with( - mock_get_pdk_data.return_value) - - def _test_get_pdk_reference(self, pdk_reference=None, - image_meta_pdk_ref=None): - mock_instance = mock.MagicMock( - metadata={'img_pdk_reference': image_meta_pdk_ref}) - image_meta = { - 'properties': {'img_pdk_reference': pdk_reference}} - - expected_result = image_meta_pdk_ref or pdk_reference - result = self._pdk._get_pdk_reference(mock_instance, - image_meta) - self.assertEqual(expected_result, result) - - def test_get_pdk_boot_reference(self): - self._test_get_pdk_reference( - image_meta_pdk_ref=mock.sentinel.image_meta_pdk_ref) - - def test_get_pdk_image_reference(self): - self._test_get_pdk_reference(pdk_reference=mock.sentinel.pdk_reference) - - def test_get_pdk_no_reference(self): - image_meta = {'properties': {}} - mock_instance = mock.MagicMock(metadata={}) - - self.assertRaises(exception.InstanceUnacceptable, - self._pdk._get_pdk_reference, - mock_instance, image_meta) - - @mock.patch('barbicanclient.client.Client') - @mock.patch('keystoneauth1.session.Session') - def test_get_pdk_container(self, mock_session, mock_barbican_client): - instance = mock.MagicMock() - context = mock.MagicMock() - auth = context.get_auth_plugin.return_value - sess = mock_session.return_value - barbican_client = mock_barbican_client.return_value - barbican_client.containers.get.return_value = ( - mock.sentinel.pdk_container) - - result = self._pdk._get_pdk_container(context, instance, - mock.sentinel.pdk_reference) - - self.assertEqual(mock.sentinel.pdk_container, result) - mock_session.assert_called_once_with(auth=auth) - mock_barbican_client.assert_called_once_with(session=sess) - - @mock.patch('barbicanclient.client.Client') - @mock.patch('keystoneauth1.session.Session') - def test_get_pdk_container_exception(self, mock_session, - mock_barbican_client): - instance = mock.MagicMock() - context = mock.MagicMock() - auth = context.get_auth_plugin.return_value - sess = mock_session.return_value - - barbican_client = mock_barbican_client.return_value - barbican_client.containers.get.side_effect = [ - exception.InvalidMetadata] - - self.assertRaises(exception.InvalidMetadata, - 
self._pdk._get_pdk_container, - context, - instance, - mock.sentinel.pdk_reference) - mock_session.assert_called_once_with(auth=auth) - mock_barbican_client.assert_called_once_with(session=sess) - - def test_get_pdk_data(self): - pdk_container = mock.MagicMock() - pdk_container.secrets = {'1': mock.MagicMock(payload=b'fake_secret1'), - '2': mock.MagicMock(payload=b'fake_secret2')} - - response = self._pdk._get_pdk_data(pdk_container) - expected_result = b'fake_secret1fake_secret2' - self.assertEqual(expected_result, response) diff --git a/compute_hyperv/tests/unit/test_rdpconsoleops.py b/compute_hyperv/tests/unit/test_rdpconsoleops.py deleted file mode 100644 index 15e1f2b0..00000000 --- a/compute_hyperv/tests/unit/test_rdpconsoleops.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the Hyper-V RDPConsoleOps.""" - -from unittest import mock - -from compute_hyperv.nova import rdpconsoleops -from compute_hyperv.tests.unit import test_base - - -class RDPConsoleOpsTestCase(test_base.HyperVBaseTestCase): - - _autospec_classes = [ - rdpconsoleops.hostops.HostOps, - ] - - def setUp(self): - super(RDPConsoleOpsTestCase, self).setUp() - self.rdpconsoleops = rdpconsoleops.RDPConsoleOps() - - def test_get_rdp_console(self): - mock_get_host_ip = self.rdpconsoleops._hostops.get_host_ip_addr - mock_get_rdp_port = ( - self.rdpconsoleops._rdpconsoleutils.get_rdp_console_port) - mock_get_vm_id = self.rdpconsoleops._vmutils.get_vm_id - - connect_info = self.rdpconsoleops.get_rdp_console(mock.DEFAULT) - - self.assertEqual(mock_get_host_ip.return_value, connect_info.host) - self.assertEqual(mock_get_rdp_port.return_value, connect_info.port) - self.assertEqual(mock_get_vm_id.return_value, - connect_info.internal_access_path) diff --git a/compute_hyperv/tests/unit/test_serialconsolehandler.py b/compute_hyperv/tests/unit/test_serialconsolehandler.py deleted file mode 100644 index 8c496ee5..00000000 --- a/compute_hyperv/tests/unit/test_serialconsolehandler.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from nova import exception - -from compute_hyperv.nova import constants -from compute_hyperv.nova import pathutils -from compute_hyperv.nova import serialconsolehandler -from compute_hyperv.nova import serialproxy -from compute_hyperv.tests.unit import test_base - - -class SerialConsoleHandlerTestCase(test_base.HyperVBaseTestCase): - - _autospec_classes = [ - pathutils.PathUtils, - ] - - def setUp(self): - super(SerialConsoleHandlerTestCase, self).setUp() - - mock_get_vm_console_logs = ( - pathutils.PathUtils.return_value.get_vm_console_log_paths) - mock_get_vm_console_logs.return_value = [mock.sentinel.log_path] - self._consolehandler = serialconsolehandler.SerialConsoleHandler( - mock.sentinel.instance_name) - self._consolehandler._log_path = mock.sentinel.log_path - - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_handlers') - def test_start(self, mock_setup_handlers): - mock_workers = [mock.Mock(), mock.Mock()] - self._consolehandler._workers = mock_workers - - self._consolehandler.start() - - mock_setup_handlers.assert_called_once_with() - for worker in mock_workers: - worker.start.assert_called_once_with() - - @mock.patch('nova.console.serial.release_port') - def test_stop(self, mock_release_port): - mock_serial_proxy = mock.Mock() - mock_workers = [mock_serial_proxy, mock.Mock()] - - self._consolehandler._serial_proxy = mock_serial_proxy - self._consolehandler._listen_host = mock.sentinel.host - self._consolehandler._listen_port = mock.sentinel.port - self._consolehandler._workers = mock_workers - - self._consolehandler.stop() - - mock_release_port.assert_called_once_with(mock.sentinel.host, - mock.sentinel.port) - for worker in mock_workers: - worker.stop.assert_called_once_with() - - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_named_pipe_handlers') - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_serial_proxy_handler') - def _test_setup_handlers(self, mock_setup_proxy, mock_setup_pipe_handlers, - serial_console_enabled=True): - self.flags(enabled=serial_console_enabled, group='serial_console') - - self._consolehandler._setup_handlers() - - self.assertEqual(serial_console_enabled, mock_setup_proxy.called) - mock_setup_pipe_handlers.assert_called_once_with() - - def test_setup_handlers(self): - self._test_setup_handlers() - - def test_setup_handlers_console_disabled(self): - self._test_setup_handlers(serial_console_enabled=False) - - @mock.patch.object(serialproxy, 'SerialProxy') - @mock.patch('nova.console.serial.acquire_port') - @mock.patch.object(serialconsolehandler.threading, 'Event') - @mock.patch.object(serialconsolehandler.ioutils, 'IOQueue') - def test_setup_serial_proxy_handler(self, mock_io_queue, mock_event, - mock_acquire_port, - mock_serial_proxy_class): - mock_input_queue = mock.sentinel.input_queue - mock_output_queue = mock.sentinel.output_queue - mock_client_connected = mock_event.return_value - mock_io_queue.side_effect = [mock_input_queue, mock_output_queue] - mock_serial_proxy = mock_serial_proxy_class.return_value - - mock_acquire_port.return_value = mock.sentinel.port - self.flags(proxyclient_address='127.0.0.3', - group='serial_console') - - self._consolehandler._setup_serial_proxy_handler() - - mock_serial_proxy_class.assert_called_once_with( - mock.sentinel.instance_name, - '127.0.0.3', mock.sentinel.port, - mock_input_queue, - mock_output_queue, - mock_client_connected) - - self.assertIn(mock_serial_proxy, self._consolehandler._workers) - - 
@mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_get_named_pipe_handler') - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_get_vm_serial_port_mapping') - def _mock_setup_named_pipe_handlers(self, mock_get_port_mapping, - mock_get_pipe_handler, - serial_port_mapping=None): - mock_get_port_mapping.return_value = serial_port_mapping - - self._consolehandler._setup_named_pipe_handlers() - - expected_workers = [mock_get_pipe_handler.return_value - for port in serial_port_mapping] - - self.assertEqual(expected_workers, self._consolehandler._workers) - - return mock_get_pipe_handler - - def test_setup_rw_pipe_handler(self): - serial_port_mapping = { - constants.SERIAL_PORT_TYPE_RW: mock.sentinel.pipe_path - } - - mock_get_handler = self._mock_setup_named_pipe_handlers( - serial_port_mapping=serial_port_mapping) - - mock_get_handler.assert_called_once_with( - mock.sentinel.pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=True) - self.assertEqual(mock_get_handler.return_value, - self._consolehandler._log_handler) - - def test_setup_pipe_handlers(self): - serial_port_mapping = { - constants.SERIAL_PORT_TYPE_RO: mock.sentinel.ro_pipe_path, - constants.SERIAL_PORT_TYPE_RW: mock.sentinel.rw_pipe_path - } - - mock_get_handler = self._mock_setup_named_pipe_handlers( - serial_port_mapping=serial_port_mapping) - - expected_calls = [mock.call(mock.sentinel.ro_pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RO, - enable_logging=True), - mock.call(mock.sentinel.rw_pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=False)] - mock_get_handler.assert_has_calls(expected_calls, any_order=True) - - @mock.patch.object(serialconsolehandler.utilsfactory, - 'get_named_pipe_handler') - def _test_get_named_pipe_handler(self, mock_get_pipe_handler, - pipe_type=None, enable_logging=False): - expected_args = {} - - if pipe_type == constants.SERIAL_PORT_TYPE_RW: - self._consolehandler._input_queue = mock.sentinel.input_queue - self._consolehandler._output_queue = mock.sentinel.output_queue - self._consolehandler._client_connected = ( - mock.sentinel.connect_event) - expected_args.update({ - 'input_queue': mock.sentinel.input_queue, - 'output_queue': mock.sentinel.output_queue, - 'connect_event': mock.sentinel.connect_event}) - - if enable_logging: - expected_args['log_file'] = mock.sentinel.log_path - - ret_val = self._consolehandler._get_named_pipe_handler( - mock.sentinel.pipe_path, pipe_type, enable_logging) - - self.assertEqual(mock_get_pipe_handler.return_value, ret_val) - mock_get_pipe_handler.assert_called_once_with( - mock.sentinel.pipe_path, - **expected_args) - - def test_get_ro_named_pipe_handler(self): - self._test_get_named_pipe_handler( - pipe_type=constants.SERIAL_PORT_TYPE_RO, - enable_logging=True) - - def test_get_rw_named_pipe_handler(self): - self._test_get_named_pipe_handler( - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=False) - - def _mock_get_port_connections(self, port_connections): - get_port_connections = ( - self._consolehandler._vmutils.get_vm_serial_port_connections) - get_port_connections.return_value = port_connections - - def test_get_vm_serial_port_mapping_having_tagged_pipes(self): - ro_pipe_path = 'fake_pipe_ro' - rw_pipe_path = 'fake_pipe_rw' - self._mock_get_port_connections([ro_pipe_path, rw_pipe_path]) - - ret_val = self._consolehandler._get_vm_serial_port_mapping() - - expected_mapping = { - constants.SERIAL_PORT_TYPE_RO: ro_pipe_path, - constants.SERIAL_PORT_TYPE_RW: rw_pipe_path - } - - 
self.assertEqual(expected_mapping, ret_val) - - def test_get_vm_serial_port_mapping_untagged_pipe(self): - pipe_path = 'fake_pipe_path' - self._mock_get_port_connections([pipe_path]) - - ret_val = self._consolehandler._get_vm_serial_port_mapping() - - expected_mapping = {constants.SERIAL_PORT_TYPE_RW: pipe_path} - self.assertEqual(expected_mapping, ret_val) - - def test_get_vm_serial_port_mapping_exception(self): - self._mock_get_port_connections([]) - self.assertRaises(exception.NovaException, - self._consolehandler._get_vm_serial_port_mapping) - - @mock.patch('nova.console.type.ConsoleSerial') - def test_get_serial_console(self, mock_serial_console): - self.flags(enabled=True, group='serial_console') - self._consolehandler._listen_host = mock.sentinel.host - self._consolehandler._listen_port = mock.sentinel.port - - ret_val = self._consolehandler.get_serial_console() - self.assertEqual(mock_serial_console.return_value, ret_val) - mock_serial_console.assert_called_once_with( - host=mock.sentinel.host, - port=mock.sentinel.port) - - def test_get_serial_console_disabled(self): - self.flags(enabled=False, group='serial_console') - self.assertRaises(exception.ConsoleTypeUnavailable, - self._consolehandler.get_serial_console) - - def test_flush_console_log(self): - self._consolehandler._log_handler = None - self._consolehandler.flush_console_log() - - mock_handler = mock.Mock() - self._consolehandler._log_handler = mock_handler - self._consolehandler.flush_console_log() - mock_handler.flush_log_file.assert_called_once_with() diff --git a/compute_hyperv/tests/unit/test_serialconsoleops.py b/compute_hyperv/tests/unit/test_serialconsoleops.py deleted file mode 100644 index a7c4b3d5..00000000 --- a/compute_hyperv/tests/unit/test_serialconsoleops.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from nova import exception -from six.moves import builtins - -from compute_hyperv.nova import serialconsolehandler -from compute_hyperv.nova import serialconsoleops -from compute_hyperv.nova import vmops -from compute_hyperv.tests.unit import test_base - - -class SerialConsoleOpsTestCase(test_base.HyperVBaseTestCase): - - _autospec_classes = [ - serialconsoleops.pathutils.PathUtils, - ] - - def setUp(self): - super(SerialConsoleOpsTestCase, self).setUp() - serialconsoleops._console_handlers = {} - self._serialops = serialconsoleops.SerialConsoleOps() - self._vmutils = self._serialops._vmutils - - def _setup_console_handler_mock(self): - mock_console_handler = mock.Mock() - serialconsoleops._console_handlers = {mock.sentinel.instance_name: - mock_console_handler} - return mock_console_handler - - @mock.patch.object(serialconsolehandler, 'SerialConsoleHandler') - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'stop_console_handler_unsync') - def _test_start_console_handler(self, mock_stop_handler, - mock_console_handler, - raise_exception=False): - mock_handler = mock_console_handler.return_value - self._serialops._vmutils.is_secure_vm.return_value = False - - if raise_exception: - mock_handler.start.side_effect = Exception - - self._serialops.start_console_handler(mock.sentinel.instance_name) - - mock_stop_handler.assert_called_once_with(mock.sentinel.instance_name) - mock_console_handler.assert_called_once_with( - mock.sentinel.instance_name) - - if raise_exception: - mock_handler.stop.assert_called_once_with() - else: - console_handler = serialconsoleops._console_handlers.get( - mock.sentinel.instance_name) - self.assertEqual(mock_handler, console_handler) - - def test_start_console_handler(self): - self._test_start_console_handler() - - def test_start_console_handler_exception(self): - self._test_start_console_handler(raise_exception=True) - - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'stop_console_handler_unsync') - def test_start_console_handler_secure_vm(self, mock_stop_handler): - self._serialops._vmutils.is_secure_vm.return_value = True - - self._serialops.start_console_handler(mock.sentinel.instance_name) - self.assertFalse(mock_stop_handler.called) - - def test_stop_console_handler(self): - mock_console_handler = self._setup_console_handler_mock() - - self._serialops.stop_console_handler(mock.sentinel.instance_name) - - mock_console_handler.stop.assert_called_once_with() - handler = serialconsoleops._console_handlers.get( - mock.sentinel.instance_name) - self.assertIsNone(handler) - - def test_get_serial_console(self): - mock_console_handler = self._setup_console_handler_mock() - - ret_val = self._serialops.get_serial_console( - mock.sentinel.instance_name) - - self.assertEqual(mock_console_handler.get_serial_console(), - ret_val) - - def test_get_serial_console_exception(self): - self.assertRaises(exception.ConsoleTypeUnavailable, - self._serialops.get_serial_console, - mock.sentinel.instance_name) - - @mock.patch.object(builtins, 'open') - @mock.patch("os.path.exists") - def test_get_console_output_exception(self, fake_path_exists, fake_open): - mock_handler = self._setup_console_handler_mock() - - self._serialops._vmutils.is_secure_vm.return_value = False - self._serialops._pathutils.get_vm_console_log_paths.return_value = [ - mock.sentinel.log_path_1, mock.sentinel.log_path_2] - fake_open.side_effect = IOError - fake_path_exists.return_value = True - - self.assertRaises(exception.ConsoleLogOutputException, - 
self._serialops.get_console_output, - mock.sentinel.instance_name) - mock_handler.flush_console_log.assert_called_once_with() - fake_open.assert_called_once_with(mock.sentinel.log_path_2, 'rb') - - def test_get_console_output_secure_vm(self): - self._serialops._vmutils.is_secure_vm.return_value = True - self.assertRaises(exception.ConsoleNotAvailable, - self._serialops.get_console_output, - mock.sentinel.instance_name) - - @mock.patch.object(vmops.VMOps, 'get_instance_uuid') - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'start_console_handler') - def test_start_console_handlers(self, mock_start_handler, mock_get_uuid): - fake_inst_names = [mock.sentinel.other_instance, - mock.sentinel.instance_name] - - self._vmutils.get_active_instances.return_value = fake_inst_names - mock_get_uuid.side_effect = [None, mock.sentinel.instance_id] - - self._serialops.start_console_handlers() - - self._serialops._vmutils.get_active_instances.assert_called_once_with() - mock_start_handler.assert_called_once_with(mock.sentinel.instance_name) - mock_get_uuid.assert_has_calls( - [mock.call(instance_name) for instance_name in fake_inst_names]) diff --git a/compute_hyperv/tests/unit/test_serialproxy.py b/compute_hyperv/tests/unit/test_serialproxy.py deleted file mode 100644 index 61b481c2..00000000 --- a/compute_hyperv/tests/unit/test_serialproxy.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import socket -from unittest import mock - -from nova import exception - -from compute_hyperv.nova import serialproxy -from compute_hyperv.tests.unit import test_base - - -class SerialProxyTestCase(test_base.HyperVBaseTestCase): - @mock.patch.object(socket, 'socket') - def setUp(self, mock_socket): - super(SerialProxyTestCase, self).setUp() - - self._mock_socket = mock_socket - self._mock_input_queue = mock.Mock() - self._mock_output_queue = mock.Mock() - self._mock_client_connected = mock.Mock() - - threading_patcher = mock.patch.object(serialproxy, 'threading') - threading_patcher.start() - self.addCleanup(threading_patcher.stop) - - self._proxy = serialproxy.SerialProxy( - mock.sentinel.instance_nane, - mock.sentinel.host, - mock.sentinel.port, - self._mock_input_queue, - self._mock_output_queue, - self._mock_client_connected) - - @mock.patch.object(socket, 'socket') - def test_setup_socket_exception(self, mock_socket): - fake_socket = mock_socket.return_value - - fake_socket.listen.side_effect = socket.error - - self.assertRaises(exception.NovaException, - self._proxy._setup_socket) - - fake_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1) - fake_socket.bind.assert_called_once_with((mock.sentinel.host, - mock.sentinel.port)) - - def test_stop_serial_proxy(self): - self._proxy._conn = mock.Mock() - self._proxy._sock = mock.Mock() - - self._proxy.stop() - - self._proxy._stopped.set.assert_called_once_with() - self._proxy._client_connected.clear.assert_called_once_with() - self._proxy._conn.shutdown.assert_called_once_with(socket.SHUT_RDWR) - self._proxy._conn.close.assert_called_once_with() - self._proxy._sock.close.assert_called_once_with() - - @mock.patch.object(serialproxy.SerialProxy, '_accept_conn') - @mock.patch.object(serialproxy.SerialProxy, '_setup_socket') - def test_run(self, mock_setup_socket, mock_accept_con): - self._proxy._stopped = mock.MagicMock() - self._proxy._stopped.isSet.side_effect = [False, True] - - self._proxy.run() - - mock_setup_socket.assert_called_once_with() - mock_accept_con.assert_called_once_with() - - def test_accept_connection(self): - mock_conn = mock.Mock() - self._proxy._sock = mock.Mock() - self._proxy._sock.accept.return_value = [ - mock_conn, (mock.sentinel.client_addr, mock.sentinel.client_port)] - - self._proxy._accept_conn() - - self._proxy._client_connected.set.assert_called_once_with() - mock_conn.close.assert_called_once_with() - self.assertIsNone(self._proxy._conn) - - thread = serialproxy.threading.Thread - for job in [self._proxy._get_data, - self._proxy._send_data]: - thread.assert_any_call(target=job) - - def test_get_data(self): - self._mock_client_connected.isSet.return_value = True - self._proxy._conn = mock.Mock() - self._proxy._conn.recv.side_effect = [mock.sentinel.data, None] - - self._proxy._get_data() - - self._mock_client_connected.clear.assert_called_once_with() - self._mock_input_queue.put.assert_called_once_with(mock.sentinel.data) - - def _test_send_data(self, exception=None): - self._mock_client_connected.isSet.side_effect = [True, False] - self._mock_output_queue.get_burst.return_value = mock.sentinel.data - self._proxy._conn = mock.Mock() - self._proxy._conn.sendall.side_effect = exception - - self._proxy._send_data() - - self._proxy._conn.sendall.assert_called_once_with( - mock.sentinel.data) - - if exception: - self._proxy._client_connected.clear.assert_called_once_with() - - def test_send_data(self): - self._test_send_data() - - def test_send_data_exception(self): - 
self._test_send_data(exception=socket.error) diff --git a/compute_hyperv/tests/unit/test_snapshotops.py b/compute_hyperv/tests/unit/test_snapshotops.py deleted file mode 100644 index 347aabe5..00000000 --- a/compute_hyperv/tests/unit/test_snapshotops.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock - -from nova.compute import task_states -from nova import exception -from os_win import exceptions as os_win_exc - -from compute_hyperv.nova import snapshotops -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - - -class SnapshotOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V SnapshotOps class.""" - - _autospec_classes = [ - snapshotops.pathutils.PathUtils, - ] - - def setUp(self): - super(SnapshotOpsTestCase, self).setUp() - - self.context = 'fake_context' - self._snapshotops = snapshotops.SnapshotOps() - - self._vhdutils = self._snapshotops._vhdutils - - @mock.patch('nova.image.glance.get_remote_image_service') - def test_save_glance_image(self, mock_get_remote_image_service): - fake_fmt = 'fake_fmt' - image_metadata = {"disk_format": fake_fmt, - "container_format": "bare"} - glance_image_service = mock.MagicMock() - self._vhdutils.get_vhd_format.return_value = fake_fmt.upper() - mock_get_remote_image_service.return_value = (glance_image_service, - mock.sentinel.IMAGE_ID) - - self._snapshotops._save_glance_image(context=self.context, - image_id=mock.sentinel.IMAGE_ID, - image_vhd_path=mock.sentinel.PATH) - - self._vhdutils.get_vhd_format.assert_called_once_with( - mock.sentinel.PATH) - mock_get_remote_image_service.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID) - self._snapshotops._pathutils.open.assert_called_with( - mock.sentinel.PATH, 'rb') - glance_image_service.update.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID, image_metadata, - self._snapshotops._pathutils.open.return_value.__enter__(), - purge_props=False) - - @mock.patch('compute_hyperv.nova.snapshotops.SnapshotOps' - '._save_glance_image') - def _test_snapshot(self, mock_save_glance_image, base_disk_path): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_update = mock.MagicMock() - fake_src_path = os.path.join('fake', 'path') - self._snapshotops._pathutils.lookup_root_vhd_path.return_value = ( - fake_src_path) - fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir') - self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir - self._snapshotops._vhdutils.get_vhd_parent_path.return_value = ( - base_disk_path) - fake_snapshot_path = ( - self._snapshotops._vmutils.take_vm_snapshot.return_value) - - self._snapshotops.snapshot(context=self.context, - instance=mock_instance, - image_id=mock.sentinel.IMAGE_ID, - update_task_state=mock_update) - - self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with( - mock_instance.name) - 
mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path - mock_lookup_path.assert_called_once_with(mock_instance.name) - mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path - mock_get_vhd_path.assert_called_once_with(fake_src_path) - self._snapshotops._pathutils.get_export_dir.assert_called_once_with( - mock_instance.name, create_dir=True, remove_dir=True) - - expected = [mock.call(fake_src_path, - os.path.join(fake_exp_dir, - os.path.basename(fake_src_path)))] - dest_vhd_path = os.path.join(fake_exp_dir, - os.path.basename(fake_src_path)) - if base_disk_path: - basename = os.path.basename(base_disk_path) - base_dest_disk_path = os.path.join(fake_exp_dir, basename) - expected.append(mock.call(base_disk_path, base_dest_disk_path)) - mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd - mock_reconnect.assert_called_once_with(dest_vhd_path, - base_dest_disk_path) - self._snapshotops._vhdutils.merge_vhd.assert_called_once_with( - dest_vhd_path) - mock_save_glance_image.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path) - else: - mock_save_glance_image.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID, dest_vhd_path) - self._snapshotops._pathutils.copyfile.has_calls(expected) - expected_update = [ - mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), - mock.call(task_state=task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD)] - mock_update.has_calls(expected_update) - self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with( - fake_snapshot_path) - self._snapshotops._pathutils.rmtree.assert_called_once_with( - fake_exp_dir) - - def test_snapshot(self): - base_disk_path = os.path.join('fake', 'disk') - self._test_snapshot(base_disk_path=base_disk_path) - - def test_snapshot_no_base_disk(self): - self._test_snapshot(base_disk_path=None) - - @mock.patch.object(snapshotops.SnapshotOps, '_snapshot') - def test_snapshot_instance_not_found(self, mock_snapshot): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_snapshot.side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name=mock_instance.name) - - self.assertRaises(exception.InstanceNotFound, - self._snapshotops.snapshot, - self.context, mock_instance, mock.sentinel.image_id, - mock.sentinel.update_task_state) - - mock_snapshot.assert_called_once_with(self.context, mock_instance, - mock.sentinel.image_id, - mock.sentinel.update_task_state) diff --git a/compute_hyperv/tests/unit/test_vif.py b/compute_hyperv/tests/unit/test_vif.py deleted file mode 100644 index b808315a..00000000 --- a/compute_hyperv/tests/unit/test_vif.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from unittest import mock - -import ddt -from nova import exception -from nova.network import model -from os_win import constants as os_win_const - -import compute_hyperv.nova.conf -from compute_hyperv.nova import vif -from compute_hyperv.tests.unit import test_base - - -CONF = compute_hyperv.nova.conf.CONF - - -class HyperVNovaNetworkVIFPluginTestCase(test_base.HyperVBaseTestCase): - def setUp(self): - super(HyperVNovaNetworkVIFPluginTestCase, self).setUp() - self.vif_driver = vif.HyperVNovaNetworkVIFPlugin() - - def test_plug(self): - self.flags(vswitch_name='fake_vswitch_name', group='hyperv') - fake_vif = {'id': mock.sentinel.fake_id} - - self.vif_driver.plug(mock.sentinel.instance, fake_vif) - netutils = self.vif_driver._netutils - netutils.connect_vnic_to_vswitch.assert_called_once_with( - 'fake_vswitch_name', mock.sentinel.fake_id) - - -@ddt.ddt -class HyperVVIFDriverTestCase(test_base.HyperVBaseTestCase): - def setUp(self): - super(HyperVVIFDriverTestCase, self).setUp() - self.vif_driver = vif.HyperVVIFDriver() - self.vif_driver._vif_plugin = mock.MagicMock() - - self._netutils = self.vif_driver._netutils - self._vmutils = self.vif_driver._vmutils - self._metricsutils = self.vif_driver._metricsutils - - def test_plug(self): - vif = {'type': model.VIF_TYPE_HYPERV} - self.vif_driver.plug(mock.sentinel.instance, vif) - - self.vif_driver._vif_plugin.plug.assert_called_once_with( - mock.sentinel.instance, vif) - - @mock.patch.object(vif, 'os_vif') - @mock.patch.object(vif.HyperVVIFDriver, 'enable_metrics') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_instance') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_vif') - def test_plug_ovs(self, mock_nova_to_osvif_vif, - mock_nova_to_osvif_instance, - mock_enable_metrics, mock_os_vif): - vif = {'type': model.VIF_TYPE_OVS} - self.assertRaises(exception.VirtualInterfacePlugException, - self.vif_driver.plug, - mock.sentinel.instance, vif) - - @ddt.data(True, False) - def test_enable_metrics(self, vm_running): - state = (os_win_const.HYPERV_VM_STATE_ENABLED if vm_running - else os_win_const.HYPERV_VM_STATE_DISABLED) - self._vmutils.get_vm_state.return_value = state - - enable_metrics = self._metricsutils.enable_port_metrics_collection - - self.vif_driver.enable_metrics(mock.sentinel.instance_name, - mock.sentinel.vif_id) - - self._vmutils.get_vm_state.assert_called_once_with( - mock.sentinel.instance_name) - if vm_running: - enable_metrics.assert_called_once_with(mock.sentinel.vif_id) - else: - enable_metrics.assert_not_called() - - def test_plug_type_unknown(self): - vif = {'type': mock.sentinel.vif_type} - self.assertRaises(exception.VirtualInterfacePlugException, - self.vif_driver.plug, - mock.sentinel.instance, vif) - - def test_unplug(self): - vif = {'type': model.VIF_TYPE_HYPERV} - self.vif_driver.unplug(mock.sentinel.instance, vif) - - self.vif_driver._vif_plugin.unplug.assert_called_once_with( - mock.sentinel.instance, vif) - - @mock.patch.object(vif, 'os_vif') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_instance') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_vif') - def test_unplug_ovs(self, mock_nova_to_osvif_vif, - mock_nova_to_osvif_instance, mock_os_vif): - vif = {'type': model.VIF_TYPE_OVS} - self.vif_driver.unplug(mock.sentinel.instance, vif) - - mock_nova_to_osvif_vif.assert_called_once_with(vif) - mock_nova_to_osvif_instance.assert_called_once_with( - mock.sentinel.instance) - mock_os_vif.unplug.assert_called_once_with( - mock_nova_to_osvif_vif.return_value, - 
mock_nova_to_osvif_instance.return_value) - - def test_unplug_type_unknown(self): - vif = {'type': mock.sentinel.vif_type} - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.vif_driver.unplug, - mock.sentinel.instance, vif) diff --git a/compute_hyperv/tests/unit/test_vmops.py b/compute_hyperv/tests/unit/test_vmops.py deleted file mode 100644 index 613a814b..00000000 --- a/compute_hyperv/tests/unit/test_vmops.py +++ /dev/null @@ -1,2491 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock - -import ddt -from eventlet import timeout as etimeout -from nova.compute import task_states -from nova.compute import vm_states -from nova import exception -from nova import objects -from nova.objects import fields -from nova.tests.unit.objects import test_virtual_interface -from nova import utils -from nova.virt import event as virtevent -from nova.virt import hardware -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from oslo_concurrency import processutils -from oslo_utils import fileutils -from oslo_utils import units - -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import eventhandler -from compute_hyperv.nova import vmops -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - -CONF = compute_hyperv.nova.conf.CONF - - -@ddt.ddt -class VMOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V VMOps class.""" - - _autospec_classes = [ - vmops.pathutils.PathUtils, - vmops.volumeops.VolumeOps, - vmops.imagecache.ImageCache, - vmops.serialconsoleops.SerialConsoleOps, - vmops.block_device_manager.BlockDeviceInfoManager, - vmops.vif_utils.HyperVVIFDriver, - vmops.pdk.PDK, - ] - - _FAKE_TIMEOUT = 2 - FAKE_SIZE = 10 - FAKE_DIR = 'fake_dir' - FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s' - FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso' - FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd' - FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' - FAKE_LOG = 'fake_log' - _FAKE_PDK_FILE_PATH = 'C:\\path\\to\\fakepdk.pdk' - _FAKE_FSK_FILE_PATH = 'C:\\path\\to\\fakefsk.fsk' - - _WIN_VERSION_6_3 = '6.3.0' - _WIN_VERSION_10 = '10.0' - - ISO9660 = 'iso9660' - VFAT = 'vfat' - _FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd' - - def setUp(self): - super(VMOpsTestCase, self).setUp() - self.context = 'fake-context' - - self._vmops = vmops.VMOps(virtapi=mock.MagicMock()) - self._pathutils = self._vmops._pathutils - self._vmutils = self._vmops._vmutils - self._metricsutils = self._vmops._metricsutils - self._vif_driver = self._vmops._vif_driver - - def test_list_instances(self): - mock_instance = mock.MagicMock() - self._vmops._vmutils.list_instances.return_value = [mock_instance] - response = self._vmops.list_instances() - self._vmops._vmutils.list_instances.assert_called_once_with() - self.assertEqual(response, [mock_instance]) - - def _test_get_info(self, vm_exists): - mock_instance = 
fake_instance.fake_instance_obj(self.context) - mock_info = mock.MagicMock(spec_set=dict) - fake_info = {'EnabledState': 2, - 'MemoryUsage': mock.sentinel.FAKE_MEM_KB, - 'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU, - 'UpTime': mock.sentinel.FAKE_CPU_NS} - - def getitem(key): - return fake_info[key] - mock_info.__getitem__.side_effect = getitem - - expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2]) - - self._vmops._vmutils.vm_exists.return_value = vm_exists - self._vmops._vmutils.get_vm_summary_info.return_value = mock_info - - if not vm_exists: - self.assertRaises(exception.InstanceNotFound, - self._vmops.get_info, mock_instance) - else: - response = self._vmops.get_info(mock_instance) - self._vmops._vmutils.vm_exists.assert_called_once_with( - mock_instance.name) - self._vmops._vmutils.get_vm_summary_info.assert_called_once_with( - mock_instance.name) - self.assertEqual(response, expected) - - def test_get_info(self): - self._test_get_info(vm_exists=True) - - def test_get_info_exception(self): - self._test_get_info(vm_exists=False) - - @mock.patch.object(vmops.VMOps, 'check_vm_image_type') - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - def test_create_root_device_type_disk(self, mock_create_root_device, - mock_check_vm_image_type): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_root_disk_info = {'type': constants.DISK} - - self._vmops._create_root_device(self.context, mock_instance, - mock_root_disk_info, - mock.sentinel.VM_GEN_1) - - mock_create_root_device.assert_called_once_with( - self.context, mock_instance) - mock_check_vm_image_type.assert_called_once_with( - mock_instance.uuid, mock.sentinel.VM_GEN_1, - mock_create_root_device.return_value) - - @mock.patch.object(vmops.VMOps, '_create_root_iso') - def test_create_root_device_type_iso(self, mock_create_root_iso): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_root_disk_info = {'type': constants.DVD} - - self._vmops._create_root_device(self.context, mock_instance, - mock_root_disk_info, - mock.sentinel.VM_GEN_1) - - mock_create_root_iso.assert_called_once_with(self.context, - mock_instance) - - @mock.patch('os.path.exists') - def _test_create_root_iso(self, mock_os_path_exists, - iso_already_exists=False): - mock_instance = fake_instance.fake_instance_obj(self.context) - - mock_get_root_vhd_path = self._vmops._pathutils.get_root_vhd_path - mock_get_root_vhd_path.return_value = mock.sentinel.ROOT_ISO_PATH - mock_get_cached_image = self._vmops._imagecache.get_cached_image - mock_get_cached_image.return_value = mock.sentinel.CACHED_ISO_PATH - mock_os_path_exists.return_value = iso_already_exists - - self._vmops._create_root_iso(self.context, mock_instance) - - mock_get_cached_image.assert_called_once_with(self.context, - mock_instance) - mock_get_root_vhd_path.assert_called_once_with(mock_instance.name, - 'iso') - if not iso_already_exists: - self._vmops._pathutils.copyfile.assert_called_once_with( - mock.sentinel.CACHED_ISO_PATH, mock.sentinel.ROOT_ISO_PATH) - else: - self._vmops._pathutils.copyfile.assert_not_called() - - def test_create_root_iso(self): - self._test_create_root_iso() - - def test_create_root_iso_already_existing_image(self): - self._test_create_root_iso(iso_already_exists=True) - - def _prepare_create_root_device_mocks(self, use_cow_images, vhd_format, - vhd_size): - mock_instance = fake_instance.fake_instance_obj( - self.context, - expected_attrs=['trusted_certs']) - mock_instance.flavor.root_gb = self.FAKE_SIZE - 
self.flags(use_cow_images=use_cow_images) - self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize': - vhd_size * units.Gi} - self._vmops._vhdutils.get_vhd_format.return_value = vhd_format - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - get_size.return_value = root_vhd_internal_size - self._vmops._pathutils.exists.return_value = True - - return mock_instance - - @mock.patch('os.path.exists') - def _test_create_root_vhd_exception(self, mock_os_path_exists, vhd_format): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=False, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE + 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - self._vmops._imagecache.get_cached_image.return_value = fake_vhd_path - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - mock_os_path_exists.return_value = False - - self.assertRaises(exception.FlavorDiskSmallerThanImage, - self._vmops._create_root_vhd, self.context, - mock_instance) - - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - self._vmops._pathutils.exists.assert_called_once_with( - fake_root_path) - self._vmops._pathutils.remove.assert_called_once_with( - fake_root_path) - - @mock.patch('os.path.exists') - def _test_create_root_vhd_qcow(self, mock_os_path_exists, vhd_format, - vhd_already_exists=False): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=True, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE - 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - self._vmops._imagecache.get_cached_image.return_value = fake_vhd_path - mock_os_path_exists.return_value = vhd_already_exists - - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - - response = self._vmops._create_root_vhd(context=self.context, - instance=mock_instance) - - self.assertEqual(fake_root_path, response) - self._vmops._pathutils.get_root_vhd_path.assert_called_with( - mock_instance.name, vhd_format, False) - - differencing_vhd = self._vmops._vhdutils.create_differencing_vhd - - if not vhd_already_exists: - differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path) - - if vhd_format is constants.DISK_FORMAT_VHD: - self.assertFalse(get_size.called) - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - else: - self._vmops._vhdutils.get_vhd_info.assert_called_once_with( - fake_root_path) - get_size.assert_called_once_with(fake_vhd_path, - root_vhd_internal_size) - self._vmops._vhdutils.resize_vhd.assert_called_once_with( - fake_root_path, root_vhd_internal_size, - is_file_max_size=False) - else: - differencing_vhd.assert_not_called() - self._vmops._vhdutils.resize_vhd.assert_not_called() - - @mock.patch('os.path.exists') - def _test_create_root_vhd(self, mock_os_path_exists, - vhd_format, is_rescue_vhd=False, - vhd_already_exists=False): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=False, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE - 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - mock_get_cached_image = self._vmops._imagecache.get_cached_image - mock_get_cached_image.return_value = fake_vhd_path - rescue_image_id = ( - mock.sentinel.rescue_image_id if is_rescue_vhd else None) - - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - root_vhd_internal_size = 
mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - mock_os_path_exists.return_value = vhd_already_exists - - response = self._vmops._create_root_vhd( - context=self.context, - instance=mock_instance, - rescue_image_id=rescue_image_id) - - self.assertEqual(fake_root_path, response) - mock_get_cached_image.assert_called_once_with(self.context, - mock_instance, - rescue_image_id) - self._vmops._pathutils.get_root_vhd_path.assert_called_with( - mock_instance.name, vhd_format, is_rescue_vhd) - - if not vhd_already_exists: - self._vmops._pathutils.copyfile.assert_called_once_with( - fake_vhd_path, fake_root_path) - get_size.assert_called_once_with(fake_vhd_path, - root_vhd_internal_size) - - if is_rescue_vhd: - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - else: - self._vmops._vhdutils.resize_vhd.assert_called_once_with( - fake_root_path, root_vhd_internal_size, - is_file_max_size=False) - else: - self._vmops._pathutils.copyfile.assert_not_called() - - def test_create_root_vhd(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD) - - def test_create_root_vhdx(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX) - - def test_create_root_vhd_existing_disk(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD, - vhd_already_exists=True) - - def test_create_root_vhdx_existing_disk(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX, - vhd_already_exists=True) - - def test_create_root_vhd_use_cow_images_true(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD) - - def test_create_root_vhdx_use_cow_images_true(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX) - - def test_create_root_vhd_use_already_existing_cow_images(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD, - vhd_already_exists=True) - - def test_create_root_vhdx_use_already_existing_cow_images(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX, - vhd_already_exists=True) - - def test_create_rescue_vhd(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD, - is_rescue_vhd=True) - - def test_create_root_vhdx_size_less_than_internal(self): - self._test_create_root_vhd_exception( - vhd_format=constants.DISK_FORMAT_VHD) - - def test_create_uncached_root_vhd(self): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=False, vhd_format=constants.DISK_FORMAT_VHDX, - vhd_size=(self.FAKE_SIZE - 1)) - mock_instance.vm_state = vm_states.SHELVED_OFFLOADED - - base_root_path = 'root' - exp_root_path = 'root.vhdx' - self._vmops._pathutils.get_root_vhd_path.return_value = base_root_path - self._vmops._imagecache.append_image_format.return_value = ( - exp_root_path) - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - - ret_val = self._vmops._create_root_vhd( - context=self.context, - instance=mock_instance) - - self.assertEqual(exp_root_path, ret_val) - self._vmops._imagecache.get_cached_image.assert_not_called() - self._vmops._pathutils.get_root_vhd_path.assert_called_with( - mock_instance.name, None, False) - - self._vmops._imagecache.fetch.assert_called_once_with( - self.context, mock_instance.image_ref, base_root_path, - mock_instance.trusted_certs) - self._vmops._imagecache.get_image_format.assert_called_once_with( - self.context, mock_instance.image_ref, 
mock_instance) - self._vmops._imagecache.append_image_format.assert_called_once_with( - base_root_path, - self._vmops._imagecache.get_image_format.return_value) - - get_size.assert_called_once_with(exp_root_path, - root_vhd_internal_size) - self._vmops._vhdutils.resize_vhd.assert_called_once_with( - exp_root_path, root_vhd_internal_size, - is_file_max_size=False) - - def test_is_resize_needed_exception(self): - inst = mock.MagicMock() - self.assertRaises( - exception.FlavorDiskSmallerThanImage, - self._vmops._is_resize_needed, - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst) - - def test_is_resize_needed_true(self): - inst = mock.MagicMock() - self.assertTrue(self._vmops._is_resize_needed( - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst)) - - def test_is_resize_needed_false(self): - inst = mock.MagicMock() - self.assertFalse(self._vmops._is_resize_needed( - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst)) - - @mock.patch.object(vmops.VMOps, 'create_ephemeral_disk') - def test_create_ephemerals(self, mock_create_ephemeral_disk): - mock_instance = fake_instance.fake_instance_obj(self.context) - - fake_ephemerals = [dict(), dict()] - self._vmops._vhdutils.get_best_supported_vhd_format.return_value = ( - mock.sentinel.format) - self._vmops._pathutils.get_ephemeral_vhd_path.side_effect = [ - mock.sentinel.FAKE_PATH0, mock.sentinel.FAKE_PATH1] - - self._vmops._create_ephemerals(mock_instance, fake_ephemerals) - - self._vmops._pathutils.get_ephemeral_vhd_path.assert_has_calls( - [mock.call(mock_instance.name, mock.sentinel.format, 'eph0'), - mock.call(mock_instance.name, mock.sentinel.format, 'eph1')]) - mock_create_ephemeral_disk.assert_has_calls( - [mock.call(mock_instance.name, fake_ephemerals[0]), - mock.call(mock_instance.name, fake_ephemerals[1])]) - - def test_create_ephemeral_disk(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_ephemeral_info = {'path': 'fake_eph_path', - 'size': 10} - - self._vmops.create_ephemeral_disk(mock_instance.name, - mock_ephemeral_info) - - mock_create_dynamic_vhd = self._vmops._vhdutils.create_dynamic_vhd - mock_create_dynamic_vhd.assert_called_once_with('fake_eph_path', - 10 * units.Gi) - - def test_get_attached_ephemeral_disks(self): - ephemeral_disks = [os.path.join('image_dir', img_name) - for img_name in ['eph0.vhdx', 'eph1.vhdx']] - image_disks = ephemeral_disks + [ - os.path.join('image_dir', 'root.vhdx')] - - self._vmutils.get_vm_storage_paths.return_value = ( - image_disks, mock.sentinel.passthrough_disks) - - ret_val = self._vmops.get_attached_ephemeral_disks( - mock.sentinel.instance_name) - - self.assertEqual(ephemeral_disks, ret_val) - self._vmutils.get_vm_storage_paths.assert_called_once_with( - mock.sentinel.instance_name) - - @mock.patch.object(vmops.objects, 'PCIDeviceBus') - @mock.patch.object(vmops.objects, 'NetworkInterfaceMetadata') - @mock.patch.object(vmops.objects.VirtualInterfaceList, - 'get_by_instance_uuid') - def test_get_vif_metadata(self, mock_get_by_inst_uuid, - mock_NetworkInterfaceMetadata, mock_PCIDevBus): - mock_vif = mock.MagicMock(tag='taggy') - mock_vif.__contains__.side_effect = ( - lambda attr: getattr(mock_vif, attr, None) is not None) - mock_get_by_inst_uuid.return_value = [mock_vif, - mock.MagicMock(tag=None)] - - vif_metadata = self._vmops._get_vif_metadata(self.context, - mock.sentinel.instance_id) - - mock_get_by_inst_uuid.assert_called_once_with( - self.context, mock.sentinel.instance_id) - 
mock_NetworkInterfaceMetadata.assert_called_once_with( - mac=mock_vif.address, - bus=mock_PCIDevBus.return_value, - tags=[mock_vif.tag]) - self.assertEqual([mock_NetworkInterfaceMetadata.return_value], - vif_metadata) - - @mock.patch.object(vmops.objects, 'InstanceDeviceMetadata') - @mock.patch.object(vmops.VMOps, '_get_vif_metadata') - def test_update_device_metadata(self, mock_get_vif_metadata, - mock_InstanceDeviceMetadata): - mock_instance = mock.MagicMock() - mock_get_vif_metadata.return_value = [mock.sentinel.vif_metadata] - self._vmops._block_dev_man.get_bdm_metadata.return_value = [ - mock.sentinel.bdm_metadata] - - self._vmops.update_device_metadata(self.context, mock_instance) - - mock_get_vif_metadata.assert_called_once_with(self.context, - mock_instance.uuid) - self._vmops._block_dev_man.get_bdm_metadata.assert_called_once_with( - self.context, mock_instance) - - expected_metadata = [mock.sentinel.vif_metadata, - mock.sentinel.bdm_metadata] - mock_InstanceDeviceMetadata.assert_called_once_with( - devices=expected_metadata) - self.assertEqual(mock_InstanceDeviceMetadata.return_value, - mock_instance.device_metadata) - - def test_set_boot_order(self): - self._vmops.set_boot_order(mock.sentinel.instance_name, - mock.sentinel.vm_gen, - mock.sentinel.bdi) - - mock_get_boot_order = self._vmops._block_dev_man.get_boot_order - mock_get_boot_order.assert_called_once_with( - mock.sentinel.vm_gen, mock.sentinel.bdi) - self._vmops._vmutils.set_boot_order.assert_called_once_with( - mock.sentinel.instance_name, mock_get_boot_order.return_value) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - @mock.patch('compute_hyperv.nova.vmops.VMOps.destroy') - @mock.patch('compute_hyperv.nova.vmops.VMOps.power_on') - @mock.patch.object(vmops.VMOps, 'set_boot_order') - @mock.patch('compute_hyperv.nova.vmops.VMOps.attach_config_drive') - @mock.patch('compute_hyperv.nova.vmops.VMOps._create_config_drive') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('compute_hyperv.nova.vmops.VMOps.update_device_metadata') - @mock.patch('compute_hyperv.nova.vmops.VMOps.create_instance') - @mock.patch('compute_hyperv.nova.vmops.VMOps.get_image_vm_generation') - @mock.patch('compute_hyperv.nova.vmops.VMOps._create_ephemerals') - @mock.patch('compute_hyperv.nova.vmops.VMOps._create_root_device') - @mock.patch('compute_hyperv.nova.vmops.VMOps._delete_disk_files') - @mock.patch('compute_hyperv.nova.vmops.VMOps._get_neutron_events', - return_value=[]) - def _test_spawn(self, mock_get_neutron_events, - mock_delete_disk_files, - mock_create_root_device, - mock_create_ephemerals, mock_get_image_vm_gen, - mock_create_instance, mock_update_device_metadata, - mock_configdrive_required, - mock_create_config_drive, mock_attach_config_drive, - mock_set_boot_order, - mock_power_on, mock_destroy, mock_plug_vifs, - exists=False, configdrive_required=True, fail=None, - fake_vm_gen=constants.VM_GEN_2, - power_on=True): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_image_meta = mock.MagicMock() - root_device_info = mock.sentinel.ROOT_DEV_INFO - mock_get_image_vm_gen.return_value = fake_vm_gen - fake_config_drive_path = mock_create_config_drive.return_value - block_device_info = {'ephemerals': [], 'root_disk': root_device_info} - - self._vmops._pathutils.get_instance_dir.return_value = ( - 'fake-instance-dir') - self._vmops._vmutils.vm_exists.return_value = exists - mock_configdrive_required.return_value = configdrive_required - mock_create_instance.side_effect = fail - if exists: - 
self.assertRaises(exception.InstanceExists, self._vmops.spawn, - self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info) - elif fail is os_win_exc.HyperVException: - self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn, - self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info) - mock_destroy.assert_called_once_with(mock_instance, - mock.sentinel.network_info, - block_device_info) - else: - self._vmops.spawn(self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info, - power_on=power_on) - self._vmops._vmutils.vm_exists.assert_called_once_with( - mock_instance.name) - self._vmops._pathutils.get_instance_dir.assert_called_once_with( - mock_instance.name, create_dir=False) - mock_validate_and_update_bdi = ( - self._vmops._block_dev_man.validate_and_update_bdi) - mock_validate_and_update_bdi.assert_called_once_with( - mock_instance, mock_image_meta, fake_vm_gen, block_device_info) - mock_create_root_device.assert_called_once_with(self.context, - mock_instance, - root_device_info, - fake_vm_gen) - mock_create_ephemerals.assert_called_once_with( - mock_instance, block_device_info['ephemerals']) - mock_get_neutron_events.assert_called_once_with( - mock.sentinel.network_info) - mock_get_image_vm_gen.assert_called_once_with(mock_instance.uuid, - mock_image_meta) - mock_create_instance.assert_called_once_with( - self.context, mock_instance, mock.sentinel.network_info, - block_device_info, fake_vm_gen, mock_image_meta) - mock_plug_vifs.assert_called_once_with(mock_instance, - mock.sentinel.network_info) - mock_update_device_metadata.assert_called_once_with( - self.context, mock_instance) - mock_configdrive_required.assert_called_once_with(mock_instance) - if configdrive_required: - mock_create_config_drive.assert_called_once_with( - self.context, mock_instance, [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.network_info) - mock_attach_config_drive.assert_called_once_with( - mock_instance, fake_config_drive_path, fake_vm_gen) - mock_set_boot_order.assert_called_once_with( - mock_instance.name, fake_vm_gen, block_device_info) - if power_on: - mock_power_on.assert_called_once_with( - mock_instance, - network_info=mock.sentinel.network_info, - should_plug_vifs=False) - else: - mock_power_on.assert_not_called() - - def test_spawn(self): - self._test_spawn() - - def test_spawn_instance_exists(self): - self._test_spawn(exists=True) - - def test_spawn_create_instance_exception(self): - self._test_spawn(fail=os_win_exc.HyperVException) - - def test_spawn_cfgdrive_not_required(self): - self._test_spawn(configdrive_required=False) - - def test_spawn_without_powering_on(self): - self._test_spawn(power_on=False) - - def test_spawn_no_admin_permissions(self): - self._vmops._vmutils.check_admin_permissions.side_effect = ( - os_win_exc.HyperVException) - self.assertRaises(os_win_exc.HyperVException, - self._vmops.spawn, - self.context, mock.DEFAULT, mock.DEFAULT, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.INFO, mock.sentinel.DEV_INFO) - - @mock.patch.object(vmops.VMOps, '_get_neutron_events') - def test_wait_vif_plug_events(self, mock_get_events): - self._vmops._virtapi.wait_for_instance_event.side_effect = ( - etimeout.Timeout) - self.flags(vif_plugging_timeout=1) - self.flags(vif_plugging_is_fatal=True) - - def 
_context_user(): - with self._vmops.wait_vif_plug_events(mock.sentinel.instance, - mock.sentinel.network_info): - pass - - self.assertRaises(exception.VirtualInterfaceCreateException, - _context_user) - - mock_get_events.assert_called_once_with(mock.sentinel.network_info) - self._vmops._virtapi.wait_for_instance_event.assert_called_once_with( - mock.sentinel.instance, mock_get_events.return_value, - deadline=CONF.vif_plugging_timeout, - error_callback=self._vmops._neutron_failed_callback) - - @mock.patch.object(vmops.VMOps, '_get_neutron_events') - def test_wait_vif_plug_events_port_binding_failed(self, mock_get_events): - mock_get_events.side_effect = exception.PortBindingFailed( - port_id='fake_id') - - def _context_user(): - with self._vmops.wait_vif_plug_events(mock.sentinel.instance, - mock.sentinel.network_info): - pass - - self.assertRaises(exception.PortBindingFailed, _context_user) - - def test_neutron_failed_callback(self): - self.flags(vif_plugging_is_fatal=True) - self.assertRaises(exception.VirtualInterfaceCreateException, - self._vmops._neutron_failed_callback, - mock.sentinel.event_name, mock.sentinel.instance) - - def test_get_neutron_events(self): - network_info = [{'id': mock.sentinel.vif_id1, 'active': True}, - {'id': mock.sentinel.vif_id2, 'active': False}, - {'id': mock.sentinel.vif_id3}] - - events = self._vmops._get_neutron_events(network_info) - self.assertEqual([('network-vif-plugged', mock.sentinel.vif_id2)], - events) - - def test_get_neutron_events_no_timeout(self): - self.flags(vif_plugging_timeout=0) - network_info = [{'id': mock.sentinel.vif_id1, 'active': True}] - - events = self._vmops._get_neutron_events(network_info) - self.assertEqual([], events) - - @mock.patch.object(vmops.VMOps, 'configure_instance_metrics') - @mock.patch.object(vmops.VMOps, 'update_vm_resources') - @mock.patch.object(vmops.VMOps, '_configure_secure_vm') - @mock.patch.object(vmops.VMOps, '_requires_secure_boot') - @mock.patch.object(vmops.VMOps, '_requires_certificate') - @mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config') - @mock.patch.object(vmops.VMOps, '_attach_root_device') - @mock.patch.object(vmops.VMOps, 'configure_remotefx') - @mock.patch.object(vmops.VMOps, '_get_image_serial_port_settings') - @mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes') - @mock.patch.object(vmops.VMOps, 'attach_ephemerals') - def test_create_instance(self, mock_attach_ephemerals, - mock_create_pipes, - mock_get_port_settings, - mock_configure_remotefx, - mock_attach_root_device, - mock_get_vnuma_config, - mock_requires_certificate, - mock_requires_secure_boot, - mock_configure_secure_vm, - mock_update_vm_resources, - mock_configure_metrics): - root_device_info = mock.sentinel.ROOT_DEV_INFO - block_device_info = {'root_disk': root_device_info, 'ephemerals': [], - 'block_device_mapping': []} - fake_network_info = {'id': mock.sentinel.ID, - 'address': mock.sentinel.ADDRESS} - mock_instance = fake_instance.fake_instance_obj(self.context) - instance_path = os.path.join(CONF.instances_path, mock_instance.name) - - mock_get_vnuma_config.return_value = (mock.sentinel.mem_per_numa_node, - mock.sentinel.vnuma_cpus) - - self._vmops.create_instance(context=self.context, - instance=mock_instance, - network_info=[fake_network_info], - block_device_info=block_device_info, - vm_gen=mock.sentinel.vm_gen, - image_meta=mock.sentinel.image_meta) - - mock_get_vnuma_config.assert_called_once_with(mock_instance, - mock.sentinel.image_meta) - self._vmops._vmutils.create_vm.assert_called_once_with( - 
mock_instance.name, True, mock.sentinel.vm_gen, - instance_path, [mock_instance.uuid]) - - mock_configure_remotefx.assert_called_once_with( - mock_instance, mock.sentinel.vm_gen) - - mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller - mock_create_scsi_ctrl.assert_called_once_with(mock_instance.name) - - mock_attach_root_device.assert_called_once_with( - self.context, mock_instance, root_device_info) - mock_attach_ephemerals.assert_called_once_with(mock_instance.name, - block_device_info['ephemerals']) - self._vmops._volumeops.attach_volumes.assert_called_once_with( - self.context, block_device_info['block_device_mapping'], - mock_instance) - - mock_get_port_settings.assert_called_with(mock.sentinel.image_meta) - mock_create_pipes.assert_called_once_with( - mock_instance, mock_get_port_settings.return_value) - - self._vmops._vmutils.create_nic.assert_called_once_with( - mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS) - mock_configure_metrics.assert_called_once_with(mock_instance.name) - mock_requires_secure_boot.assert_called_once_with( - mock_instance, mock.sentinel.image_meta, mock.sentinel.vm_gen) - mock_requires_certificate.assert_called_once_with( - mock.sentinel.image_meta) - enable_secure_boot = self._vmops._vmutils.enable_secure_boot - enable_secure_boot.assert_called_once_with( - mock_instance.name, - msft_ca_required=mock_requires_certificate.return_value) - mock_configure_secure_vm.assert_called_once_with(self.context, - mock_instance, mock.sentinel.image_meta, - mock_requires_secure_boot.return_value) - mock_update_vm_resources.assert_called_once_with( - mock_instance, mock.sentinel.vm_gen, mock.sentinel.image_meta) - - @mock.patch.object(vmops.version, 'product_string') - @mock.patch.object(vmops.VMOps, '_attach_pci_devices') - @mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs') - @mock.patch.object(vmops.VMOps, '_get_instance_dynamic_memory_ratio') - @mock.patch.object(vmops.VMOps, '_requires_nested_virt') - @mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config') - def _check_update_vm_resources(self, mock_get_vnuma_config, - mock_requires_nested_virt, - mock_get_dynamic_memory_ratio, - mock_set_qos_specs, - mock_attach_pci_devices, - mock_product_string, - pci_requests=None, - instance_automatic_shutdown=False): - self.flags(instance_automatic_shutdown=instance_automatic_shutdown, - group='hyperv') - - mock_get_vnuma_config.return_value = (mock.sentinel.mem_per_numa_node, - mock.sentinel.vnuma_cpus) - dynamic_memory_ratio = mock_get_dynamic_memory_ratio.return_value - mock_instance = fake_instance.fake_instance_obj(self.context) - - instance_pci_requests = objects.InstancePCIRequests( - requests=pci_requests or [], instance_uuid=mock_instance.uuid) - mock_instance.pci_requests = instance_pci_requests - host_shutdown_action = (os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN - if pci_requests or - instance_automatic_shutdown - else None) - - self._vmops.update_vm_resources(mock_instance, mock.sentinel.vm_gen, - mock.sentinel.image_meta, - mock.sentinel.instance_path, - mock.sentinel.is_resize) - - mock_get_vnuma_config.assert_called_once_with(mock_instance, - mock.sentinel.image_meta) - mock_requires_nested_virt.assert_called_once_with( - mock_instance, mock.sentinel.image_meta) - mock_get_dynamic_memory_ratio.assert_called_once_with( - mock_instance, True, mock_requires_nested_virt.return_value) - self._vmops._vmutils.update_vm.assert_called_once_with( - mock_instance.name, mock_instance.flavor.memory_mb, - 
mock.sentinel.mem_per_numa_node, mock_instance.flavor.vcpus, - mock.sentinel.vnuma_cpus, CONF.hyperv.limit_cpu_features, - dynamic_memory_ratio, - configuration_root_dir=mock.sentinel.instance_path, - host_shutdown_action=host_shutdown_action, - vnuma_enabled=True, - chassis_asset_tag=mock_product_string.return_value) - mock_set_qos_specs.assert_called_once_with(mock_instance, - mock.sentinel.is_resize) - mock_attach_pci_devices.assert_called_once_with( - mock_instance, mock.sentinel.is_resize) - self._vmops._vmutils.set_nested_virtualization.assert_called_once_with( - mock_instance.name, state=mock_requires_nested_virt.return_value) - - def test_update_vm_resources(self): - self._check_update_vm_resources() - - def test_update_vm_resources_pci_requested(self): - vendor_id = 'fake_vendor_id' - product_id = 'fake_product_id' - spec = {'vendor_id': vendor_id, 'product_id': product_id} - request = objects.InstancePCIRequest(count=1, spec=[spec]) - self._check_update_vm_resources(pci_requests=[request]) - - def test_create_instance_automatic_shutdown(self): - self._check_update_vm_resources(instance_automatic_shutdown=True) - - def test_attach_pci_devices(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - vendor_id = 'fake_vendor_id' - product_id = 'fake_product_id' - spec = {'vendor_id': vendor_id, 'product_id': product_id} - request = objects.InstancePCIRequest(count=2, spec=[spec]) - instance_pci_requests = objects.InstancePCIRequests( - requests=[request], instance_uuid=mock_instance.uuid) - mock_instance.pci_requests = instance_pci_requests - - self._vmops._attach_pci_devices(mock_instance, True) - - self._vmops._vmutils.remove_all_pci_devices.assert_called_once_with( - mock_instance.name) - self._vmops._vmutils.add_pci_device.assert_has_calls( - [mock.call(mock_instance.name, vendor_id, product_id)] * 2) - - @mock.patch.object(vmops.hardware, 'numa_get_constraints') - @mock.patch.object(vmops.objects.ImageMeta, 'from_dict') - def _check_get_instance_vnuma_config_exception(self, mock_from_dict, - mock_get_numa, numa_cells): - flavor = {'extra_specs': {}} - mock_instance = mock.MagicMock(flavor=flavor) - image_meta = mock.MagicMock(properties={}) - mock_get_numa.return_value.cells = numa_cells - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._get_instance_vnuma_config, - mock_instance, image_meta) - - def test_get_instance_vnuma_config_bad_cpuset(self): - cell1 = mock.MagicMock(cpuset=set([0]), memory=1024) - cell2 = mock.MagicMock(cpuset=set([1, 2]), memory=1024) - self._check_get_instance_vnuma_config_exception( - numa_cells=[cell1, cell2]) - - def test_get_instance_vnuma_config_bad_memory(self): - cell1 = mock.MagicMock(cpuset=set([0]), memory=1024) - cell2 = mock.MagicMock(cpuset=set([1]), memory=2048) - self._check_get_instance_vnuma_config_exception( - numa_cells=[cell1, cell2]) - - @mock.patch.object(vmops.hardware, 'numa_get_constraints') - @mock.patch.object(vmops.objects.ImageMeta, 'from_dict') - def _check_get_instance_vnuma_config( - self, mock_from_dict, mock_get_numa, numa_topology=None, - expected_mem_per_numa=None, expected_cpus_per_numa=None): - mock_instance = mock.MagicMock() - image_meta = mock.MagicMock() - mock_get_numa.return_value = numa_topology - - result_memory_per_numa, result_cpus_per_numa = ( - self._vmops._get_instance_vnuma_config(mock_instance, image_meta)) - - self.assertEqual(expected_cpus_per_numa, result_cpus_per_numa) - self.assertEqual(expected_mem_per_numa, result_memory_per_numa) - - def test_get_instance_vnuma_config(self): - cell1
= mock.MagicMock(cpuset=set([0]), memory=2048, cpu_pinning=None) - cell2 = mock.MagicMock(cpuset=set([1]), memory=2048, cpu_pinning=None) - mock_topology = mock.MagicMock(cells=[cell1, cell2]) - self._check_get_instance_vnuma_config(numa_topology=mock_topology, - expected_cpus_per_numa=1, - expected_mem_per_numa=2048) - - def test_get_instance_vnuma_config_no_topology(self): - self._check_get_instance_vnuma_config() - - @ddt.data((True, False), - (False, True), - (False, False)) - @ddt.unpack - def test_get_instance_dynamic_memory_ratio(self, vnuma_enabled, - nested_virt_enabled): - mock_instance = fake_instance.fake_instance_obj(self.context) - expected_dyn_memory_ratio = 2.0 - self.flags(dynamic_memory_ratio=expected_dyn_memory_ratio, - group='hyperv') - if vnuma_enabled or nested_virt_enabled: - expected_dyn_memory_ratio = 1.0 - - response = self._vmops._get_instance_dynamic_memory_ratio( - mock_instance, vnuma_enabled, nested_virt_enabled) - self.assertEqual(expected_dyn_memory_ratio, response) - - def test_attach_root_device_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device_info = {'type': constants.VOLUME, - 'connection_info': mock.sentinel.CONN_INFO, - 'disk_bus': constants.CTRL_TYPE_IDE} - - self._vmops._attach_root_device(self.context, - mock_instance, root_device_info) - - self._vmops._volumeops.attach_volume.assert_called_once_with( - self.context, - root_device_info['connection_info'], mock_instance, - disk_bus=root_device_info['disk_bus']) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_root_device_disk(self, mock_attach_drive): - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device_info = {'type': constants.DISK, - 'boot_index': 0, - 'disk_bus': constants.CTRL_TYPE_IDE, - 'path': 'fake_path', - 'drive_addr': 0, - 'ctrl_disk_addr': 1} - - self._vmops._attach_root_device( - self.context, mock_instance, root_device_info) - - mock_attach_drive.assert_called_once_with( - mock_instance.name, root_device_info['path'], - root_device_info['drive_addr'], root_device_info['ctrl_disk_addr'], - root_device_info['disk_bus'], root_device_info['type']) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_ephemerals(self, mock_attach_drive): - mock_instance = fake_instance.fake_instance_obj(self.context) - - class FakeBDM(dict): - _bdm_obj = mock.sentinel.bdm_obj - - ephemerals = [{'path': os.path.join('eph_dir', 'eph0_path'), - 'boot_index': 1, - 'disk_bus': constants.CTRL_TYPE_IDE, - 'device_type': 'disk', - 'drive_addr': 0, - 'ctrl_disk_addr': 1}, - {'path': os.path.join('eph_dir', 'eph1_path'), - 'boot_index': 2, - 'disk_bus': constants.CTRL_TYPE_SCSI, - 'device_type': 'disk', - 'drive_addr': 0, - 'ctrl_disk_addr': 0}, - {'path': None}] - ephemerals = [FakeBDM(ephemerals[0]), - ephemerals[1], - FakeBDM(ephemerals[2])] - - self._vmops.attach_ephemerals(mock_instance.name, ephemerals) - - mock_attach_drive.assert_has_calls( - [mock.call(mock_instance.name, ephemerals[0]['path'], 0, - 1, constants.CTRL_TYPE_IDE, constants.DISK), - mock.call(mock_instance.name, ephemerals[1]['path'], 0, - 0, constants.CTRL_TYPE_SCSI, constants.DISK) - ]) - mock_update_conn = ( - self._vmops._block_dev_man.update_bdm_connection_info) - mock_update_conn.assert_called_once_with( - mock.sentinel.bdm_obj, - eph_filename=os.path.basename(ephemerals[0]['path'])) - - def test_attach_drive_vm_to_scsi(self): - self._vmops._attach_drive( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - 
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.CTRL_TYPE_SCSI) - - self._vmops._vmutils.attach_scsi_drive.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - constants.DISK) - - def test_attach_drive_vm_to_ide(self): - self._vmops._attach_drive( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.CTRL_TYPE_IDE) - - self._vmops._vmutils.attach_ide_drive.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.DISK) - - def test_get_image_vm_generation_default(self): - image_meta = {"properties": {}} - self._vmops._hostutils.get_default_vm_generation.return_value = ( - constants.IMAGE_PROP_VM_GEN_1) - self._vmops._hostutils.get_supported_vm_types.return_value = [ - constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] - - response = self._vmops.get_image_vm_generation( - mock.sentinel.instance_id, image_meta) - - self.assertEqual(constants.VM_GEN_1, response) - - def test_get_image_vm_generation_gen2(self): - image_meta = {"properties": { - constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}} - self._vmops._hostutils.get_supported_vm_types.return_value = [ - constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] - - response = self._vmops.get_image_vm_generation( - mock.sentinel.instance_id, image_meta) - - self.assertEqual(constants.VM_GEN_2, response) - - def test_get_image_vm_generation_bad_prop(self): - image_meta = {"properties": - {constants.IMAGE_PROP_VM_GEN: mock.sentinel.bad_prop}} - self._vmops._hostutils.get_supported_vm_types.return_value = [ - constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.get_image_vm_generation, - mock.sentinel.instance_id, - image_meta) - - def test_check_vm_image_type_exception(self): - self._vmops._vhdutils.get_vhd_format.return_value = ( - constants.DISK_FORMAT_VHD) - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.check_vm_image_type, - mock.sentinel.instance_id, constants.VM_GEN_2, - mock.sentinel.FAKE_PATH) - - def _check_requires_certificate(self, os_type): - mock_image_meta = {'properties': {'os_type': os_type}} - - expected_result = os_type == fields.OSType.LINUX - result = self._vmops._requires_certificate(mock_image_meta) - self.assertEqual(expected_result, result) - - def test_requires_certificate_windows(self): - self._check_requires_certificate(os_type=fields.OSType.WINDOWS) - - def test_requires_certificate_linux(self): - self._check_requires_certificate(os_type=fields.OSType.LINUX) - - def _check_requires_secure_boot( - self, image_prop_os_type=fields.OSType.LINUX, - image_prop_secure_boot=fields.SecureBoot.REQUIRED, - flavor_secure_boot=fields.SecureBoot.REQUIRED, - vm_gen=constants.VM_GEN_2, expected_exception=True): - mock_instance = fake_instance.fake_instance_obj(self.context) - if flavor_secure_boot: - mock_instance.flavor.extra_specs = { - constants.FLAVOR_SPEC_SECURE_BOOT: flavor_secure_boot} - mock_image_meta = {'properties': {'os_type': image_prop_os_type}} - if image_prop_secure_boot: - mock_image_meta['properties']['os_secure_boot'] = ( - image_prop_secure_boot) - - if expected_exception: - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._requires_secure_boot, - mock_instance, mock_image_meta, vm_gen) - else: - result = 
self._vmops._requires_secure_boot(mock_instance, - mock_image_meta, - vm_gen) - - requires_sb = fields.SecureBoot.REQUIRED in [ - flavor_secure_boot, image_prop_secure_boot] - self.assertEqual(requires_sb, result) - - def test_requires_secure_boot_ok(self): - self._check_requires_secure_boot( - expected_exception=False) - - def test_requires_secure_boot_image_img_prop_none(self): - self._check_requires_secure_boot( - image_prop_secure_boot=None, - expected_exception=False) - - def test_requires_secure_boot_image_extra_spec_none(self): - self._check_requires_secure_boot( - flavor_secure_boot=None, - expected_exception=False) - - def test_requires_secure_boot_flavor_no_os_type(self): - self._check_requires_secure_boot( - image_prop_os_type=None) - - def test_requires_secure_boot_flavor_no_os_type_no_exc(self): - self._check_requires_secure_boot( - image_prop_os_type=None, - image_prop_secure_boot=fields.SecureBoot.DISABLED, - flavor_secure_boot=fields.SecureBoot.DISABLED, - expected_exception=False) - - def test_requires_secure_boot_flavor_disabled(self): - self._check_requires_secure_boot( - flavor_secure_boot=fields.SecureBoot.DISABLED) - - def test_requires_secure_boot_image_disabled(self): - self._check_requires_secure_boot( - image_prop_secure_boot=fields.SecureBoot.DISABLED) - - def test_requires_secure_boot_generation_1(self): - self._check_requires_secure_boot(vm_gen=constants.VM_GEN_1) - - def _check_requires_nested_virt(self, extra_spec='', img_prop=None, - expected=True): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs['hw:cpu_features'] = extra_spec - image_meta = {"properties": {'hw_cpu_features': img_prop or ''}} - - requires_nested = self._vmops._requires_nested_virt(mock_instance, - image_meta) - self.assertEqual(expected, requires_nested) - - def test_requires_nested_virt_flavor(self): - self._check_requires_nested_virt(extra_spec='vmx') - - def test_requires_nested_virt_image(self): - self._check_requires_nested_virt(img_prop='vmx') - - def test_requires_nested_virt_False(self): - self._check_requires_nested_virt(expected=False) - - def test_requires_nested_virt_unsupported(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs['hw:cpu_features'] = 'vmx' - mock_image_meta = mock.MagicMock() - self._vmops._hostutils.supports_nested_virtualization.return_value = ( - False) - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._requires_nested_virt, - mock_instance, mock_image_meta) - - @mock.patch('nova.api.metadata.base.InstanceMetadata') - @mock.patch('nova.virt.configdrive.ConfigDriveBuilder') - @mock.patch('oslo_concurrency.processutils.execute') - def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder, - mock_InstanceMetadata, config_drive_format, - config_drive_cdrom, side_effect, - rescue=False): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.flags(config_drive_format=config_drive_format) - self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv') - self.flags(config_drive_inject_password=True, group='hyperv') - mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [ - side_effect] - - path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO) - path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD) - - def fake_get_configdrive_path(instance_name, disk_format, - rescue=False): - return (path_iso - if disk_format == constants.DVD_FORMAT else path_vhd) - - 
mock_get_configdrive_path = self._vmops._pathutils.get_configdrive_path - mock_get_configdrive_path.side_effect = fake_get_configdrive_path - expected_get_configdrive_path_calls = [mock.call(mock_instance.name, - constants.DVD_FORMAT, - rescue=rescue)] - if not config_drive_cdrom: - expected_call = mock.call(mock_instance.name, - constants.DISK_FORMAT_VHD, - rescue=rescue) - expected_get_configdrive_path_calls.append(expected_call) - - if config_drive_format != self.ISO9660: - self.assertRaises(exception.ConfigDriveUnsupportedFormat, - self._vmops._create_config_drive, - self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - elif side_effect is processutils.ProcessExecutionError: - self.assertRaises(processutils.ProcessExecutionError, - self._vmops._create_config_drive, - self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - else: - path = self._vmops._create_config_drive(self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - mock_InstanceMetadata.assert_called_once_with( - mock_instance, content=[mock.sentinel.FILE], - extra_md={'admin_pass': mock.sentinel.PASSWORD}, - network_info=mock.sentinel.NET_INFO) - mock_get_configdrive_path.assert_has_calls( - expected_get_configdrive_path_calls) - mock_ConfigDriveBuilder.assert_called_with( - instance_md=mock_InstanceMetadata.return_value) - mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive - mock_make_drive.assert_called_once_with(path_iso) - if not CONF.hyperv.config_drive_cdrom: - expected = path_vhd - mock_execute.assert_called_once_with( - CONF.hyperv.qemu_img_cmd, - 'convert', '-f', 'raw', '-O', 'vpc', - path_iso, path_vhd, attempts=1) - self._vmops._pathutils.remove.assert_called_once_with( - os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)) - else: - expected = path_iso - - self.assertEqual(expected, path) - - def test_create_config_drive_cdrom(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=True, - side_effect=None) - - def test_create_config_drive_vhd(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=None) - - def test_create_rescue_config_drive_vhd(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=None, - rescue=True) - - def test_create_config_drive_other_drive_format(self): - self._test_create_config_drive(config_drive_format=self.VFAT, - config_drive_cdrom=False, - side_effect=None) - - def test_create_config_drive_execution_error(self): - self._test_create_config_drive( - config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=processutils.ProcessExecutionError) - - def test_attach_config_drive_exception(self): - instance = fake_instance.fake_instance_obj(self.context) - self.assertRaises(exception.InvalidDiskFormat, - self._vmops.attach_config_drive, - instance, 'C:/fake_instance_dir/configdrive.xxx', - constants.VM_GEN_1) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_config_drive(self, mock_attach_drive): - instance = fake_instance.fake_instance_obj(self.context) - self._vmops.attach_config_drive(instance, - self._FAKE_CONFIGDRIVE_PATH, - constants.VM_GEN_1) - mock_attach_drive.assert_called_once_with( - instance.name, self._FAKE_CONFIGDRIVE_PATH, - 1, 0, constants.CTRL_TYPE_IDE, constants.DISK) 
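The config drive tests above pin down a specific flow: build an ISO with nova's ConfigDriveBuilder and, when the CDROM option is disabled, convert it to a VPC-format VHD via qemu-img and drop the intermediate ISO. A minimal sketch of that flow, for reference while reading the assertions; only InstanceMetadata, ConfigDriveBuilder and processutils.execute are the real APIs patched by these tests, while the build_config_drive helper name and the literal paths are hypothetical:

# Illustrative sketch only; the helper name and paths below are made up,
# and the real driver obtains its paths from pathutils.
import os

from nova.api.metadata import base as instance_metadata
from nova.virt import configdrive
from oslo_concurrency import processutils

def build_config_drive(instance, injected_files, admin_password,
                       network_info, use_cdrom, qemu_img_cmd='qemu-img'):
    # Gather the instance metadata that populates the drive.
    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files,
        extra_md={'admin_pass': admin_password},
        network_info=network_info)

    path_iso = 'configdrive.iso'  # hypothetical locations
    path_vhd = 'configdrive.vhd'
    with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
        cdb.make_drive(path_iso)

    if use_cdrom:
        return path_iso
    # Otherwise convert the ISO to a VPC (VHD) image that can be attached
    # as a plain disk, then remove the ISO, matching the asserts above.
    processutils.execute(qemu_img_cmd, 'convert', '-f', 'raw', '-O', 'vpc',
                         path_iso, path_vhd, attempts=1)
    os.remove(path_iso)
    return path_vhd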
- - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_config_drive_gen2(self, mock_attach_drive): - instance = fake_instance.fake_instance_obj(self.context) - self._vmops.attach_config_drive(instance, - self._FAKE_CONFIGDRIVE_PATH, - constants.VM_GEN_2) - mock_attach_drive.assert_called_once_with( - instance.name, self._FAKE_CONFIGDRIVE_PATH, - 1, 0, constants.CTRL_TYPE_SCSI, constants.DISK) - - def test_detach_config_drive(self): - is_rescue_configdrive = True - mock_lookup_configdrive = ( - self._vmops._pathutils.lookup_configdrive_path) - mock_lookup_configdrive.return_value = mock.sentinel.configdrive_path - - self._vmops._detach_config_drive(mock.sentinel.instance_name, - rescue=is_rescue_configdrive, - delete=True) - - mock_lookup_configdrive.assert_called_once_with( - mock.sentinel.instance_name, - rescue=is_rescue_configdrive) - self._vmops._vmutils.detach_vm_disk.assert_called_once_with( - mock.sentinel.instance_name, mock.sentinel.configdrive_path, - is_physical=False) - self._vmops._pathutils.remove.assert_called_once_with( - mock.sentinel.configdrive_path) - - @ddt.data({'passed_instance_path': True}, - {'cleanup_migr_files': True}) - @ddt.unpack - def test_delete_disk_files(self, passed_instance_path=None, - cleanup_migr_files=False): - mock_instance = mock.Mock( - system_metadata=dict( - backup_location=mock.sentinel.backup_location)) - self._vmops._delete_disk_files(mock_instance, - passed_instance_path, - cleanup_migr_files) - - stop_console_handler = ( - self._vmops._serial_console_ops.stop_console_handler_unsync) - stop_console_handler.assert_called_once_with(mock_instance.name) - - if passed_instance_path: - self.assertFalse(self._vmops._pathutils.get_instance_dir.called) - else: - self._pathutils.get_instance_dir.assert_called_once_with( - mock_instance.name) - - exp_inst_path = (passed_instance_path or - self._pathutils.get_instance_dir.return_value) - - exp_check_remove_dir_calls = [mock.call(exp_inst_path)] - - mock_get_migr_dir = self._pathutils.get_instance_migr_revert_dir - if cleanup_migr_files: - mock_get_migr_dir.assert_called_once_with( - exp_inst_path, remove_dir=True) - exp_check_remove_dir_calls.append( - mock.call(mock.sentinel.backup_location)) - - self._pathutils.check_remove_dir.assert_has_calls( - exp_check_remove_dir_calls) - - @ddt.data({"force_destroy": True, "destroy_disks": False}, - {'vm_exists': False, 'planned_vm_exists': False}, - {'vm_exists': False, 'planned_vm_exists': True}, - {'task_state': task_states.RESIZE_REVERTING}, - {'cleanup_migr_files': False}) - @ddt.unpack - @mock.patch('compute_hyperv.nova.vmops.VMOps._delete_disk_files') - @mock.patch('compute_hyperv.nova.vmops.VMOps.power_off') - @mock.patch('compute_hyperv.nova.vmops.VMOps.unplug_vifs') - def test_destroy(self, mock_unplug_vifs, mock_power_off, - mock_delete_disk_files, vm_exists=True, - planned_vm_exists=False, - force_destroy=False, - task_state=task_states.DELETING, - destroy_disks=True, - cleanup_migr_files=True): - self.flags(force_destroy_instances=force_destroy, group="hyperv") - - mock_instance = fake_instance.fake_instance_obj( - self.context, task_state=task_state) - self._vmops._vmutils.vm_exists.return_value = vm_exists - self._vmops._migrutils.planned_vm_exists.return_value = ( - planned_vm_exists) - - self._vmops.destroy( - instance=mock_instance, - block_device_info=mock.sentinel.FAKE_BD_INFO, - network_info=mock.sentinel.fake_network_info, - cleanup_migration_files=cleanup_migr_files, - destroy_disks=destroy_disks) - - 
self._vmops._vmutils.vm_exists.assert_called_with( - mock_instance.name) - - if vm_exists: - self._vmops._vmutils.stop_vm_jobs.assert_called_once_with( - mock_instance.name) - mock_power_off.assert_called_once_with(mock_instance) - self._vmops._vmutils.destroy_vm.assert_called_once_with( - mock_instance.name) - elif planned_vm_exists: - self._vmops._migrutils.planned_vm_exists.assert_called_once_with( - mock_instance.name) - destroy_planned_vm = ( - self._vmops._migrutils.destroy_existing_planned_vm) - destroy_planned_vm.assert_called_once_with( - mock_instance.name) - self.assertFalse(self._vmops._vmutils.destroy_vm.called) - else: - self.assertFalse( - self._vmops._migrutils.destroy_existing_planned_vm.called) - - mock_unplug_vifs.assert_called_once_with( - mock_instance, mock.sentinel.fake_network_info) - self._vmops._volumeops.disconnect_volumes.assert_called_once_with( - mock.sentinel.FAKE_BD_INFO) - - reverting_resize = task_state == task_states.RESIZE_REVERTING - exp_migr_files_cleanup = cleanup_migr_files and not reverting_resize - if destroy_disks or reverting_resize: - mock_delete_disk_files.assert_called_once_with( - mock_instance, - self._pathutils.get_instance_dir.return_value, - exp_migr_files_cleanup) - else: - mock_delete_disk_files.assert_not_called() - - @mock.patch('compute_hyperv.nova.vmops.VMOps.power_off') - def test_destroy_exception(self, mock_power_off): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._vmutils.destroy_vm.side_effect = ( - os_win_exc.HyperVException) - self._vmops._vmutils.vm_exists.return_value = True - - self.assertRaises(os_win_exc.HyperVException, - self._vmops.destroy, mock_instance, - mock.sentinel.network_info, - mock.sentinel.block_device_info) - - def test_reboot_hard(self): - self._test_reboot(vmops.REBOOT_TYPE_HARD, - os_win_const.HYPERV_VM_STATE_REBOOT) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_reboot_soft(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = True - self._test_reboot(vmops.REBOOT_TYPE_SOFT, - os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_reboot_soft_failed(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = False - self._test_reboot(vmops.REBOOT_TYPE_SOFT, - os_win_const.HYPERV_VM_STATE_REBOOT) - - @mock.patch("compute_hyperv.nova.vmops.VMOps.power_on") - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on): - mock_soft_shutdown.return_value = True - mock_power_on.side_effect = os_win_exc.HyperVException( - "Expected failure") - instance = fake_instance.fake_instance_obj(self.context) - - self.assertRaises(os_win_exc.HyperVException, self._vmops.reboot, - instance, {}, vmops.REBOOT_TYPE_SOFT) - - mock_soft_shutdown.assert_called_once_with(instance) - mock_power_on.assert_called_once_with(instance, network_info={}) - - def _test_reboot(self, reboot_type, vm_state): - instance = fake_instance.fake_instance_obj(self.context) - with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: - self._vmops.reboot(instance, {}, reboot_type) - mock_set_state.assert_called_once_with(instance, vm_state) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.return_value = True - - result = self._vmops._soft_shutdown(instance, 
self._FAKE_TIMEOUT) - - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_once_with(instance.name) - mock_wait_for_power_off.assert_called_once_with( - instance.name, self._FAKE_TIMEOUT) - - self.assertTrue(result) - - @mock.patch("time.sleep") - def test_soft_shutdown_failed(self, mock_sleep): - instance = fake_instance.fake_instance_obj(self.context) - - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.side_effect = os_win_exc.HyperVException( - "Expected failure.") - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) - - mock_shutdown_vm.assert_called_once_with(instance.name) - self.assertFalse(result) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown_wait(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.side_effect = [False, True] - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1) - - calls = [mock.call(instance.name, 1), - mock.call(instance.name, self._FAKE_TIMEOUT - 1)] - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_with(instance.name) - mock_wait_for_power_off.assert_has_calls(calls) - - self.assertTrue(result) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.return_value = False - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5) - - calls = [mock.call(instance.name, 1.5), - mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)] - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_with(instance.name) - mock_wait_for_power_off.assert_has_calls(calls) - - self.assertFalse(result) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_pause(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.pause(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_unpause(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.unpause(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_suspend(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.suspend(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_resume(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.resume(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - def _test_power_off(self, timeout, set_state_expected=True): - instance = fake_instance.fake_instance_obj(self.context) - with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: - self._vmops.power_off(instance, timeout) - - serialops = self._vmops._serial_console_ops - serialops.stop_console_handler.assert_called_once_with( - instance.name) - if 
set_state_expected: - mock_set_state.assert_called_once_with( - instance, os_win_const.HYPERV_VM_STATE_DISABLED) - - def test_power_off_hard(self): - self._test_power_off(timeout=0) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_power_off_exception(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = False - self._test_power_off(timeout=1) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._set_vm_state") - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_power_off_soft(self, mock_soft_shutdown, mock_set_state): - instance = fake_instance.fake_instance_obj(self.context) - mock_soft_shutdown.return_value = True - - self._vmops.power_off(instance, 1, 0) - - serialops = self._vmops._serial_console_ops - serialops.stop_console_handler.assert_called_once_with( - instance.name) - mock_soft_shutdown.assert_called_once_with( - instance, 1, vmops.SHUTDOWN_TIME_INCREMENT) - self.assertFalse(mock_set_state.called) - - @mock.patch("compute_hyperv.nova.vmops.VMOps._soft_shutdown") - def test_power_off_unexisting_instance(self, mock_soft_shutdown): - mock_soft_shutdown.side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.vm_name) - self._test_power_off(timeout=1, set_state_expected=False) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_power_on(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance) - - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch('compute_hyperv.nova.vmops.VMOps._set_vm_state') - def test_power_on_having_block_devices(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, mock.sentinel.block_device_info) - - mock_fix_instance_vol_paths = ( - self._vmops._volumeops.fix_instance_volume_disk_paths) - mock_fix_instance_vol_paths.assert_called_once_with( - mock_instance.name, mock.sentinel.block_device_info) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - def test_power_on_with_network_info(self, mock_plug_vifs): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, - network_info=mock.sentinel.fake_network_info) - mock_plug_vifs.assert_called_once_with( - mock_instance, mock.sentinel.fake_network_info) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - def test_power_on_vifs_already_plugged(self, mock_plug_vifs): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, - should_plug_vifs=False) - self.assertFalse(mock_plug_vifs.called) - - def _test_set_vm_state(self, state): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops._set_vm_state(mock_instance, state) - self._vmops._vmutils.set_vm_state.assert_called_once_with( - mock_instance.name, state) - - def test_set_vm_state_disabled(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED) - - def test_set_vm_state_enabled(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED) - - def test_set_vm_state_reboot(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT) - - def test_set_vm_state_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._vmutils.set_vm_state.side_effect = ( - 
os_win_exc.HyperVException) - self.assertRaises(os_win_exc.HyperVException, - self._vmops._set_vm_state, - mock_instance, mock.sentinel.STATE) - - def test_get_vm_state(self): - summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED} - - with mock.patch.object(self._vmops._vmutils, - 'get_vm_summary_info') as mock_get_summary_info: - mock_get_summary_info.return_value = summary_info - - response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) - self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def test_wait_for_power_off_true(self, mock_get_state): - mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED - result = self._vmops._wait_for_power_off( - mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) - mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME) - self.assertTrue(result) - - @mock.patch.object(vmops.etimeout, "with_timeout") - def test_wait_for_power_off_false(self, mock_with_timeout): - mock_with_timeout.side_effect = etimeout.Timeout() - result = self._vmops._wait_for_power_off( - mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) - self.assertFalse(result) - - def test_create_vm_com_port_pipes(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_serial_ports = { - 1: constants.SERIAL_PORT_TYPE_RO, - 2: constants.SERIAL_PORT_TYPE_RW - } - - self._vmops._create_vm_com_port_pipes(mock_instance, - mock_serial_ports) - expected_calls = [] - for port_number, port_type in mock_serial_ports.items(): - expected_pipe = r'\\.\pipe\%s_%s' % (mock_instance.uuid, - port_type) - expected_calls.append(mock.call(mock_instance.name, - port_number, - expected_pipe)) - - mock_set_conn = self._vmops._vmutils.set_vm_serial_port_connection - mock_set_conn.assert_has_calls(expected_calls) - - def test_list_instance_uuids(self): - fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' - with mock.patch.object(self._vmops._vmutils, - 'list_instance_notes') as mock_list_notes: - mock_list_notes.return_value = [('fake_name', [fake_uuid])] - - response = self._vmops.list_instance_uuids() - mock_list_notes.assert_called_once_with() - - self.assertEqual(response, [fake_uuid]) - - def test_copy_vm_dvd_disks(self): - fake_paths = [mock.sentinel.FAKE_DVD_PATH1, - mock.sentinel.FAKE_DVD_PATH2] - mock_copy = self._vmops._pathutils.copyfile - mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths - mock_get_dvd_disk_paths.return_value = fake_paths - self._vmops._pathutils.get_instance_dir.return_value = ( - mock.sentinel.FAKE_DEST_PATH) - - self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME, - mock.sentinel.FAKE_DEST_HOST) - - mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME) - self._vmops._pathutils.get_instance_dir.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, - remote_server=mock.sentinel.FAKE_DEST_HOST) - mock_copy.assert_has_calls([mock.call(mock.sentinel.FAKE_DVD_PATH1, - mock.sentinel.FAKE_DEST_PATH), - mock.call(mock.sentinel.FAKE_DVD_PATH2, - mock.sentinel.FAKE_DEST_PATH)]) - - def test_plug_vifs(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_vif1 = {'id': mock.sentinel.ID1, - 'type': mock.sentinel.vif_type1} - fake_vif2 = {'id': mock.sentinel.ID2, - 'type': mock.sentinel.vif_type2} - mock_network_info = [fake_vif1, fake_vif2] - calls = [mock.call(mock_instance, fake_vif1), - mock.call(mock_instance, fake_vif2)] - - self._vmops.plug_vifs(mock_instance, - network_info=mock_network_info)
- self._vmops._vif_driver.plug.assert_has_calls(calls) - - def test_plug_vifs_failed(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_vif1 = {'id': mock.sentinel.ID1, - 'type': mock.sentinel.vif_type1} - mock_network_info = [fake_vif1] - - self._vmops._vif_driver.plug.side_effect = exception.NovaException - - self.assertRaises(exception.VirtualInterfacePlugException, - self._vmops.plug_vifs, - mock_instance, mock_network_info) - - def test_unplug_vifs(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_vif1 = {'id': mock.sentinel.ID1, - 'type': mock.sentinel.vif_type1} - fake_vif2 = {'id': mock.sentinel.ID2, - 'type': mock.sentinel.vif_type2} - mock_network_info = [fake_vif1, fake_vif2] - calls = [mock.call(mock_instance, fake_vif1), - mock.call(mock_instance, fake_vif2)] - - self._vmops.unplug_vifs(mock_instance, - network_info=mock_network_info) - self._vmops._vif_driver.unplug.assert_has_calls(calls) - - @ddt.data({}, - {'metrics_enabled': False}, - {'enable_network_metrics': False}) - @ddt.unpack - def test_configure_instance_metrics(self, metrics_enabled=True, - enable_network_metrics=True): - port_names = ['port1', 'port2'] - - enable_vm_metrics = self._metricsutils.enable_vm_metrics_collection - self._vmutils.get_vm_nic_names.return_value = port_names - - self.flags(enable_instance_metrics_collection=metrics_enabled, - group='hyperv') - - self._vmops.configure_instance_metrics( - mock.sentinel.instance_name, - enable_network_metrics=enable_network_metrics) - - if metrics_enabled: - enable_vm_metrics.assert_called_once_with( - mock.sentinel.instance_name) - if enable_network_metrics: - self._vmutils.get_vm_nic_names.assert_called_once_with( - mock.sentinel.instance_name) - self._vif_driver.enable_metrics.assert_has_calls( - [mock.call(mock.sentinel.instance_name, port_name) - for port_name in port_names]) - else: - enable_vm_metrics.assert_not_called() - - if not (metrics_enabled and enable_network_metrics): - self._vif_driver.enable_metrics.assert_not_called() - - def _setup_remotefx_mocks(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs = { - 'os:resolution': os_win_const.REMOTEFX_MAX_RES_1920x1200, - 'os:monitors': '2', - 'os:vram': '256'} - - return mock_instance - - def test_configure_remotefx_not_required(self): - self.flags(enable_remotefx=False, group='hyperv') - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.old_flavor.extra_specs['os:resolution'] = ( - os_win_const.REMOTEFX_MAX_RES_1920x1200) - - self._vmops.configure_remotefx(mock_instance, mock.sentinel.VM_GEN, - True) - - disable_remotefx = self._vmops._vmutils.disable_remotefx_video_adapter - disable_remotefx.assert_called_once_with(mock_instance.name) - - def test_configure_remotefx_exception_enable_config(self): - self.flags(enable_remotefx=False, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx_exception_server_feature(self): - self.flags(enable_remotefx=True, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = False - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx_exception_vm_gen(self): - self.flags(enable_remotefx=True, 
group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = True - self._vmops._vmutils.vm_gen_supports_remotefx.return_value = False - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx(self): - self.flags(enable_remotefx=True, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = True - self._vmops._vmutils.vm_gen_supports_remotefx.return_value = True - extra_specs = mock_instance.flavor.extra_specs - - self._vmops.configure_remotefx(mock_instance, constants.VM_GEN_1) - mock_enable_remotefx = ( - self._vmops._vmutils.enable_remotefx_video_adapter) - mock_enable_remotefx.assert_called_once_with( - mock_instance.name, int(extra_specs['os:monitors']), - extra_specs['os:resolution'], - int(extra_specs['os:vram']) * units.Mi) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state): - fake_vm = fake_instance.fake_instance_obj(self.context) - mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED - - result = self._vmops._check_hotplug_available(fake_vm) - - self.assertTrue(result) - mock_get_vm_state.assert_called_once_with(fake_vm.name) - self.assertFalse( - self._vmops._hostutils.check_min_windows_version.called) - self.assertFalse(self._vmops._vmutils.get_vm_generation.called) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def _test_check_hotplug_available( - self, mock_get_vm_state, expected_result=False, - vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10): - - fake_vm = fake_instance.fake_instance_obj(self.context) - mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED - self._vmops._vmutils.get_vm_generation.return_value = vm_gen - fake_check_win_vers = self._vmops._hostutils.check_min_windows_version - fake_check_win_vers.return_value = ( - windows_version == self._WIN_VERSION_10) - - result = self._vmops._check_hotplug_available(fake_vm) - - self.assertEqual(expected_result, result) - mock_get_vm_state.assert_called_once_with(fake_vm.name) - fake_check_win_vers.assert_called_once_with(10, 0) - - def test_check_if_hotplug_available(self): - self._test_check_hotplug_available(expected_result=True) - - def test_check_if_hotplug_available_gen1(self): - self._test_check_hotplug_available( - expected_result=False, vm_gen=constants.VM_GEN_1) - - def test_check_if_hotplug_available_win_6_3(self): - self._test_check_hotplug_available( - expected_result=False, windows_version=self._WIN_VERSION_6_3) - - @mock.patch.object(vmops.VMOps, 'update_device_metadata') - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_attach_interface(self, mock_check_hotplug_available, - mock_update_dev_meta): - mock_check_hotplug_available.return_value = True - fake_vm = fake_instance.fake_instance_obj(self.context) - fake_vif = test_virtual_interface.fake_vif - - self._vmops.attach_interface( - mock.sentinel.context, fake_vm, fake_vif) - - mock_check_hotplug_available.assert_called_once_with(fake_vm) - self._vmops._vif_driver.plug.assert_called_once_with( - fake_vm, fake_vif) - self._vmops._vmutils.create_nic.assert_called_once_with( - fake_vm.name, fake_vif['id'], fake_vif['address']) - mock_update_dev_meta.assert_called_once_with( - mock.sentinel.context, fake_vm) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def 
test_attach_interface_failed(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = False - self.assertRaises(exception.InterfaceAttachFailed, - self._vmops.attach_interface, - mock.sentinel.context, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = True - fake_vm = fake_instance.fake_instance_obj(self.context) - fake_vif = test_virtual_interface.fake_vif - - self._vmops.detach_interface(fake_vm, fake_vif) - - mock_check_hotplug_available.assert_called_once_with(fake_vm) - self._vmops._vif_driver.unplug.assert_called_once_with( - fake_vm, fake_vif) - self._vmops._vmutils.destroy_nic.assert_called_once_with( - fake_vm.name, fake_vif['id']) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface_failed(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = False - self.assertRaises(exception.InterfaceDetachFailed, - self._vmops.detach_interface, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface_missing_instance(self, mock_check_hotplug): - mock_check_hotplug.side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name='fake_vm') - self.assertRaises(exception.InterfaceDetachFailed, - self._vmops.detach_interface, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - @mock.patch.object(vmops.VMOps, 'get_image_vm_generation') - @mock.patch.object(vmops.VMOps, '_attach_drive') - @mock.patch.object(vmops.VMOps, '_create_config_drive') - @mock.patch.object(vmops.VMOps, 'attach_config_drive') - @mock.patch.object(vmops.VMOps, '_detach_config_drive') - @mock.patch.object(vmops.VMOps, 'power_on') - def test_rescue_instance(self, mock_power_on, - mock_detach_config_drive, - mock_attach_config_drive, - mock_create_config_drive, - mock_attach_drive, - mock_get_image_vm_gen, - mock_create_root_vhd, - mock_configdrive_required): - mock_image_meta = mock.MagicMock() - mock_vm_gen = constants.VM_GEN_2 - mock_instance = fake_instance.fake_instance_obj(self.context) - - mock_configdrive_required.return_value = True - mock_create_root_vhd.return_value = mock.sentinel.rescue_vhd_path - mock_get_image_vm_gen.return_value = mock_vm_gen - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._pathutils.lookup_root_vhd_path.return_value = ( - mock.sentinel.root_vhd_path) - mock_create_config_drive.return_value = ( - mock.sentinel.rescue_configdrive_path) - - self._vmops.rescue_instance(self.context, - mock_instance, - mock.sentinel.network_info, - mock_image_meta, - mock.sentinel.rescue_password) - - mock_get_image_vm_gen.assert_called_once_with( - mock_instance.uuid, mock_image_meta) - self._vmops._vmutils.detach_vm_disk.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, - is_physical=False) - mock_attach_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.rescue_vhd_path, 0, - self._vmops._ROOT_DISK_CTRL_ADDR, - vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen]) - self._vmops._vmutils.attach_scsi_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, - drive_type=constants.DISK) - mock_detach_config_drive.assert_called_once_with(mock_instance.name) - 
mock_create_config_drive.assert_called_once_with( - self.context, mock_instance, - injected_files=None, - admin_password=mock.sentinel.rescue_password, - network_info=mock.sentinel.network_info, - rescue=True) - mock_attach_config_drive.assert_called_once_with( - mock_instance, mock.sentinel.rescue_configdrive_path, - mock_vm_gen) - - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - @mock.patch.object(vmops.VMOps, 'get_image_vm_generation') - @mock.patch.object(vmops.VMOps, 'unrescue_instance') - def _test_rescue_instance_exception(self, mock_unrescue, - mock_get_image_vm_gen, - mock_create_root_vhd, - wrong_vm_gen=False, - boot_from_volume=False, - expected_exc=None): - mock_vm_gen = constants.VM_GEN_1 - image_vm_gen = (mock_vm_gen - if not wrong_vm_gen else constants.VM_GEN_2) - mock_image_meta = mock.MagicMock() - - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_get_image_vm_gen.return_value = image_vm_gen - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._pathutils.lookup_root_vhd_path.return_value = ( - mock.sentinel.root_vhd_path if not boot_from_volume else None) - - self.assertRaises(expected_exc, - self._vmops.rescue_instance, - self.context, mock_instance, - mock.sentinel.network_info, - mock_image_meta, - mock.sentinel.rescue_password) - mock_unrescue.assert_called_once_with(mock_instance) - - def test_rescue_instance_wrong_vm_gen(self): - # Test the case when the rescue image requires a different - # vm generation than the actual rescued instance. - self._test_rescue_instance_exception( - wrong_vm_gen=True, - expected_exc=exception.ImageUnacceptable) - - def test_rescue_instance_boot_from_volume(self): - # Rescuing instances booted from volume is not supported. - self._test_rescue_instance_exception( - boot_from_volume=True, - expected_exc=exception.InstanceNotRescuable) - - @mock.patch.object(fileutils, 'delete_if_exists') - @mock.patch.object(vmops.VMOps, '_attach_drive') - @mock.patch.object(vmops.VMOps, 'attach_config_drive') - @mock.patch.object(vmops.VMOps, '_detach_config_drive') - @mock.patch.object(vmops.VMOps, 'power_on') - @mock.patch.object(vmops.VMOps, 'power_off') - def test_unrescue_instance(self, mock_power_on, mock_power_off, - mock_detach_config_drive, - mock_attach_configdrive, - mock_attach_drive, - mock_delete_if_exists): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_vm_gen = constants.VM_GEN_2 - - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._vmutils.is_disk_attached.return_value = False - self._vmops._pathutils.lookup_root_vhd_path.side_effect = ( - mock.sentinel.root_vhd_path, mock.sentinel.rescue_vhd_path) - self._vmops._pathutils.lookup_configdrive_path.return_value = ( - mock.sentinel.configdrive_path) - - self._vmops.unrescue_instance(mock_instance) - - self._vmops._pathutils.lookup_root_vhd_path.assert_has_calls( - [mock.call(mock_instance.name), - mock.call(mock_instance.name, rescue=True)]) - self._vmops._vmutils.detach_vm_disk.assert_has_calls( - [mock.call(mock_instance.name, - mock.sentinel.root_vhd_path, - is_physical=False), - mock.call(mock_instance.name, - mock.sentinel.rescue_vhd_path, - is_physical=False)]) - mock_attach_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, 0, - self._vmops._ROOT_DISK_CTRL_ADDR, - vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen]) - mock_detach_config_drive.assert_called_once_with(mock_instance.name, - rescue=True, - delete=True) - 
mock_delete_if_exists.assert_called_once_with( - mock.sentinel.rescue_vhd_path) - self._vmops._vmutils.is_disk_attached.assert_called_once_with( - mock.sentinel.configdrive_path, - is_physical=False) - mock_attach_configdrive.assert_called_once_with( - mock_instance, mock.sentinel.configdrive_path, mock_vm_gen) - mock_power_on.assert_called_once_with(mock_instance) - - @mock.patch.object(vmops.VMOps, 'power_off') - def test_unrescue_instance_missing_root_image(self, mock_power_off): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.vm_state = vm_states.RESCUED - self._vmops._pathutils.lookup_root_vhd_path.return_value = None - - self.assertRaises(exception.InstanceNotRescuable, - self._vmops.unrescue_instance, - mock_instance) - - @ddt.data((1, True), - (0, True), - (0, False)) - @ddt.unpack - @mock.patch.object(vmops.VMOps, '_get_scoped_flavor_extra_specs') - @mock.patch.object(vmops.VMOps, '_get_instance_local_disks') - def test_set_instance_disk_qos_specs(self, total_iops_sec, is_resize, - mock_get_local_disks, - mock_get_scoped_specs): - fake_total_bytes_sec = 8 - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_local_disks = [mock.sentinel.root_vhd_path, - mock.sentinel.eph_vhd_path] - - mock_get_local_disks.return_value = mock_local_disks - mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs - mock_get_scoped_specs.return_value = dict( - disk_total_bytes_sec=fake_total_bytes_sec) - mock_bytes_per_sec_to_iops = ( - self._vmops._volumeops.bytes_per_sec_to_iops) - mock_bytes_per_sec_to_iops.return_value = total_iops_sec - - self._vmops._set_instance_disk_qos_specs(mock_instance, is_resize) - - mock_bytes_per_sec_to_iops.assert_called_once_with( - fake_total_bytes_sec) - - if total_iops_sec or is_resize: - mock_get_local_disks.assert_called_once_with(mock_instance.name) - expected_calls = [mock.call(disk_path, total_iops_sec) - for disk_path in mock_local_disks] - mock_set_qos_specs.assert_has_calls(expected_calls) - else: - self.assertFalse(mock_get_local_disks.called) - self.assertFalse(mock_set_qos_specs.called) - - def test_get_instance_local_disks(self): - fake_instance_dir = 'fake_instance_dir' - fake_local_disks = [os.path.join(fake_instance_dir, disk_name) - for disk_name in ['root.vhd', 'configdrive.iso']] - fake_instance_disks = ['fake_remote_disk'] + fake_local_disks - - mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths - mock_get_storage_paths.return_value = [fake_instance_disks, []] - mock_get_instance_dir = self._vmops._pathutils.get_instance_dir - mock_get_instance_dir.return_value = fake_instance_dir - - ret_val = self._vmops._get_instance_local_disks( - mock.sentinel.instance_name) - - self.assertEqual(fake_local_disks, ret_val) - - def test_get_scoped_flavor_extra_specs(self): - # The flavor extra specs dict contains only string values.
- fake_total_bytes_sec = '8' - - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs = { - 'spec_key': 'spec_value', - 'quota:total_bytes_sec': fake_total_bytes_sec} - - ret_val = self._vmops._get_scoped_flavor_extra_specs( - mock_instance, scope='quota') - - expected_specs = { - 'total_bytes_sec': fake_total_bytes_sec - } - self.assertEqual(expected_specs, ret_val) - - def _mock_get_port_settings(self, logging_port, interactive_port): - mock_image_port_settings = { - constants.IMAGE_PROP_LOGGING_SERIAL_PORT: logging_port, - constants.IMAGE_PROP_INTERACTIVE_SERIAL_PORT: interactive_port - } - mock_image_meta = {'properties': mock_image_port_settings} - - acceptable_ports = [1, 2] - expected_exception = not (logging_port in acceptable_ports and - interactive_port in acceptable_ports) - if expected_exception: - self.assertRaises(exception.ImageSerialPortNumberInvalid, - self._vmops._get_image_serial_port_settings, - mock_image_meta) - else: - return self._vmops._get_image_serial_port_settings( - mock_image_meta) - - def test_get_image_serial_port_settings(self): - logging_port = 1 - interactive_port = 2 - - ret_val = self._mock_get_port_settings(logging_port, interactive_port) - - expected_serial_ports = { - logging_port: constants.SERIAL_PORT_TYPE_RO, - interactive_port: constants.SERIAL_PORT_TYPE_RW, - } - - self.assertEqual(expected_serial_ports, ret_val) - - def test_get_image_serial_port_settings_exception(self): - self._mock_get_port_settings(1, 3) - - def test_get_image_serial_port_settings_single_port(self): - interactive_port = 1 - - ret_val = self._mock_get_port_settings(interactive_port, - interactive_port) - - expected_serial_ports = { - interactive_port: constants.SERIAL_PORT_TYPE_RW - } - self.assertEqual(expected_serial_ports, ret_val) - - @mock.patch.object(vmops.VMOps, '_check_vtpm_requirements') - @mock.patch.object(vmops.VMOps, '_feature_requested') - @mock.patch.object(vmops.VMOps, '_create_fsk') - def _test_configure_secure_vm(self, mock_create_fsk, - mock_feature_requested, - mock_check_vtpm_requirements, - requires_shielded, requires_encryption): - instance = mock.MagicMock() - mock_tmp_file = self._vmops._pathutils.temporary_file - mock_tmp_file.return_value.__enter__.side_effect = [ - self._FAKE_FSK_FILE_PATH, self._FAKE_PDK_FILE_PATH] - mock_feature_requested.side_effect = [requires_shielded, - requires_encryption] - - self._vmops._configure_secure_vm(mock.sentinel.context, instance, - mock.sentinel.image_meta, - mock.sentinel.secure_boot_enabled) - - expected_calls = [mock.call(instance, - mock.sentinel.image_meta, - constants.IMAGE_PROP_VTPM_SHIELDED)] - if not requires_shielded: - expected_calls.append(mock.call(instance, - mock.sentinel.image_meta, - constants.IMAGE_PROP_VTPM)) - mock_feature_requested.assert_has_calls(expected_calls) - - mock_check_vtpm_requirements.assert_called_with(instance, - mock.sentinel.image_meta, mock.sentinel.secure_boot_enabled) - self._vmops._vmutils.add_vtpm.assert_called_once_with( - instance.name, self._FAKE_PDK_FILE_PATH, - shielded=requires_shielded) - self._vmops._vmutils.provision_vm.assert_called_once_with( - instance.name, self._FAKE_FSK_FILE_PATH, self._FAKE_PDK_FILE_PATH) - - def test_configure_secure_vm_shielded(self): - self._test_configure_secure_vm(requires_shielded=True, - requires_encryption=True) - - def test_configure_secure_vm_encryption(self): - self._test_configure_secure_vm(requires_shielded=False, - requires_encryption=True) - - @mock.patch.object(vmops.VMOps,
'_check_vtpm_requirements') - @mock.patch.object(vmops.VMOps, '_feature_requested') - def test_configure_regular_vm(self, mock_feature_requested, - mock_check_vtpm_requirements): - mock_feature_requested.side_effect = [False, False] - - self._vmops._configure_secure_vm(mock.sentinel.context, - mock.MagicMock(), - mock.sentinel.image_meta, - mock.sentinel.secure_boot_enabled) - - self.assertFalse(mock_check_vtpm_requirements.called) - - def _test_feature_requested(self, image_prop, image_prop_required): - mock_instance = mock.MagicMock() - mock_image_meta = {'properties': {image_prop: image_prop_required}} - - feature_requested = image_prop_required == constants.REQUIRED - - result = self._vmops._feature_requested(mock_instance, - mock_image_meta, - image_prop) - self.assertEqual(feature_requested, result) - - def test_vtpm_image_required(self): - self._test_feature_requested( - image_prop=constants.IMAGE_PROP_VTPM_SHIELDED, - image_prop_required=constants.REQUIRED) - - def test_vtpm_image_disabled(self): - self._test_feature_requested( - image_prop=constants.IMAGE_PROP_VTPM_SHIELDED, - image_prop_required=constants.DISABLED) - - def _test_check_vtpm_requirements(self, os_type='windows', - secure_boot_enabled=True, - guarded_host=True): - mock_instance = mock.MagicMock() - mock_image_meta = {'properties': {'os_type': os_type}} - self._vmops._hostutils.is_host_guarded.return_value = guarded_host - - if (not secure_boot_enabled or not guarded_host or - os_type not in os_win_const.VTPM_SUPPORTED_OS): - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._check_vtpm_requirements, - mock_instance, - mock_image_meta, - secure_boot_enabled) - else: - self._vmops._check_vtpm_requirements(mock_instance, - mock_image_meta, - secure_boot_enabled) - - def test_vtpm_requirements_all_satisfied(self): - self._test_check_vtpm_requirements() - - def test_vtpm_requirement_no_secureboot(self): - self._test_check_vtpm_requirements(secure_boot_enabled=False) - - def test_vtpm_requirement_not_supported_os(self): - self._test_check_vtpm_requirements( - os_type=mock.sentinel.unsupported_os) - - def test_vtpm_requirement_host_not_guarded(self): - self._test_check_vtpm_requirements(guarded_host=False) - - @mock.patch.object(vmops.VMOps, '_get_fsk_data') - def test_create_fsk(self, mock_get_fsk_data): - mock_instance = mock.MagicMock() - fsk_pairs = mock_get_fsk_data.return_value - - self._vmops._create_fsk(mock_instance, mock.sentinel.fsk_filename) - mock_get_fsk_data.assert_called_once_with(mock_instance) - self._vmops._vmutils.populate_fsk.assert_called_once_with( - mock.sentinel.fsk_filename, fsk_pairs) - - def _test_get_fsk_data(self, metadata, instance_name, - expected_fsk_pairs=None): - mock_instance = mock.MagicMock() - mock_instance.metadata = metadata - mock_instance.hostname = instance_name - - result = self._vmops._get_fsk_data(mock_instance) - self.assertEqual(expected_fsk_pairs, result) - - def test_get_fsk_data_no_computername(self): - metadata = {'TimeZone': mock.sentinel.timezone} - expected_fsk_pairs = {'@@ComputerName@@': mock.sentinel.instance_name} - self._test_get_fsk_data(metadata, - mock.sentinel.instance_name, - expected_fsk_pairs) - - def test_get_fsk_data_with_computername(self): - metadata = {'fsk:ComputerName': mock.sentinel.instance_name, - 'fsk:TimeZone': mock.sentinel.timezone} - expected_fsk_pairs = {'@@ComputerName@@': mock.sentinel.instance_name, - '@@TimeZone@@': mock.sentinel.timezone} - self._test_get_fsk_data(metadata, - mock.sentinel.instance_name, -
expected_fsk_pairs) - - def test_get_fsk_data_computername_exception(self): - mock_instance = mock.MagicMock() - mock_instance.metadata = { - 'fsk:ComputerName': mock.sentinel.computer_name, - 'fsk:TimeZone': mock.sentinel.timezone} - mock_instance.hostname = mock.sentinel.instance_name - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._get_fsk_data, - mock_instance) - - @ddt.data({'vm_state': os_win_const.HYPERV_VM_STATE_DISABLED}, - {'vm_state': os_win_const.HYPERV_VM_STATE_SUSPENDED}, - {'vm_state': os_win_const.HYPERV_VM_STATE_SUSPENDED, - 'allow_paused': True}, - {'vm_state': os_win_const.HYPERV_VM_STATE_PAUSED}, - {'vm_state': os_win_const.HYPERV_VM_STATE_PAUSED, - 'allow_paused': True}, - {'allow_paused': True}) - @ddt.unpack - @mock.patch.object(vmops.VMOps, 'pause') - @mock.patch.object(vmops.VMOps, 'suspend') - @mock.patch.object(vmops.VMOps, '_set_vm_state') - def test_prepare_for_volume_snapshot( - self, mock_set_state, mock_suspend, mock_pause, - vm_state=os_win_const.HYPERV_VM_STATE_ENABLED, - allow_paused=False): - self._vmops._vmutils.get_vm_state.return_value = vm_state - - expect_instance_suspend = not allow_paused and vm_state not in [ - os_win_const.HYPERV_VM_STATE_DISABLED, - os_win_const.HYPERV_VM_STATE_SUSPENDED] - expect_instance_pause = allow_paused and vm_state == ( - os_win_const.HYPERV_VM_STATE_ENABLED) - - with self._vmops.prepare_for_volume_snapshot( - mock.sentinel.instance, allow_paused): - self._vmutils.get_vm_state.assert_called_once_with( - mock.sentinel.instance.name) - - if expect_instance_suspend: - mock_suspend.assert_called_once_with(mock.sentinel.instance) - else: - mock_suspend.assert_not_called() - - if expect_instance_pause: - mock_pause.assert_called_once_with(mock.sentinel.instance) - else: - mock_pause.assert_not_called() - - # We expect the previous instance state to be restored. 
- if expect_instance_suspend or expect_instance_pause: - mock_set_state.assert_called_once_with(mock.sentinel.instance, - vm_state) - else: - mock_set_state.assert_not_called() - - @ddt.data({}, - {'instance_found': False}, - {'uuid_found': True}) - def test_get_instance_uuid(self, instance_found=True, uuid_found=True): - if instance_found: - side_effect = (mock.sentinel.instance_uuid - if uuid_found else None, ) - else: - side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.instance_name) - - self._vmutils.get_instance_uuid.side_effect = side_effect - - instance_uuid = self._vmops.get_instance_uuid( - mock.sentinel.instance_name) - - self._vmutils.get_instance_uuid.assert_called_once_with( - mock.sentinel.instance_name) - expected_uuid = (mock.sentinel.instance_uuid - if instance_found and uuid_found else None) - self.assertEqual(expected_uuid, instance_uuid) - - def test_get_instance_uuid_missing_but_expected(self): - self._vmutils.get_instance_uuid.side_effect = ( - os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.instance_name)) - - self.assertRaises(os_win_exc.HyperVVMNotFoundException, - self._vmops.get_instance_uuid, - mock.sentinel.instance_name, - expect_existing=True) - - @ddt.data(virtevent.EVENT_LIFECYCLE_STARTED, - virtevent.EVENT_LIFECYCLE_STOPPED) - @mock.patch.object(vmops.VMOps, 'configure_instance_metrics') - @mock.patch.object(utils, 'spawn_n', - lambda f, *args, **kwargs: f(*args, **kwargs)) - def test_instance_state_change_callback(self, transition, - mock_configure_metrics): - event = eventhandler.HyperVLifecycleEvent( - mock.sentinel.uuid, - mock.sentinel.name, - transition) - - self._vmops.instance_state_change_callback(event) - - serialops = self._vmops._serial_console_ops - if transition == virtevent.EVENT_LIFECYCLE_STARTED: - serialops.start_console_handler.assert_called_once_with(event.name) - mock_configure_metrics.assert_called_once_with( - event.name, enable_network_metrics=True) - else: - serialops.stop_console_handler.assert_called_once_with(event.name) diff --git a/compute_hyperv/tests/unit/test_volumeops.py b/compute_hyperv/tests/unit/test_volumeops.py deleted file mode 100644 index 1a9a44bf..00000000 --- a/compute_hyperv/tests/unit/test_volumeops.py +++ /dev/null @@ -1,1208 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib -import os -from unittest import mock - -import ddt -from nova.compute import task_states -from nova import exception -from nova import test -from nova.tests.unit import fake_block_device -from os_brick.initiator import connector -from os_win import constants as os_win_const -from oslo_utils import units - -from compute_hyperv.nova import block_device_manager -import compute_hyperv.nova.conf -from compute_hyperv.nova import constants -from compute_hyperv.nova import vmops -from compute_hyperv.nova import volumeops -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - -CONF = compute_hyperv.nova.conf.CONF - -connection_data = {'volume_id': 'fake_vol_id', - 'target_lun': mock.sentinel.fake_lun, - 'target_iqn': mock.sentinel.fake_iqn, - 'target_portal': mock.sentinel.fake_portal, - 'auth_method': 'chap', - 'auth_username': mock.sentinel.fake_user, - 'auth_password': mock.sentinel.fake_pass} - - -def get_fake_block_dev_info(dev_count=1): - return {'block_device_mapping': [ - fake_block_device.AnonFakeDbBlockDeviceDict({'source_type': 'volume'}) - for dev in range(dev_count)] - } - - -def get_fake_connection_info(**kwargs): - return {'data': dict(connection_data, **kwargs), - 'serial': mock.sentinel.serial} - - -@ddt.ddt -class VolumeOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for VolumeOps class.""" - - _autospec_classes = [ - volumeops.cinder.API, - ] - - def setUp(self): - super(VolumeOpsTestCase, self).setUp() - self._volumeops = volumeops.VolumeOps() - self._volume_api = self._volumeops._volume_api - - def test_get_volume_driver(self): - fake_conn_info = {'driver_volume_type': mock.sentinel.fake_driver_type} - self._volumeops.volume_drivers[mock.sentinel.fake_driver_type] = ( - mock.sentinel.fake_driver) - - result = self._volumeops._get_volume_driver( - connection_info=fake_conn_info) - self.assertEqual(mock.sentinel.fake_driver, result) - - def test_get_volume_driver_exception(self): - fake_conn_info = {'driver_volume_type': 'fake_driver'} - self.assertRaises(exception.VolumeDriverNotFound, - self._volumeops._get_volume_driver, - connection_info=fake_conn_info) - - def test_validate_host_configuration(self): - self._volumeops.volume_drivers = { - constants.STORAGE_PROTOCOL_SMBFS: mock.Mock( - side_effect=exception.ValidationError), - constants.STORAGE_PROTOCOL_ISCSI: mock.Mock( - side_effect=exception.ValidationError), - constants.STORAGE_PROTOCOL_FC: mock.Mock() - } - - self._volumeops.validate_host_configuration() - - for volume_drv in self._volumeops.volume_drivers.values(): - volume_drv.validate_host_configuration.assert_called_once_with() - - @mock.patch.object(volumeops.VolumeOps, 'attach_volume') - def test_attach_volumes(self, mock_attach_volume): - block_device_info = get_fake_block_dev_info() - - self._volumeops.attach_volumes( - mock.sentinel.context, - block_device_info['block_device_mapping'], - mock.sentinel.instance) - - mock_attach_volume.assert_called_once_with( - mock.sentinel.context, - block_device_info['block_device_mapping'][0]['connection_info'], - mock.sentinel.instance) - - def test_fix_instance_volume_disk_paths_empty_bdm(self): - self._volumeops.fix_instance_volume_disk_paths( - mock.sentinel.instance_name, - block_device_info={}) - self.assertFalse( - self._volumeops._vmutils.get_vm_physical_disk_mapping.called) - - @mock.patch.object(volumeops.VolumeOps, 'get_disk_path_mapping') - def test_fix_instance_volume_disk_paths(self, mock_get_disk_path_mapping): - block_device_info = 
get_fake_block_dev_info() - - mock_disk1 = { - 'mounted_disk_path': mock.sentinel.mounted_disk1_path, - 'resource_path': mock.sentinel.resource1_path - } - mock_disk2 = { - 'mounted_disk_path': mock.sentinel.mounted_disk2_path, - 'resource_path': mock.sentinel.resource2_path - } - - mock_vm_disk_mapping = { - mock.sentinel.disk1_serial: mock_disk1, - mock.sentinel.disk2_serial: mock_disk2 - } - # In this case, only the first disk needs to be updated. - mock_phys_disk_path_mapping = { - mock.sentinel.disk1_serial: mock.sentinel.actual_disk1_path, - mock.sentinel.disk2_serial: mock.sentinel.mounted_disk2_path - } - - vmutils = self._volumeops._vmutils - vmutils.get_vm_physical_disk_mapping.return_value = ( - mock_vm_disk_mapping) - - mock_get_disk_path_mapping.return_value = mock_phys_disk_path_mapping - - self._volumeops.fix_instance_volume_disk_paths( - mock.sentinel.instance_name, - block_device_info) - - vmutils.get_vm_physical_disk_mapping.assert_called_once_with( - mock.sentinel.instance_name) - mock_get_disk_path_mapping.assert_called_once_with( - block_device_info) - vmutils.set_disk_host_res.assert_called_once_with( - mock.sentinel.resource1_path, - mock.sentinel.actual_disk1_path) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_disconnect_volumes(self, mock_get_volume_driver): - block_device_info = get_fake_block_dev_info() - block_device_mapping = block_device_info['block_device_mapping'] - fake_volume_driver = mock_get_volume_driver.return_value - - self._volumeops.disconnect_volumes(block_device_info) - fake_volume_driver.disconnect_volume.assert_called_once_with( - block_device_mapping[0]['connection_info']) - - @ddt.data({}, - {'attach_failed': True}, - {'update_device_metadata': True}) - @ddt.unpack - @mock.patch('time.sleep') - @mock.patch.object(volumeops.VolumeOps, 'detach_volume') - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - @mock.patch.object(vmops.VMOps, 'update_device_metadata') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - 'set_volume_bdm_connection_info') - def test_attach_volume(self, mock_set_bdm_conn_info, - mock_update_dev_meta, - mock_get_volume_driver, - mock_detach, - mock_sleep, - attach_failed=False, - update_device_metadata=False): - mock_instance = fake_instance.fake_instance_obj() - fake_conn_info = get_fake_connection_info( - qos_specs=mock.sentinel.qos_specs) - fake_volume_driver = mock_get_volume_driver.return_value - - expected_try_count = 1 - if attach_failed: - expected_try_count += CONF.hyperv.volume_attach_retry_count - - fake_volume_driver.set_disk_qos_specs.side_effect = ( - test.TestingException) - - self.assertRaises(exception.VolumeAttachFailed, - self._volumeops.attach_volume, - mock.sentinel.context, - fake_conn_info, - mock_instance, - mock.sentinel.disk_bus, - update_device_metadata) - else: - self._volumeops.attach_volume( - mock.sentinel.context, - fake_conn_info, - mock_instance, - mock.sentinel.disk_bus, - update_device_metadata) - - mock_get_volume_driver.assert_any_call( - fake_conn_info) - fake_volume_driver.attach_volume.assert_has_calls( - [mock.call(fake_conn_info, - mock_instance.name, - mock.sentinel.disk_bus)] * expected_try_count) - fake_volume_driver.set_disk_qos_specs.assert_has_calls( - [mock.call(fake_conn_info, - mock.sentinel.qos_specs)] * expected_try_count) - - if update_device_metadata: - mock_set_bdm_conn_info.assert_has_calls( - [mock.call(mock.sentinel.context, - mock_instance, - fake_conn_info)] * expected_try_count) - 
mock_update_dev_meta.assert_has_calls( - [mock.call(mock.sentinel.context, - mock_instance)] * expected_try_count) - else: - mock_set_bdm_conn_info.assert_not_called() - mock_update_dev_meta.assert_not_called() - - if attach_failed: - mock_detach.assert_called_once_with( - mock.sentinel.context, - fake_conn_info, - mock_instance, - update_device_metadata) - mock_sleep.assert_has_calls( - [mock.call(CONF.hyperv.volume_attach_retry_interval)] * - CONF.hyperv.volume_attach_retry_count) - else: - mock_sleep.assert_not_called() - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_disconnect_volume(self, mock_get_volume_driver): - fake_volume_driver = mock_get_volume_driver.return_value - - self._volumeops.disconnect_volume(mock.sentinel.conn_info) - - mock_get_volume_driver.assert_called_once_with( - mock.sentinel.conn_info) - fake_volume_driver.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_info) - - @ddt.data(True, False) - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - @mock.patch.object(vmops.VMOps, 'update_device_metadata') - def test_detach_volume(self, update_device_metadata, - mock_update_dev_meta, - mock_get_volume_driver): - mock_instance = fake_instance.fake_instance_obj() - fake_volume_driver = mock_get_volume_driver.return_value - fake_conn_info = {'data': 'fake_conn_info_data'} - - self._volumeops.detach_volume(mock.sentinel.context, - fake_conn_info, - mock_instance, - update_device_metadata) - - mock_get_volume_driver.assert_called_once_with( - fake_conn_info) - fake_volume_driver.detach_volume.assert_called_once_with( - fake_conn_info, mock_instance.name) - fake_volume_driver.disconnect_volume.assert_called_once_with( - fake_conn_info) - - if update_device_metadata: - mock_update_dev_meta.assert_called_once_with( - mock.sentinel.context, mock_instance) - else: - mock_update_dev_meta.assert_not_called() - - @mock.patch.object(connector, 'get_connector_properties') - def test_get_volume_connector(self, mock_get_connector): - conn = self._volumeops.get_volume_connector() - - mock_get_connector.assert_called_once_with( - root_helper=None, - my_ip=CONF.my_block_storage_ip, - multipath=CONF.hyperv.use_multipath_io, - enforce_multipath=True, - host=CONF.host) - self.assertEqual(mock_get_connector.return_value, conn) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_connect_volumes(self, mock_get_volume_driver): - block_device_info = get_fake_block_dev_info() - - self._volumeops.connect_volumes(block_device_info) - - init_vol_conn = ( - mock_get_volume_driver.return_value.connect_volume) - init_vol_conn.assert_called_once_with( - block_device_info['block_device_mapping'][0]['connection_info']) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_get_disk_path_mapping(self, mock_get_vol_drv): - block_device_info = get_fake_block_dev_info(dev_count=2) - block_device_mapping = block_device_info['block_device_mapping'] - - block_dev_conn_info = get_fake_connection_info() - block_dev_conn_info['serial'] = mock.sentinel.block_dev_serial - - # We expect this to be filtered out if only block devices are - # requested. 
- disk_file_conn_info = get_fake_connection_info() - disk_file_conn_info['serial'] = mock.sentinel.disk_file_serial - - block_device_mapping[0]['connection_info'] = block_dev_conn_info - block_device_mapping[1]['connection_info'] = disk_file_conn_info - - block_dev_drv = mock.Mock(_is_block_dev=True) - mock_get_vol_drv.side_effect = [block_dev_drv, - mock.Mock(_is_block_dev=False)] - - block_dev_drv.get_disk_resource_path.return_value = ( - mock.sentinel.disk_path) - - resulted_disk_path_mapping = self._volumeops.get_disk_path_mapping( - block_device_info, block_dev_only=True) - - block_dev_drv.get_disk_resource_path.assert_called_once_with( - block_dev_conn_info) - expected_disk_path_mapping = { - mock.sentinel.block_dev_serial: mock.sentinel.disk_path - } - self.assertEqual(expected_disk_path_mapping, - resulted_disk_path_mapping) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_get_disk_resource_path(self, mock_get_volume_driver): - fake_conn_info = get_fake_connection_info() - fake_volume_driver = mock_get_volume_driver.return_value - - resulted_disk_path = self._volumeops.get_disk_resource_path( - fake_conn_info) - - mock_get_volume_driver.assert_called_once_with(fake_conn_info) - get_mounted_disk = fake_volume_driver.get_disk_resource_path - get_mounted_disk.assert_called_once_with(fake_conn_info) - self.assertEqual(get_mounted_disk.return_value, - resulted_disk_path) - - def test_bytes_per_sec_to_iops(self): - no_bytes = 15 * units.Ki - expected_iops = 2 - - resulted_iops = self._volumeops.bytes_per_sec_to_iops(no_bytes) - self.assertEqual(expected_iops, resulted_iops) - - @mock.patch.object(volumeops.LOG, 'warning') - def test_validate_qos_specs(self, mock_warning): - supported_qos_specs = [mock.sentinel.spec1, mock.sentinel.spec2] - requested_qos_specs = {mock.sentinel.spec1: mock.sentinel.val, - mock.sentinel.spec3: mock.sentinel.val2} - - self._volumeops.validate_qos_specs(requested_qos_specs, - supported_qos_specs) - self.assertTrue(mock_warning.called) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - @mock.patch.object(volumeops.driver_block_device, 'convert_volume') - @mock.patch.object(volumeops.objects, 'BlockDeviceMapping') - def test_volume_snapshot_create(self, mock_bdm_cls, mock_convert_volume, - mock_get_vol_drv): - mock_instance = mock.Mock() - fake_create_info = {'snapshot_id': mock.sentinel.snapshot_id} - - mock_bdm = mock_bdm_cls.get_by_volume_and_instance.return_value - mock_driver_bdm = mock_convert_volume.return_value - mock_vol_driver = mock_get_vol_drv.return_value - - self._volumeops.volume_snapshot_create( - mock.sentinel.context, mock_instance, - mock.sentinel.volume_id, fake_create_info) - - mock_bdm_cls.get_by_volume_and_instance.assert_called_once_with( - mock.sentinel.context, mock.sentinel.volume_id, - mock_instance.uuid) - mock_convert_volume.assert_called_once_with(mock_bdm) - mock_get_vol_drv.assert_called_once_with( - mock_driver_bdm['connection_info']) - - mock_vol_driver.create_snapshot.assert_called_once_with( - mock_driver_bdm['connection_info'], - mock_instance, - fake_create_info) - mock_driver_bdm.save.assert_called_once_with() - - self._volume_api.update_snapshot_status.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.snapshot_id, - 'creating') - - self.assertIsNone(mock_instance.task_state) - mock_instance.save.assert_has_calls( - [mock.call(expected_task_state=[None]), - mock.call(expected_task_state=[ - task_states.IMAGE_SNAPSHOT_PENDING])]) - - 
@mock.patch.object(volumeops.objects, 'BlockDeviceMapping') - def test_volume_snapshot_create_exc(self, mock_bdm_cls): - mock_instance = mock.Mock() - fake_create_info = {'snapshot_id': mock.sentinel.snapshot_id} - - mock_bdm_cls.get_by_volume_and_instance.side_effect = ( - test.TestingException) - - self.assertRaises(test.TestingException, - self._volumeops.volume_snapshot_create, - mock.sentinel.context, - mock_instance, - mock.sentinel.volume_id, - fake_create_info) - self._volume_api.update_snapshot_status.assert_called_once_with( - mock.sentinel.context, mock.sentinel.snapshot_id, 'error') - - self.assertIsNone(mock_instance.task_state) - mock_instance.save.assert_has_calls( - [mock.call(expected_task_state=[None]), - mock.call(expected_task_state=[ - task_states.IMAGE_SNAPSHOT_PENDING])]) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - @mock.patch.object(volumeops.driver_block_device, 'convert_volume') - @mock.patch.object(volumeops.objects, 'BlockDeviceMapping') - def test_volume_snapshot_delete(self, mock_bdm_cls, mock_convert_volume, - mock_get_vol_drv): - mock_instance = mock.Mock() - - mock_bdm = mock_bdm_cls.get_by_volume_and_instance.return_value - mock_driver_bdm = mock_convert_volume.return_value - mock_vol_driver = mock_get_vol_drv.return_value - - self._volumeops.volume_snapshot_delete( - mock.sentinel.context, mock_instance, - mock.sentinel.volume_id, - mock.sentinel.snapshot_id, - mock.sentinel.delete_info) - - mock_bdm_cls.get_by_volume_and_instance.assert_called_once_with( - mock.sentinel.context, mock.sentinel.volume_id, - mock_instance.uuid) - mock_convert_volume.assert_called_once_with(mock_bdm) - mock_get_vol_drv.assert_called_once_with( - mock_driver_bdm['connection_info']) - - mock_vol_driver.delete_snapshot.assert_called_once_with( - mock_driver_bdm['connection_info'], - mock_instance, - mock.sentinel.delete_info) - mock_driver_bdm.save.assert_called_once_with() - - self._volume_api.update_snapshot_status.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.snapshot_id, - 'deleting') - - self.assertIsNone(mock_instance.task_state) - mock_instance.save.assert_has_calls( - [mock.call(expected_task_state=[None]), - mock.call(expected_task_state=[ - task_states.IMAGE_SNAPSHOT_PENDING])]) - - @mock.patch.object(volumeops.objects, 'BlockDeviceMapping') - def test_volume_snapshot_delete_exc(self, mock_bdm_cls): - mock_instance = mock.Mock() - - mock_bdm_cls.get_by_volume_and_instance.side_effect = ( - test.TestingException) - - self.assertRaises(test.TestingException, - self._volumeops.volume_snapshot_delete, - mock.sentinel.context, - mock_instance, - mock.sentinel.volume_id, - mock.sentinel.snapshot_id, - mock.sentinel.delete_info) - self._volume_api.update_snapshot_status.assert_called_once_with( - mock.sentinel.context, mock.sentinel.snapshot_id, 'error_deleting') - - self.assertIsNone(mock_instance.task_state) - mock_instance.save.assert_has_calls( - [mock.call(expected_task_state=[None]), - mock.call(expected_task_state=[ - task_states.IMAGE_SNAPSHOT_PENDING])]) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_get_disk_attachment_info(self, mock_get_volume_driver): - fake_conn_info = get_fake_connection_info() - ret_val = self._volumeops.get_disk_attachment_info(fake_conn_info) - - mock_vol_driver = mock_get_volume_driver.return_value - mock_vol_driver.get_disk_attachment_info.assert_called_once_with( - fake_conn_info) - - self.assertEqual( - mock_vol_driver.get_disk_attachment_info.return_value, - 
ret_val) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_extend_volume(self, mock_get_volume_driver): - fake_conn_info = get_fake_connection_info() - self._volumeops.extend_volume(fake_conn_info) - - mock_vol_driver = mock_get_volume_driver.return_value - mock_vol_driver.extend_volume.assert_called_once_with( - fake_conn_info) - - -@ddt.ddt -class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for Hyper-V BaseVolumeDriver class.""" - - def setUp(self): - super(BaseVolumeDriverTestCase, self).setUp() - - self._base_vol_driver = volumeops.BaseVolumeDriver() - self._base_vol_driver._conn = mock.Mock() - self._vmutils = self._base_vol_driver._vmutils - self._migrutils = self._base_vol_driver._migrutils - self._diskutils = self._base_vol_driver._diskutils - self._metricsutils = self._base_vol_driver._metricsutils - self._conn = self._base_vol_driver._conn - - @mock.patch.object(connector.InitiatorConnector, 'factory') - def test_connector(self, mock_conn_factory): - self._base_vol_driver._conn = None - self._base_vol_driver._protocol = mock.sentinel.protocol - self._base_vol_driver._extra_connector_args = dict( - fake_conn_arg=mock.sentinel.conn_val) - - conn = self._base_vol_driver._connector - - self.assertEqual(mock_conn_factory.return_value, conn) - mock_conn_factory.assert_called_once_with( - protocol=mock.sentinel.protocol, - root_helper=None, - use_multipath=CONF.hyperv.use_multipath_io, - device_scan_attempts=CONF.hyperv.mounted_disk_query_retry_count, - device_scan_interval=( - CONF.hyperv.mounted_disk_query_retry_interval), - **self._base_vol_driver._extra_connector_args) - - def test_connect_volume(self): - conn_info = get_fake_connection_info() - - dev_info = self._base_vol_driver.connect_volume(conn_info) - expected_dev_info = self._conn.connect_volume.return_value - - self.assertEqual(expected_dev_info, dev_info) - self._conn.connect_volume.assert_called_once_with( - conn_info['data']) - - def test_disconnect_volume(self): - conn_info = get_fake_connection_info() - - self._base_vol_driver.disconnect_volume(conn_info) - - self._conn.disconnect_volume.assert_called_once_with( - conn_info['data']) - - @mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path') - def _test_get_disk_resource_path_by_conn_info(self, - mock_get_disk_res_path, - disk_found=True): - conn_info = get_fake_connection_info() - mock_vol_paths = [mock.sentinel.disk_path] if disk_found else [] - self._conn.get_volume_paths.return_value = mock_vol_paths - - if disk_found: - disk_res_path = self._base_vol_driver.get_disk_resource_path( - conn_info) - - self._conn.get_volume_paths.assert_called_once_with( - conn_info['data']) - self.assertEqual(mock_get_disk_res_path.return_value, - disk_res_path) - mock_get_disk_res_path.assert_called_once_with( - mock.sentinel.disk_path) - else: - self.assertRaises(exception.DiskNotFound, - self._base_vol_driver.get_disk_resource_path, - conn_info) - - def test_get_existing_disk_res_path(self): - self._test_get_disk_resource_path_by_conn_info() - - def test_get_unfound_disk_res_path(self): - self._test_get_disk_resource_path_by_conn_info(disk_found=False) - - def test_get_block_dev_res_path(self): - self._base_vol_driver._is_block_dev = True - - mock_get_dev_number = ( - self._diskutils.get_device_number_from_device_name) - mock_get_dev_number.return_value = mock.sentinel.dev_number - self._vmutils.get_mounted_disk_by_drive_number.return_value = ( - mock.sentinel.disk_path) - - disk_path = 
self._base_vol_driver._get_disk_res_path( - mock.sentinel.dev_name) - - mock_get_dev_number.assert_called_once_with(mock.sentinel.dev_name) - self._vmutils.get_mounted_disk_by_drive_number.assert_called_once_with( - mock.sentinel.dev_number) - - self.assertEqual(mock.sentinel.disk_path, disk_path) - - def test_get_block_dev_res_path_missing(self): - self._base_vol_driver._is_block_dev = True - - self._vmutils.get_mounted_disk_by_drive_number.return_value = None - - self.assertRaises(exception.DiskNotFound, - self._base_vol_driver._get_disk_res_path, - mock.sentinel.dev_name) - - def test_get_virt_disk_res_path(self): - # For virtual disk images, we expect the resource path to be the - # actual image path, as opposed to passthrough disks, in which case we - # need the Msvm_DiskDrive resource path when attaching it to a VM. - self._base_vol_driver._is_block_dev = False - - path = self._base_vol_driver._get_disk_res_path( - mock.sentinel.disk_path) - self.assertEqual(mock.sentinel.disk_path, path) - - @mock.patch.object(volumeops.BaseVolumeDriver, - '_check_san_policy') - @ddt.data(True, False) - def test_validate_host_configuration(self, is_block_dev, - fake_check_san_policy): - self._base_vol_driver._is_block_dev = is_block_dev - - self._base_vol_driver.validate_host_configuration() - - if is_block_dev: - fake_check_san_policy.assert_called_once_with() - else: - fake_check_san_policy.assert_not_called() - - @ddt.data(os_win_const.DISK_POLICY_OFFLINE_ALL, - os_win_const.DISK_POLICY_ONLINE_ALL) - def test_check_san_policy(self, disk_policy): - self._diskutils.get_new_disk_policy.return_value = disk_policy - - accepted_policies = [os_win_const.DISK_POLICY_OFFLINE_SHARED, - os_win_const.DISK_POLICY_OFFLINE_ALL] - - if disk_policy not in accepted_policies: - self.assertRaises( - exception.ValidationError, - self._base_vol_driver._check_san_policy) - else: - self._base_vol_driver._check_san_policy() - - @mock.patch.object(volumeops.BaseVolumeDriver, - '_configure_disk_metrics') - @mock.patch.object(volumeops.BaseVolumeDriver, - '_get_disk_res_path') - @mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_ctrl_and_slot') - @mock.patch.object(volumeops.BaseVolumeDriver, - 'connect_volume') - @mock.patch.object(volumeops.BaseVolumeDriver, - 'validate_host_configuration') - def _test_attach_volume(self, mock_validate_host_config, - mock_connect_volume, - mock_get_disk_ctrl_and_slot, - mock_get_disk_res_path, - mock_configure_metrics, - is_block_dev=True): - connection_info = get_fake_connection_info() - self._base_vol_driver._is_block_dev = is_block_dev - mock_connect_volume.return_value = dict(path=mock.sentinel.raw_path) - - mock_get_disk_res_path.return_value = ( - mock.sentinel.disk_path) - mock_get_disk_ctrl_and_slot.return_value = ( - mock.sentinel.ctrller_path, - mock.sentinel.slot) - - self._base_vol_driver.attach_volume( - connection_info=connection_info, - instance_name=mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - if is_block_dev: - self._vmutils.attach_volume_to_controller.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.ctrller_path, - mock.sentinel.slot, - mock.sentinel.disk_path, - serial=connection_info['serial']) - else: - self._vmutils.attach_drive.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.disk_path, - mock.sentinel.ctrller_path, - mock.sentinel.slot) - - mock_get_disk_res_path.assert_called_once_with( - mock.sentinel.raw_path) - mock_get_disk_ctrl_and_slot.assert_called_once_with( - 
mock.sentinel.instance_name, mock.sentinel.disk_bus) - mock_validate_host_config.assert_called_once_with() - mock_configure_metrics.assert_called_once_with(mock.sentinel.disk_path) - - def test_attach_volume_image_file(self): - self._test_attach_volume(is_block_dev=False) - - def test_attach_volume_block_dev(self): - self._test_attach_volume(is_block_dev=True) - - def test_detach_volume_planned_vm(self): - self._base_vol_driver.detach_volume(mock.sentinel.connection_info, - mock.sentinel.inst_name) - self._vmutils.detach_vm_disk.assert_not_called() - - @ddt.data({}, - {'metrics_enabled': False}, - {'is_block_dev': True}) - @ddt.unpack - def test_configure_disk_metrics(self, metrics_enabled=True, - is_block_dev=False): - self.flags(enable_instance_metrics_collection=metrics_enabled, - group='hyperv') - self._base_vol_driver._is_block_dev = is_block_dev - - enable_metrics = self._metricsutils.enable_disk_metrics_collection - - self._base_vol_driver._configure_disk_metrics(mock.sentinel.disk_path) - - if metrics_enabled and not is_block_dev: - enable_metrics.assert_called_once_with( - mock.sentinel.disk_path, - is_physical=is_block_dev) - else: - enable_metrics.assert_not_called() - - @ddt.data(True, False) - @mock.patch.object(volumeops.BaseVolumeDriver, - 'get_disk_resource_path') - def test_detach_volume(self, is_block_dev, mock_get_disk_resource_path): - self._migrutils.planned_vm_exists.return_value = False - connection_info = get_fake_connection_info() - self._base_vol_driver._is_block_dev = is_block_dev - - self._base_vol_driver.detach_volume(connection_info, - mock.sentinel.instance_name) - - if is_block_dev: - exp_serial = connection_info['serial'] - exp_disk_res_path = None - self.assertFalse(mock_get_disk_resource_path.called) - else: - exp_serial = None - exp_disk_res_path = mock_get_disk_resource_path.return_value - mock_get_disk_resource_path.assert_called_once_with( - connection_info) - - self._vmutils.detach_vm_disk.assert_called_once_with( - mock.sentinel.instance_name, - exp_disk_res_path, - is_physical=is_block_dev, - serial=exp_serial) - - def test_get_disk_ctrl_and_slot_ide(self): - ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot( - mock.sentinel.instance_name, - disk_bus=constants.CTRL_TYPE_IDE) - - expected_ctrl = self._vmutils.get_vm_ide_controller.return_value - expected_slot = 0 - - self._vmutils.get_vm_ide_controller.assert_called_once_with( - mock.sentinel.instance_name, 0) - - self.assertEqual(expected_ctrl, ctrl) - self.assertEqual(expected_slot, slot) - - def test_get_disk_ctrl_and_slot_scsi(self): - ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot( - mock.sentinel.instance_name, - disk_bus=constants.CTRL_TYPE_SCSI) - - expected_ctrl = self._vmutils.get_vm_scsi_controller.return_value - expected_slot = ( - self._vmutils.get_free_controller_slot.return_value) - - self._vmutils.get_vm_scsi_controller.assert_called_once_with( - mock.sentinel.instance_name) - self._vmutils.get_free_controller_slot.assert_called_once_with( - self._vmutils.get_vm_scsi_controller.return_value) - - self.assertEqual(expected_ctrl, ctrl) - self.assertEqual(expected_slot, slot) - - def test_set_disk_qos_specs(self): - # This base method is a no-op; we'll just make sure - # it doesn't error out.
- self._base_vol_driver.set_disk_qos_specs( - mock.sentinel.conn_info, mock.sentinel.disk_qos_specs) - - @ddt.data(True, False) - @mock.patch.object(volumeops.BaseVolumeDriver, - 'get_disk_resource_path') - def test_get_disk_attachment_info(self, is_block_dev, - mock_get_disk_resource_path): - connection_info = get_fake_connection_info() - self._base_vol_driver._is_block_dev = is_block_dev - - self._base_vol_driver.get_disk_attachment_info(connection_info) - - if is_block_dev: - exp_serial = connection_info['serial'] - exp_disk_res_path = None - self.assertFalse(mock_get_disk_resource_path.called) - else: - exp_serial = None - exp_disk_res_path = mock_get_disk_resource_path.return_value - mock_get_disk_resource_path.assert_called_once_with( - connection_info) - - self._vmutils.get_disk_attachment_info.assert_called_once_with( - exp_disk_res_path, - is_physical=is_block_dev, - serial=exp_serial) - - def test_extend_volume(self): - conn_info = get_fake_connection_info() - - self._base_vol_driver.extend_volume(conn_info) - - self._conn.extend_volume.assert_called_once_with( - conn_info['data']) - - -class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V ISCSIVolumeDriver class.""" - - def test_extra_conn_args(self): - fake_iscsi_initiator = ( - 'PCI\\VEN_1077&DEV_2031&SUBSYS_17E8103C&REV_02\\' - '4&257301f0&0&0010_0') - self.flags(iscsi_initiator_list=[fake_iscsi_initiator], - group='hyperv') - expected_extra_conn_args = dict( - initiator_list=[fake_iscsi_initiator]) - - vol_driver = volumeops.ISCSIVolumeDriver() - - self.assertEqual(expected_extra_conn_args, - vol_driver._extra_connector_args) - - -@ddt.ddt -class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V SMBFSVolumeDriver class.""" - - _autospec_classes = [ - volumeops.pathutils.PathUtils, - ] - - _FAKE_EXPORT_PATH = '//ip/share/' - _FAKE_CONN_INFO = get_fake_connection_info(export=_FAKE_EXPORT_PATH) - - def setUp(self): - super(SMBFSVolumeDriverTestCase, self).setUp() - self._volume_driver = volumeops.SMBFSVolumeDriver() - self._volume_driver._conn = mock.Mock() - self._conn = self._volume_driver._conn - self._vmutils = self._volume_driver._vmutils - self._pathutils = self._volume_driver._pathutils - self._vhdutils = self._volume_driver._vhdutils - - def test_get_export_path(self): - export_path = self._volume_driver._get_export_path( - self._FAKE_CONN_INFO) - expected_path = self._FAKE_EXPORT_PATH.replace('/', '\\') - self.assertEqual(expected_path, export_path) - - @mock.patch.object(volumeops.BaseVolumeDriver, 'attach_volume') - def test_attach_volume(self, mock_attach): - # The tested method will just apply a lock before calling - # the superclass method.
- self._volume_driver.attach_volume( - self._FAKE_CONN_INFO, - mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - mock_attach.assert_called_once_with( - self._FAKE_CONN_INFO, - mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - @mock.patch.object(volumeops.BaseVolumeDriver, 'detach_volume') - def test_detach_volume(self, mock_detach): - self._volume_driver.detach_volume( - self._FAKE_CONN_INFO, - instance_name=mock.sentinel.instance_name) - - mock_detach.assert_called_once_with( - self._FAKE_CONN_INFO, - instance_name=mock.sentinel.instance_name) - - @mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops') - @mock.patch.object(volumeops.VolumeOps, 'validate_qos_specs') - @mock.patch.object(volumeops.BaseVolumeDriver, 'get_disk_resource_path') - def test_set_disk_qos_specs(self, mock_get_disk_path, - mock_validate_qos_specs, - mock_bytes_per_sec_to_iops): - fake_total_bytes_sec = 8 - fake_total_iops_sec = 1 - - storage_qos_specs = {'total_bytes_sec': fake_total_bytes_sec} - expected_supported_specs = ['total_iops_sec', 'total_bytes_sec'] - mock_set_qos_specs = self._volume_driver._vmutils.set_disk_qos_specs - mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec - mock_get_disk_path.return_value = mock.sentinel.disk_path - - self._volume_driver.set_disk_qos_specs(self._FAKE_CONN_INFO, - storage_qos_specs) - - mock_validate_qos_specs.assert_called_once_with( - storage_qos_specs, expected_supported_specs) - mock_bytes_per_sec_to_iops.assert_called_once_with( - fake_total_bytes_sec) - mock_get_disk_path.assert_called_once_with(self._FAKE_CONN_INFO) - mock_set_qos_specs.assert_called_once_with( - mock.sentinel.disk_path, - fake_total_iops_sec) - - @contextlib.contextmanager - def check_prepare_for_vol_snap_mock(self, *args, **kwargs): - # Mocks the corresponding context manager and ensures that - # it has been called with the expected arguments.
- mock_prepare_for_vol_snap = mock.MagicMock() - - patcher = mock.patch.object(vmops.VMOps, - 'prepare_for_volume_snapshot', - mock_prepare_for_vol_snap) - patcher.start() - self.addCleanup(patcher.stop) - - try: - yield - finally: - mock_prepare_for_vol_snap.assert_called_once_with( - *args, **kwargs) - - def _get_fake_disk_attachment_info(self, - ctrl_type=constants.CTRL_TYPE_SCSI): - return dict(controller_type=ctrl_type, - controller_path=mock.sentinel.ctrl_path, - controller_slot=mock.sentinel.ctrl_slot) - - @ddt.data(constants.CTRL_TYPE_SCSI, constants.CTRL_TYPE_IDE) - @mock.patch.object(volumeops.SMBFSVolumeDriver, '_create_snapshot_ide') - @mock.patch.object(volumeops.SMBFSVolumeDriver, '_create_snapshot_scsi') - @mock.patch.object(volumeops.SMBFSVolumeDriver, 'get_disk_resource_path') - def test_create_snapshot(self, ctrl_type, mock_get_disk_res_path, - mock_create_snap_scsi, mock_create_snap_ide): - mock_instance = mock.Mock() - - conn_info = get_fake_connection_info() - mock_att_info = self._get_fake_disk_attachment_info(ctrl_type) - mock_attached_disk_dir = 'fake_share' - mock_attached_disk_name = 'volume-vol_id-hv_guid.vhdx' - mock_attached_disk_path = os.path.join(mock_attached_disk_dir, - mock_attached_disk_name) - mock_new_file_name = 'volume-vol_id-snap_id.vhdx' - fake_create_info = {'new_file': mock_new_file_name} - expected_new_file_path = os.path.join(mock_attached_disk_dir, - mock_new_file_name) - - mock_get_disk_res_path.return_value = mock_attached_disk_path - self._vmutils.get_disk_attachment_info.return_value = mock_att_info - - self._volume_driver.create_snapshot(conn_info, - mock_instance, - fake_create_info) - - if ctrl_type == constants.CTRL_TYPE_SCSI: - mock_create_snap_scsi.assert_called_once_with( - mock_instance, mock_att_info, - mock_attached_disk_path, expected_new_file_path) - else: - mock_create_snap_ide.assert_called_once_with( - mock_instance, mock_attached_disk_path, - expected_new_file_path) - - mock_get_disk_res_path.assert_called_once_with(conn_info) - self._vmutils.get_disk_attachment_info.assert_called_once_with( - mock_attached_disk_path, is_physical=False) - - self.assertEqual(mock_new_file_name, - conn_info['data']['name']) - - def test_create_snapshot_ide(self): - mock_instance = mock.Mock() - - with self.check_prepare_for_vol_snap_mock(mock_instance): - self._volume_driver._create_snapshot_ide( - mock_instance, - mock.sentinel.attached_path, - mock.sentinel.new_path) - - self._vhdutils.create_differencing_vhd.assert_called_once_with( - mock.sentinel.new_path, mock.sentinel.attached_path) - self._vmutils.update_vm_disk_path.assert_called_once_with( - mock.sentinel.attached_path, - mock.sentinel.new_path, - is_physical=False) - - def test_create_snapshot_scsi(self): - mock_instance = mock.Mock() - mock_att_info = self._get_fake_disk_attachment_info() - - with self.check_prepare_for_vol_snap_mock(mock_instance, - allow_paused=True): - self._volume_driver._create_snapshot_scsi( - mock_instance, - mock_att_info, - mock.sentinel.attached_path, - mock.sentinel.new_path) - - self._vmutils.detach_vm_disk.assert_called_once_with( - mock_instance.name, mock.sentinel.attached_path, - is_physical=False) - self._vhdutils.create_differencing_vhd.assert_called_once_with( - mock.sentinel.new_path, mock.sentinel.attached_path) - self._vmutils.attach_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.new_path, - mock_att_info['controller_path'], - mock_att_info['controller_slot']) - - @ddt.data({'merge_latest': True}, - {'ctrl_type': 
constants.CTRL_TYPE_IDE, - 'prep_vm_state': os_win_const.HYPERV_VM_STATE_SUSPENDED}, - {'prep_vm_state': os_win_const.HYPERV_VM_STATE_PAUSED}, - {'merge_latest': True, - 'prep_vm_state': os_win_const.HYPERV_VM_STATE_PAUSED}) - @ddt.unpack - @mock.patch.object(volumeops.SMBFSVolumeDriver, - '_do_delete_snapshot') - @mock.patch.object(volumeops.SMBFSVolumeDriver, 'get_disk_resource_path') - def test_delete_snapshot( - self, mock_get_disk_res_path, - mock_delete_snap, - merge_latest=False, - ctrl_type=constants.CTRL_TYPE_SCSI, - prep_vm_state=os_win_const.HYPERV_VM_STATE_DISABLED): - mock_instance = mock.Mock() - - conn_info = get_fake_connection_info() - - mock_att_info = self._get_fake_disk_attachment_info(ctrl_type) - mock_attached_disk_dir = 'fake_share' - mock_attached_disk_name = 'volume-vol_id-hv_guid.vhdx' - mock_attached_disk_path = os.path.join(mock_attached_disk_dir, - mock_attached_disk_name) - mock_new_top_img = (mock_attached_disk_path if not merge_latest - else 'parent.vhdx') - mock_file_to_merge = (mock_attached_disk_name - if merge_latest - else 'volume-vol_id-snap_id.vhdx') - exp_file_to_merge_path = os.path.join(mock_attached_disk_dir, - mock_file_to_merge) - - mock_delete_info = {'file_to_merge': mock_file_to_merge} - - self._vmutils.get_disk_attachment_info.return_value = mock_att_info - self._vmutils.get_vm_state.return_value = prep_vm_state - mock_get_disk_res_path.return_value = mock_attached_disk_path - mock_delete_snap.return_value = mock_new_top_img - - exp_detach = prep_vm_state == os_win_const.HYPERV_VM_STATE_PAUSED - exp_allow_paused = ctrl_type == constants.CTRL_TYPE_SCSI - - with self.check_prepare_for_vol_snap_mock( - mock_instance, - allow_paused=exp_allow_paused): - self._volume_driver.delete_snapshot(conn_info, - mock_instance, - mock_delete_info) - - mock_get_disk_res_path.assert_called_once_with(conn_info) - self._vmutils.get_disk_attachment_info.assert_called_once_with( - mock_attached_disk_path, is_physical=False) - self._vmutils.get_vm_state.assert_called_once_with( - mock_instance.name) - - mock_delete_snap.assert_called_once_with(mock_attached_disk_path, - exp_file_to_merge_path) - - if exp_detach: - self._vmutils.detach_vm_disk.assert_called_once_with( - mock_instance.name, - mock_attached_disk_path, - is_physical=False) - self._vmutils.attach_drive.assert_called_once_with( - mock_instance.name, - mock_new_top_img, - mock_att_info['controller_path'], - mock_att_info['controller_slot']) - else: - self.assertFalse(self._vmutils.detach_vm_disk.called) - self.assertFalse(self._vmutils.attach_drive.called) - - if merge_latest: - self._vmutils.update_vm_disk_path.assert_called_once_with( - mock_attached_disk_path, - mock_new_top_img, - is_physical=False) - else: - self.assertFalse(self._vmutils.update_vm_disk_path.called) - - self.assertEqual(os.path.basename(mock_new_top_img), - conn_info['data']['name']) - - @ddt.data({'merge_latest': True}, - {'merge_latest': False}) - @ddt.unpack - @mock.patch.object(volumeops.SMBFSVolumeDriver, - '_get_higher_image_from_chain') - def test_do_delete_snapshot(self, mock_get_higher_img, - merge_latest=False): - mock_attached_disk_path = 'fake-attached-disk.vhdx' - mock_file_to_merge = (mock_attached_disk_path - if merge_latest - else 'fake-file-to-merge.vhdx') - - self._vhdutils.get_vhd_parent_path.return_value = ( - mock.sentinel.vhd_parent_path) - mock_get_higher_img.return_value = mock.sentinel.higher_img - - exp_new_top_img = (mock.sentinel.vhd_parent_path if merge_latest - else mock_attached_disk_path) - - 
new_top_img = self._volume_driver._do_delete_snapshot( - mock_attached_disk_path, - mock_file_to_merge) - - self.assertEqual(exp_new_top_img, new_top_img) - - self._vhdutils.get_vhd_parent_path.assert_called_once_with( - mock_file_to_merge) - self._vhdutils.merge_vhd.assert_called_once_with( - mock_file_to_merge, delete_merged_image=False) - - if not merge_latest: - mock_get_higher_img.assert_called_once_with( - mock_file_to_merge, - mock_attached_disk_path) - self._vhdutils.reconnect_parent_vhd.assert_called_once_with( - mock.sentinel.higher_img, - mock.sentinel.vhd_parent_path) - else: - mock_get_higher_img.assert_not_called() - self._vhdutils.reconnect_parent_vhd.assert_not_called() - - @ddt.data(2, 3, 4, 5) - def test_get_higher_image(self, vhd_idx): - vhd_chain_length = 5 - vhd_chain = ['vhd-%s.vhdx' % idx - for idx in range(vhd_chain_length)][::-1] - vhd_path = 'vhd-%s.vhdx' % vhd_idx - - self._vhdutils.get_vhd_parent_path.side_effect = ( - vhd_chain[1:] + [None]) - - if vhd_idx in range(vhd_chain_length - 1): - exp_higher_vhd_path = 'vhd-%s.vhdx' % (vhd_idx + 1) - result = self._volume_driver._get_higher_image_from_chain( - vhd_path, - vhd_chain[0]) - - self.assertEqual(exp_higher_vhd_path, result) - - self._vhdutils.get_vhd_parent_path.assert_has_calls( - [mock.call(path) - for path in vhd_chain[:vhd_chain_length - vhd_idx - 1]]) - else: - self.assertRaises( - exception.ImageNotFound, - self._volume_driver._get_higher_image_from_chain, - vhd_path, - vhd_chain[0]) diff --git a/compute_hyperv/tests/unit/utils/__init__.py b/compute_hyperv/tests/unit/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/compute_hyperv/tests/unit/utils/test_placement.py b/compute_hyperv/tests/unit/utils/test_placement.py deleted file mode 100644 index 8af74d10..00000000 --- a/compute_hyperv/tests/unit/utils/test_placement.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -import ddt -from nova import context -from nova import exception -from nova import objects -from nova.tests.unit import fake_requests -from oslo_serialization import jsonutils - -from compute_hyperv.nova.utils import placement as placement -from compute_hyperv.tests import fake_instance -from compute_hyperv.tests.unit import test_base - - -@ddt.ddt -class PlacementUtilsTestCase(test_base.HyperVBaseTestCase): - _autospec_classes = [ - placement.report.SchedulerReportClient - ] - - _FAKE_PROVIDER = 'fdb5c6d0-e0e9-4411-b952-fb05d6133718' - _FAKE_RESOURCES = {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 1} - _FAKE_ALLOCATIONS = { - _FAKE_PROVIDER: {'resources': _FAKE_RESOURCES} - } - - def setUp(self): - super(PlacementUtilsTestCase, self).setUp() - self.context = context.get_admin_context() - self.instance = fake_instance.fake_instance_obj(self.context) - - self.placement = placement.PlacementUtils() - self.client = self.placement.reportclient - - @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename') - @mock.patch.object(placement.PlacementUtils, 'move_allocations') - def test_move_compute_node_allocations(self, mock_move_alloc, - mock_get_comp_node): - mock_get_comp_node.side_effect = [ - mock.Mock(uuid=uuid) for uuid in [mock.sentinel.old_host_uuid, - mock.sentinel.new_host_uuid]] - - self.placement.move_compute_node_allocations( - self.context, self.instance, mock.sentinel.old_host, - mock.sentinel.new_host, - merge_existing=mock.sentinel.merge_existing) - - mock_move_alloc.assert_called_once_with( - self.context, self.instance.uuid, - mock.sentinel.old_host_uuid, - mock.sentinel.new_host_uuid, - merge_existing=mock.sentinel.merge_existing) - mock_get_comp_node.assert_has_calls( - mock.call(self.context, host, host) for host in - [mock.sentinel.old_host, mock.sentinel.new_host]) - - @ddt.data({}, # provider did not change - {'old_rp': 'fake_rp'}) # provider not included in allocations - @ddt.unpack - @mock.patch.object(placement.PlacementUtils, '_get_allocs_for_consumer') - @mock.patch.object(placement.PlacementUtils, '_put_allocs') - def test_move_allocations_noop(self, mock_put, mock_get_allocs, - old_rp=_FAKE_PROVIDER, - new_rp=_FAKE_PROVIDER): - mock_get_allocs.return_value = {'allocations': self._FAKE_ALLOCATIONS} - - self.placement.move_allocations( - self.context, mock.sentinel.consumer, old_rp, new_rp) - - mock_get_allocs.assert_called_once_with( - self.context, mock.sentinel.consumer, - version=placement.CONSUMER_GENERATION_VERSION) - mock_put.assert_not_called() - - @ddt.data(True, False) - @mock.patch.object(placement.PlacementUtils, '_get_allocs_for_consumer') - @mock.patch.object(placement.PlacementUtils, '_put_allocs') - def test_merge_allocations(self, merge_existing, - mock_put, mock_get_allocs): - old_rp = self._FAKE_PROVIDER - new_rp = 'new_rp' - allocs = self._FAKE_ALLOCATIONS.copy() - allocs[new_rp] = {'resources': self._FAKE_RESOURCES.copy()} - - mock_get_allocs.return_value = {'allocations': allocs} - - if merge_existing: - exp_resources = {'VCPU': 2, 'MEMORY_MB': 1024, 'DISK_GB': 2} - else: - exp_resources = self._FAKE_RESOURCES - exp_allocs = {new_rp: {'resources': exp_resources}} - - self.placement.move_allocations( - self.context, mock.sentinel.consumer, old_rp, new_rp, - merge_existing=merge_existing) - - mock_put.assert_called_once_with( - self.context, mock.sentinel.consumer, - {'allocations': exp_allocs}, - version=placement.CONSUMER_GENERATION_VERSION) - - @ddt.data({}, # no errors - {'status_code': 409, - 'errors': 
[{'code': 'placement.concurrent_update'}], - 'expected_exc': placement.report.Retry}, - {'status_code': 500, - 'expected_exc': exception.AllocationUpdateFailed}) - @ddt.unpack - def test_put_allocs(self, status_code=204, expected_exc=None, errors=None): - response = fake_requests.FakeResponse( - status_code, - content=jsonutils.dumps({'errors': errors})) - self.client.put.return_value = response - - args = (self.context, mock.sentinel.consumer, mock.sentinel.allocs, - mock.sentinel.version) - if expected_exc: - self.assertRaises(expected_exc, self.placement._put_allocs, *args) - else: - self.placement._put_allocs(*args) - - self.client.put.assert_called_once_with( - '/allocations/%s' % mock.sentinel.consumer, - mock.sentinel.allocs, - version=mock.sentinel.version, - global_request_id=self.context.global_id) - - def test_get_allocs(self): - ret_val = self.placement._get_allocs_for_consumer( - self.context, mock.sentinel.consumer, mock.sentinel.version) - exp_val = self.client.get.return_value.json.return_value - self.assertEqual(exp_val, ret_val) - - self.client.get.assert_called_once_with( - '/allocations/%s' % mock.sentinel.consumer, - version=mock.sentinel.version, - global_request_id=self.context.global_id) - - def test_get_allocs_missing(self): - self.client.get.return_value = fake_requests.FakeResponse(500) - self.assertRaises( - exception.ConsumerAllocationRetrievalFailed, - self.placement._get_allocs_for_consumer, - self.context, mock.sentinel.consumer, mock.sentinel.version) - - def test_merge_resources(self): - resources = { - 'VCPU': 1, 'MEMORY_MB': 1024, - } - new_resources = { - 'VCPU': 2, 'MEMORY_MB': 2048, 'CUSTOM_FOO': 1, - } - doubled = { - 'VCPU': 3, 'MEMORY_MB': 3072, 'CUSTOM_FOO': 1, - } - saved_orig = dict(resources) - self.placement.merge_resources(resources, new_resources) - # Check to see that we've doubled our resources - self.assertEqual(doubled, resources) - # and then removed those doubled resources - self.placement.merge_resources(resources, saved_orig, -1) - self.assertEqual(new_resources, resources) - - def test_merge_resources_zero(self): - # Test 0 value resources are ignored. - resources = { - 'VCPU': 1, 'MEMORY_MB': 1024, - } - new_resources = { - 'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 0, - } - # The result should not include the zero valued resource. - doubled = { - 'VCPU': 3, 'MEMORY_MB': 3072, - } - self.placement.merge_resources(resources, new_resources) - self.assertEqual(doubled, resources) - - def test_merge_resources_original_zeroes(self): - # Confirm that merging that result in a zero in the original - # excludes the zeroed resource class. - resources = { - 'VCPU': 3, 'MEMORY_MB': 1023, 'DISK_GB': 1, - } - new_resources = { - 'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 1, - } - merged = { - 'VCPU': 2, 'MEMORY_MB': 511, - } - self.placement.merge_resources(resources, new_resources, -1) - self.assertEqual(merged, resources) diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 43d29ed4..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 - -# releasenotes -reno>=3.1.0 # Apache-2.0 - -oslo.config>=8.6.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 89d407eb..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'oslo_config.sphinxconfiggen', - 'oslo_config.sphinxext', - #'sphinx.ext.intersphinx', - 'openstackdocstheme' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -config_generator_config_file = '../../etc/compute-hyperv-config-generator.conf' -sample_config_basename = '_static/compute-hyperv' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'compute-hyperv' -copyright = '2013, OpenStack Foundation' - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/compute-hyperv' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'compute-hyperv' -openstackdocs_bug_tag = '' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -html_static_path = ['_static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/configuration/config.rst b/doc/source/configuration/config.rst deleted file mode 100644 index 490bc48e..00000000 --- a/doc/source/configuration/config.rst +++ /dev/null @@ -1,16 +0,0 @@ -=============================== -Configuration options reference -=============================== - -The following is an overview of all available configuration options in Nova -and compute-hyperv. -For a sample configuration file, refer to :ref:`config_sample`. - -.. show-options:: - - compute_hyperv - os_win - nova.conf - oslo.log - oslo.messaging - oslo.concurrency diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index 80332ef4..00000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,234 +0,0 @@ -.. _config_index: - -============= -Configuration -============= - -In addition to the Nova config options, compute-hyperv has a few extra -configuration options. For a sample configuration file, refer to -:ref:`config_sample`. - - -Driver configuration --------------------- - -In order to use the compute-hyperv Nova driver, the following configuration -option will have to be set in the ``nova.conf`` file: - -.. code-block:: ini - - [DEFAULT] - compute_driver = compute_hyperv.driver.HyperVDriver - -And for Hyper-V Clusters, the following: - -.. code-block:: ini - - [DEFAULT] - compute_driver = compute_hyperv.cluster.driver.HyperVClusterDriver - instances_path = path\to\cluster\wide\storage\location - sync_power_state_interval = -1 - - [workarounds] - handle_virt_lifecycle_events = False - -By default, the OpenStack Hyper-V installer will configure the ``nova-compute`` -service to use the ``compute_hyperv.driver.HyperVDriver`` driver. - - -Storage configuration ---------------------- - -When spawning instances, ``nova-compute`` will create the VM related files ( -VM configuration file, ephemerals, configdrive, console.log, etc.) in the -location specified by the ``instances_path`` configuration option, even if -the instance is volume-backed. - -It is not recommended for Nova and Cinder to use the same storage location, as -that can create scheduling and disk overcommitment issues. - - -Nova instance files location -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default, the OpenStack Hyper-V installer will configure ``nova-compute`` to -use the following path as the ``instances_path``: - -.. code-block:: ini - - [DEFAULT] - instances_path = C:\OpenStack\Instances - -``instances_path`` can be set to an SMB share, mounted or unmounted: - -.. code-block:: ini - - [DEFAULT] - # in this case, X is a persistently mounted SMB share. - instances_path = X:\OpenStack\Instances - - # or - instances_path = \\SMB_SERVER\share_name\OpenStack\Instances - -Alternatively, CSVs can be used: - -.. code-block:: ini - - [DEFAULT] - instances_path = C:\ClusterStorage\Volume1\OpenStack\Instances - -When the compute hosts are using different CSVs, Nova must be configured not -to delete unused images since its image caching mechanism can't properly track -the image file usage in this case. - -.. code-block:: ini - - [image_cache] - remove_unused_base_images = False - - -Block Storage (Cinder) configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes Nova configuration options that handle the way in which -Cinder volumes are consumed. - -When having multiple paths connecting the host to the storage backend, -make sure to enable the following config option: - -.. 
code-block:: ini - - [hyperv] - use_multipath_io = True - -This will ensure that the available paths are actually leveraged. Also, before -attempting any volume connection, it will ensure that the MPIO service is -enabled and that iSCSI and Fibre Channel block devices are claimed by MPIO. -SMB and RBD volumes are not affected by this option. - -In some cases, Nova may fail to attach volumes due to transient connectivity -issues. The following options specify how many and how often retries should be -performed. - -.. code-block:: ini - - [hyperv] - # Those are the default values. - volume_attach_retry_count = 10 - volume_attach_retry_interval = 5 - - # The following options only apply to disk scan retries. - mounted_disk_query_retry_count = 10 - mounted_disk_query_retry_interval = 5 - -When having one or more hardware iSCSI initiators, you may use the following -config option, explicitly telling Nova which iSCSI initiator to use: - -.. code-block:: ini - - [hyperv] - iscsi_initiator_list = PCI\VEN_1077&DEV_2031&SUBSYS_17E8103C&REV_02\\4&257301f0&0&0010_0, PCI\VEN_1077&DEV_2031&SUBSYS_17E8103C&REV_02\4&257301f0&0&0010_1 - -The list of available initiators may be retrieved using: - -.. code-block:: powershell - - Get-InitiatorPort - -If no iSCSI initiator is specified, the MS iSCSI Initiator service will only -pick one of the available ones when establishing iSCSI sessions. - - -Live migration configuration ---------------------------- - -For live migrating virtual machines to hosts with different CPU features, the -following configuration option must be set in the compute node's ``nova.conf`` -file: - -.. code-block:: ini - - [hyperv] - limit_cpu_features = True - -Keep in mind that changing this configuration option will not affect the -instances that are already spawned, meaning that instances spawned with this -flag set to False will not be able to live migrate to hosts with different CPU -features, and that they will have to be shut down and rebuilt, or have the -setting manually set. - - -.. _pci_devices_config: - -Whitelisting PCI devices ------------------------- - -After the assignable PCI devices have been prepared for Hyper-V -(:ref:`pci_devices_setup`), the next step is to whitelist them in the compute -node's ``nova.conf``. - -.. code-block:: ini - - [pci] - # this is a list of dictionaries, more dictionaries can be added. - passthrough_whitelist = [{"vendor_id": "<vendor_id>", "product_id": "<product_id>"}] - -The ``vendor_id`` and ``product_id`` necessary for the ``passthrough_whitelist`` -can be obtained from the assignable PCI device's ``InstanceId``: - -.. code-block:: powershell - - Get-VMHostAssignableDevice - -The ``InstanceId`` should have the following format: - -.. code-block:: none - - PCIP\VEN_<vendor_id>&DEV_<product_id> - -The ``<vendor_id>`` and ``<product_id>`` can be extracted and used in the -``nova.conf`` file. After the configuration file has been changed, the -``nova-compute`` service will have to be restarted. - -Afterwards, the ``nova-api`` and ``nova-scheduler`` services will have to be -configured. For this, check the `nova PCI passthrough configuration guide`__. - -__ https://docs.openstack.org/nova/queens/admin/pci-passthrough.html - - -Distributed locking configuration --------------------------------- - -In order to avoid race conditions, our driver relies on distributed locks. A -distributed lock backend such as etcd, mysql or a file share will have to be -configured. -
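As a rough illustration of what this relies on, the following is a minimal, hypothetical sketch of acquiring a distributed lock through the ``tooz`` library; the member id and lock name below are made up for the example:

.. code-block:: python

    from tooz import coordination

    # The backend_url matches the [coordination] option described below.
    coordinator = coordination.get_coordinator(
        'etcd3+http://etcd_address:2379', b'compute-node-1')
    coordinator.start()

    # Other members requesting the same lock name will wait until it is
    # released, which is what prevents the race conditions mentioned above.
    with coordinator.get_lock(b'some-instance-uuid'):
        pass  # perform the operation that must not race

    coordinator.stop()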
-The following configuration will use etcd 3 as a lock backend: - -.. code-block:: ini - - [coordination] - backend_url = etcd3+http://etcd_address:2379 - -.. note:: - - The ``etcd3gw`` Python package is required when using etcd 3. This does not - apply to the v2 etcd API, which may be requested through - ``etcd://etcd_address:2379``. - -In order to use a file share, set the following: - -.. code-block:: ini - - [coordination] - backend_url = file:////share_addr/share_name - - -Configuration options ---------------------- - -.. toctree:: - :maxdepth: 1 - - config - sample_config diff --git a/doc/source/configuration/sample_config.rst b/doc/source/configuration/sample_config.rst deleted file mode 100644 index 8053ee6e..00000000 --- a/doc/source/configuration/sample_config.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _config_sample: - -==================== -Configuration sample -==================== - -The following is a sample compute-hyperv configuration for adaptation and -use. - -The sample configuration can also be viewed in :download:`file form -`. - -Config options that are specific to the Hyper-V Nova driver can be found in -the ``[hyperv]`` config group section. - -.. important:: - - The sample configuration file is auto-generated from compute-hyperv when - this documentation is built. You must ensure your version of - compute-hyperv matches the version of this documentation. - -.. literalinclude:: /_static/compute-hyperv.conf.sample diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index ed8c954d..00000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,92 +0,0 @@ -============ -Contributing -============ - -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the accounts -you need, the basics of interacting with our Gerrit review system, how we -communicate as a community, etc. - -Below we cover the more project-specific information you need to get started -with compute-hyperv. - -Communication -~~~~~~~~~~~~~ -.. This would be a good place to put the channel you chat in as a project; when/ - where your meeting is, the tags you prepend to your ML threads, etc. - -We recommend using the standard communication channels, such as the OpenStack -mailing list or IRC channels. The official IRC channel (#openstack-hyper-v) is -not archived at the moment, so we recommend using #openstack-dev on OFTC. - -Please include one of the following tags when using the OpenStack mailing -list: - -* winstackers -* windows -* hyper-v - -Feel free to reach out to the Winstackers PTL or other core members. - -Contacting the Core Team -~~~~~~~~~~~~~~~~~~~~~~~~ -.. This section should list the core team, their irc nicks, emails, timezones - etc. If all this info is maintained elsewhere (i.e. a wiki), you can link to - that instead of enumerating everyone here. - -The Winstackers core team is composed of: - -* Lucian Petrut (lpetrut) -* Claudiu Belu (claudiub) -* Alessandro Pilotti (apilotti) - -New Feature Planning -~~~~~~~~~~~~~~~~~~~~ -.. This section is for talking about the process to get a new feature in. Some - projects use blueprints, some want specs, some want both! Some projects - stick to a strict schedule when selecting what new features will be reviewed - for a release. - -If you want to propose a new feature, we recommend `filing a blueprint -`__ and then contacting the core team.
- -Once the feature is approved, please propose the patches on Gerrit, following -the OpenStack contributor guide. - -Task Tracking -~~~~~~~~~~~~~ -.. This section is about where you track tasks- launchpad? storyboard? is there - more than one launchpad project? what's the name of the project group in - storyboard? - -We track our tasks in `Launchpad `__. - -Reporting a Bug -~~~~~~~~~~~~~~~ -.. Pretty self explanatory section, link directly to where people should report - bugs for your project. - -You found an issue and want to make sure we are aware of it? You can do so on -`Launchpad `__. -More info about Launchpad usage can be found on `OpenStack docs page -`_. - -Getting Your Patch Merged -~~~~~~~~~~~~~~~~~~~~~~~~~ -.. This section should have info about what it takes to get something merged. Do - you require one or two +2's before +W? Do some of your repos require unit - test changes with all patches? etc. - -Changes proposed to compute-hyperv generally require two ``Code-Review +2`` votes from -compute-hyperv core reviewers before merging. In case of trivial patches and urgent -bug fixes, this rule is sometimes ignored. - -Project Team Lead Duties -~~~~~~~~~~~~~~~~~~~~~~~~ -.. this section is where you can put PTL specific duties not already listed in - the common PTL guide (linked below), or if you already have them written - up elsewhere you can link to that doc here. - -All common PTL duties are enumerated in the `PTL guide -`_. diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index e0af15f9..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -============================================== -Welcome to the documentation of compute_hyperv -============================================== - -Starting with Folsom, Hyper-V can be used as a compute node within OpenStack -deployments. - -This documentation contains information on how to set up and configure Hyper-V -hosts as OpenStack compute nodes, more specifically: - -* Supported OS versions -* Requirements and host configurations -* How to install the necessary OpenStack services -* ``nova-compute`` configuration options -* Troubleshooting and debugging tips & tricks - -For release notes, please check out the following `page`__. - -__ http://compute-hyperv.readthedocs.io/projects/releasenotes - -Contents: - -.. toctree:: - :maxdepth: 2 - - readme - contributing - install/index - troubleshooting/index - configuration/index - usage/index - -* :ref:`search` diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 5ec30fae..00000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -================== -Installation guide -================== - -The compute-hyperv project offers two Nova Hyper-V drivers, providing -additional features and bug fixes compared to the in-tree Nova -Hyper-V driver: - -* ``compute_hyperv.driver.HyperVDriver`` -* ``compute_hyperv.cluster.driver.HyperVClusterDriver`` - -These drivers receive the same degree of testing (if not more) as the -upstream driver, being covered by a range of official OpenStack Continuous -Integration (CI) systems. - -Most production Hyper-V based OpenStack deployments use the compute-hyperv -drivers. - -The ``HyperVClusterDriver`` can be used on Hyper-V Cluster compute nodes and -will create and manage highly available clustered virtual machines. - -This chapter assumes a working setup of OpenStack following the -`OpenStack Installation Tutorial -`_. - - -..
toctree:: - :maxdepth: 2 - - prerequisites.rst - install.rst - next-steps.rst - verify.rst diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst deleted file mode 100644 index 97ddc7b5..00000000 --- a/doc/source/install/install.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. _install: - -Install -~~~~~~~ - -This section describes how to install a Hyper-V Nova compute node into an -OpenStack deployment. For details about configuration, refer to -:ref:`config_index`. - -This section assumes that you already have a working OpenStack environment. - -The easiest way to install and configure the ``nova-compute`` service is to use -an MSI, which can be freely downloaded from: -https://cloudbase.it/openstack-hyperv-driver/ - -The MSI can optionally include the installation and / or configuration of: - -* Neutron Hyper-V Agent -* Ceilometer Polling Agent -* Windows Services for the mentioned agents -* Live migration feature (if the compute node is joined to an AD) -* Free RDP -* iSCSI Initiator - -The MSI can be installed normally through its GUI, or in unattended mode -(useful for automation). In order to do so, the following command has to be -executed: - -.. code-block:: bat - - msiexec /i \path\to\the\HyperVNovaCompute.msi /qn /l*v log.txt - -The command above will install the given MSI in the quiet, no UI mode, and -will output its verbose logs into the given ``log.txt`` file. Additional -key-value arguments can be given to the MSI for configuration. Some of these -options are: - -* ADDLOCAL: Comma separated list of features to install. Acceptable values: - ``HyperVNovaCompute,NeutronHyperVAgent,iSCSISWInitiator,FreeRDP`` -* INSTALLDIR: The location where the OpenStack services and their - configuration files are installed. By default, they are installed in: - ``%ProgramFiles%\Cloudbase Solutions\OpenStack\Nova`` -* SKIPNOVACONF: Installs the MSI without doing any of the other actions: - creating configuration files, services, vSwitches, etc. - -Example: - -.. code-block:: bat - - msiexec /i HyperVNovaCompute.msi /qn /l*v log.txt ` - ADDLOCAL="HyperVNovaCompute,NeutronHyperVAgent,iSCSISWInitiator,FreeRDP" - -After installing the OpenStack services on the Hyper-V compute node, check that -they are up and running: - -.. code-block:: powershell - - Get-Service nova-compute - Get-Service neutron-* - Get-Service ceilometer-* # if the Ceilometer Polling Agent has been installed. - -All the listed services must have the ``Running`` status. If not, refer to -:ref:`troubleshooting`. diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index 0913a332..00000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the ``nova-compute`` service -installed and configured with the compute_hyperv driver. - -If the OpenStack services are running on the Hyper-V compute node, make sure -that they're reporting to the OpenStack controller and that they're alive by -running the following: - -.. code-block:: bash - - neutron agent-list - nova service-list - -The output should contain the Hyper-V host's ``nova-compute`` service and -Neutron L2 agent as alive / running. - -Starting with Ocata, Nova cells became mandatory. Make sure that the newly -added Hyper-V compute node is mapped into a Nova cell, otherwise Nova will not -build any instances on it. In small deployments, two cells are enough: -``cell0`` and ``cell1``.
``cell0`` is a special cell: instances that are never -scheduled are relegated to the ``cell0`` database, which is effectively a -graveyard of instances that failed to start. All successful/running instances -are stored in ``cell1``. - -You can check your Nova cells by running this on the Nova Controller: - -.. code-block:: bash - - nova-manage cell_v2 list_cells - -You should have at least 2 cells listed (``cell0`` and ``cell1``). If not, or -if only ``cell0`` exists, you can simply run: - -.. code-block:: bash - - nova-manage cell_v2 simple_cell_setup - -If you have the 2 cells, in order to map the newly created compute nodes to -``cell1``, run: - -.. code-block:: bash - - nova-manage cell_v2 discover_hosts - nova-manage cell_v2 list_hosts - -The ``list_hosts`` command should output a table with your compute nodes -mapped to the Nova cell. For more details on Nova cells, their benefits and -how to properly use them, check the `Nova cells documentation`__. - -__ https://docs.openstack.org/nova/latest/user/cells.html - -If Neutron Hyper-V Agent has been chosen as an L2 agent, make sure that the -Neutron Server meets the following requirements: - -* ``networking-hyperv`` installed. To check if ``networking-hyperv`` is - installed, run the following: - -.. code-block:: bash - - pip freeze | grep networking-hyperv - - If there is no output, it can be installed by running the command: - -.. code-block:: bash - - pip install networking-hyperv==VERSION - - The ``VERSION`` is dependent on your OpenStack deployment version. For - example, for Queens, the ``VERSION`` is 6.0.0. For other release names and - versions, you can look here: - https://github.com/openstack/networking-hyperv/releases - -* The Neutron Server has been configured to use the ``hyperv`` mechanism - driver. The configuration option can be found in - ``/etc/neutron/plugins/ml2/ml2_conf.ini``: - -.. code-block:: ini - - [ml2] - mechanism_drivers = openvswitch,hyperv - -If the configuration file has been modified, or ``networking-hyperv`` has been -installed, the Neutron Server service will have to be restarted. - -Additionally, keep in mind that the Neutron Hyper-V Agent only supports the -following network types: local, flat, VLAN. Ports with any other network -type will result in a PortBindingFailure exception. diff --git a/doc/source/install/prerequisites.rst b/doc/source/install/prerequisites.rst deleted file mode 100644 index a62f9a44..00000000 --- a/doc/source/install/prerequisites.rst +++ /dev/null @@ -1,340 +0,0 @@ -============= -Prerequisites -============= - -Starting with Folsom, Hyper-V can be used as a compute node within OpenStack -deployments. - -The Hyper-V versions that are currently supported are: - -* (deprecated) Windows / Hyper-V Server 2012 -* Windows / Hyper-V Server 2012 R2 -* Windows / Hyper-V Server 2016 - -Newer Hyper-V versions come with an extended list of features, and can offer -better overall performance. Thus, Windows / Hyper-V Server 2016 is recommended -for the best experience. - - -Hardware requirements ---------------------- - -Although this document does not provide a complete list of Hyper-V compatible -hardware, the following items are necessary: - -* 64-bit processor with Second Level Address Translation (SLAT). -* CPU support for VM Monitor Mode Extension (VT-c on Intel CPUs). -* Minimum of 4 GB memory. As virtual machines share memory with the Hyper-V - host, you will need to provide enough memory to handle the expected virtual - workload.
-* Minimum 16-20 GB of disk space for the OS itself and updates. -* At least one NIC, but optimally two NICs: one connected to the management - network, and one connected to the guest data network. If a single NIC is - used, when creating the Hyper-V vSwitch, make sure the ``-AllowManagementOS`` - option is set to ``True``, otherwise you will lose connectivity to the host. - -The following items will need to be enabled in the system BIOS: - -* Virtualization Technology - may have a different label depending on - motherboard manufacturer. -* Hardware Enforced Data Execution Prevention. - -To check a host's Hyper-V compatibility, open up cmd or PowerShell and run: - -.. code-block:: bat - - systeminfo - -The output will include the Hyper-V requirements and if the host meets them or -not. If all the requirements are met, the host is Hyper-V capable. - - -Storage considerations ----------------------- - -Instance files -~~~~~~~~~~~~~~ - -Nova will use a pre-configured directory for storing instance files such as: - -* instance boot images and ``ephemeral`` disk images -* instance config files (config drive image and Hyper-V files) -* instance console log -* cached Glance images -* snapshot files - -The following options are available for the instance directory: - -* Local disk. -* SMB shares. Make sure that they are persistent. -* Cluster Shared Volumes (``CSV``) - * Storage Spaces - * Storage Spaces Direct (``S2D``) - * SAN LUNs as underlying CSV storage - -.. note:: - - Ample storage may be required when using Nova "local" storage for the - instance virtual disk images (as opposed to booting from Cinder volumes). - -Compute nodes can be configured to use the same storage option. Doing so will -result in faster cold / live migration operations to other compute nodes using -the same storage, but there's a risk of disk overcommitment. Nova is not aware -of compute nodes sharing the same storage and because of this, the Nova -scheduler might pick a host it normally wouldn't. - -For example, hosts A and B are configured to use a 100 GB SMB share. Both -compute nodes will report as having 100 GB storage available. Nova has to -spawn 2 instances requiring 80 GB storage each. Normally, Nova would be able -to spawn only one instance, but both will spawn on different hosts, -overcommitting the disk by 60 GB. - - -Cinder volumes -~~~~~~~~~~~~~~ - -The Nova Hyper-V driver can attach Cinder volumes exposed through the -following protocols: - -* iSCSI -* Fibre Channel -* SMB - the volumes are stored as virtual disk images (e.g. VHD / VHDX) -* RBD - starting with Ceph 16 (Pacific) - -.. note:: - - The Nova Hyper-V Cluster driver only supports SMB backed volumes. The - reason is that the volumes need to be available on the destination - host side during an unexpected instance failover. - -Before configuring Nova, you should ensure that the Hyper-V compute nodes -can properly access the storage backend used by Cinder. - -The MSI installer can enable the Microsoft Software iSCSI initiator for you. -When using hardware iSCSI initiators or Fibre Channel, make sure that the HBAs -are properly configured and the drivers are up to date. - -Please consult your storage vendor documentation to see if there are any other -special requirements (e.g. additional software to be installed, such as iSCSI -DSMs - Device Specific Modules). - -Some Cinder backends require pre-configured information (specified via volume -types or Cinder Volume config file) about the hosts that are going to consume -the volumes (e.g.
the operating system type), based on which the LUNs will be -created/exposed. The reason is that the supported SCSI command set may differ -based on the operating system. An incorrect LUN type may prevent Windows nodes -from accessing the volumes (although generic LUN types should be fine in most -cases). - -Multipath IO -"""""""""""" - -You may set up multiple paths between your Windows hosts and the storage -backends in order to provide increased throughput and fault tolerance. - -When using iSCSI or Fibre Channel, make sure to enable and configure the -MPIO service. MPIO is a service that manages available disk paths, performing -failover and load balancing based on pre-configured policies. It's extendable, -in the sense that Device Specific Modules may be imported. - -The MPIO service will ensure that LUNs accessible through multiple paths are -exposed by the OS as a single disk drive. - -.. warning:: - If multiple disk paths are available and the MPIO service is not - configured properly, the same LUN can be exposed as multiple disk drives - (one per available path). This must be addressed urgently as it can - potentially lead to data corruption. - -Run the following to enable the MPIO service: - -.. code-block:: powershell - - Enable-WindowsOptionalFeature -Online -FeatureName MultiPathIO - - # Ensure that the "mpio" service is running - Get-Service mpio - -Once you have enabled MPIO, make sure to configure it to automatically -claim volumes exposed by the desired storage backend. If needed, import -vendor provided DSMs. - -For more details about Windows MPIO, check the following `page`__. - -__ https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2008-R2-and-2008/ee619734(v=ws.10) - -SMB 3.0 and later also support using multiple paths to a share (the UNC -path can be the same), leveraging ``SMB Direct`` and ``SMB Multichannel``. - -By default, all available paths will be used when accessing SMB shares. -You can configure constraints in order to choose which adapters should -be used when connecting to SMB shares (for example, to avoid using a -management network for SMB traffic). - -.. note:: - - SMB does not require or interact in any way with the MPIO service. - -For best performance, ``SMB Direct`` (RDMA) should also be used, if your -network cards support it. - -For more details about ``SMB Multichannel``, check the following -`blog post`__. - -__ https://blogs.technet.microsoft.com/josebda/2012/06/28/the-basics-of-smb-multichannel-a-feature-of-windows-server-2012-and-smb-3-0/ - - -NTP configuration ------------------ - -Network time services must be configured to ensure proper operation of the -OpenStack nodes. To set network time on your Windows host, you must run the -following commands: - -.. code-block:: bat - - net stop w32time - w32tm /config /manualpeerlist:pool.ntp.org,0x8 /syncfromflags:MANUAL - net start w32time - -Keep in mind that the node will have to be time synchronized with the other -nodes of your OpenStack environment, so it is important to use the same NTP -server. Note that in case of an Active Directory environment, you may do this -only for the AD Domain Controller. - - -Live migration configuration ----------------------------- - -In order for the live migration feature to work on the Hyper-V compute nodes, -the following items are required: - -* A Windows domain controller with the Hyper-V compute nodes as domain members. -* The ``nova-compute`` service must run with domain credentials.
You can set - the service credentials with: - -.. code-block:: bat - - sc.exe config openstack-compute obj="DOMAIN\username" password="password" - -`This guide`__ contains information on how to set up and configure live -migration on your Hyper-V compute nodes (authentication options, constrained -delegation, migration performance options, etc), and a few troubleshooting -tips. - -__ https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/manage/Use-live-migration-without-Failover-Clustering-to-move-a-virtual-machine - - -Hyper-V Cluster configuration ------------------------------ - -compute-hyperv also offers a driver for Hyper-V Cluster nodes, which will be -able to create and manage highly available virtual machines. For the Hyper-V -Cluster Driver to be usable, the Hyper-V Cluster nodes will have to be joined -to an Active Directory and a Microsoft Failover Cluster. The nodes in a -Hyper-V Cluster must be identical. - -In order to avoid race conditions, our driver relies on distributed locks. A -distributed lock backend such as etcd, mysql or a file share will have to be -configured. - -For more details about available distributed lock backends, check the -`list of drivers supported by tooz`__. - -__ https://docs.openstack.org/tooz/latest/user/drivers.html - - -Guarded Host configuration (Shielded VMs) ------------------------------------------ - -Shielded VMs are a feature introduced in Windows / Hyper-V Server 2016 that -can be used in order to have highly secure virtual machines which cannot be -read from, tampered with, or inspected by malware, or even malicious -administrators. - -In order for a Hyper-V compute node to be able to spawn such VMs, it must be -configured as a Guarded Host. - -For more information on how to configure your Active Directory, Host Guardian -Service, and compute node as a Guarded Host, you can read `this article`__. - -__ https://cloudbase.it/hyperv-shielded-vms-part-1/ - - -.. _numa_setup: - -NUMA spanning configuration ---------------------------- - -Non-Uniform Memory Access (NUMA) is a computer system architecture that groups -processors and memory in NUMA nodes. Processor threads accessing data in the -same NUMA cell have lower memory access latencies and better overall -performance. Some applications are NUMA-aware, taking advantage of NUMA -performance optimizations. - -Windows / Hyper-V Server 2012 introduced support for Virtual NUMA (vNUMA), -which can be exposed to the VMs, allowing them to benefit from the NUMA -performance optimizations. - -By default, when Hyper-V starts a VM, it will try to fit all of its memory in -a single NUMA node, but if it doesn't fit in only one, it will be spanned -across multiple NUMA nodes. This is called NUMA spanning, and it is enabled by -default. This allows Hyper-V to easily utilize the host's memory for VMs. - -NUMA spanning can be disabled and VMs can be configured to span a specific -number of NUMA nodes (including 1), and have that NUMA topology exposed to -the guest. Keep in mind that if a VM's vNUMA topology doesn't fit in the -host's available NUMA topology, it won't be able to start, and as a side -effect, less memory can be utilized for VMs. - -If a compute node only has 1 NUMA node, disabling NUMA spanning will have no -effect. To check how many NUMA nodes a host has, run the following PowerShell -command: - -.. code-block:: powershell - - Get-VMHostNumaNode - -The output will contain a list of NUMA nodes, their processors, total memory, -and used memory.
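To make the fit requirement above concrete, here is a short, hypothetical sketch (a greedy approximation, not Hyper-V's actual placement algorithm) of deciding whether a VM's vNUMA cells fit into the host's free NUMA node memory:

.. code-block:: python

    # Greedy illustration: with NUMA spanning disabled, every vNUMA cell
    # must fit into some host NUMA node's free memory, otherwise the VM
    # cannot start. All sizes are in MB.
    def vm_can_start(host_free_mb, vnuma_cells_mb):
        free = sorted(host_free_mb, reverse=True)
        for cell in sorted(vnuma_cells_mb, reverse=True):
            for i, node_free in enumerate(free):
                if node_free >= cell:
                    free[i] -= cell
                    break
            else:
                return False
        return True

    print(vm_can_start([16384, 16384], [8192, 8192]))  # True
    print(vm_can_start([10240, 6144], [8192, 8192]))   # False: no node can
                                                       # take the second cell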
- -To disable NUMA spanning, run the following PowerShell commands: - -.. code-block:: powershell - - Set-VMHost -NumaSpanningEnabled $false - Restart-Service vmms - -In order for the changes to take effect, the Hyper-V Virtual Machine Management -service (vmms) and the Hyper-V VMs have to be restarted. - -For more details on vNUMA, you can read the `following documentation`__. - -__ https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/dn282282(v=ws.11) - - -.. _pci_devices_setup: - -PCI passthrough host configuration ----------------------------------- - -Starting with Windows / Hyper-V Server 2016, PCI devices can be directly -assigned to Hyper-V VMs. - -In order to benefit from this feature, the host must support SR-IOV and -have assignable PCI devices. This can easily be checked by running the -following in PowerShell: - -.. code-block:: powershell - - Start-BitsTransfer https://raw.githubusercontent.com/Microsoft/Virtualization-Documentation/master/hyperv-samples/benarm-powershell/DDA/survey-dda.ps1 - .\survey-dda.ps1 - -The script above will output whether the host supports SR-IOV, along with a -detailed list of PCI devices, indicating whether each of them is assignable. - -If all the conditions are met, the desired devices will have to be prepared to -be assigned to VMs. The `following article`__ contains a step-by-step guide on -how to prepare them and how to restore the configurations if needed. - -__ https://blogs.technet.microsoft.com/heyscriptingguy/2016/07/14/passing-through-devices-to-hyper-v-vms-by-using-discrete-device-assignment/ diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 21275d2b..00000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify that instances can be created on the Hyper-V compute node through -Nova. If spawning fails, check the nova compute log file on the Hyper-V -compute node for relevant information (by default, it can be found in -``C:\OpenStack\Log\``). Additionally, setting the ``debug`` configuration -option in ``nova.conf`` will help troubleshoot the issue. - -If there is no relevant information in the compute node's logs, check the -Nova controller's logs. diff --git a/doc/source/readme.rst b/doc/source/readme.rst deleted file mode 100644 index a6210d3d..00000000 --- a/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/doc/source/troubleshooting/index.rst b/doc/source/troubleshooting/index.rst deleted file mode 100644 index 086671f1..00000000 --- a/doc/source/troubleshooting/index.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. _troubleshooting: - -===================== -Troubleshooting guide -===================== - -This section contains a few tips and tricks which can help you troubleshoot -and solve your Hyper-V compute node's potential issues. - - -OpenStack Services not running ------------------------------- - -You can check if the OpenStack services are up by running: - -.. code-block:: powershell - - Get-Service nova-compute - Get-Service neutron-* - -All the listed services must have the ``Running`` status. If not, check their -logs, which can typically be found in ``C:\OpenStack\Log\``. If there are no -logs, try to run the services manually. To see how to run ``nova-compute`` -manually, run the following command: - -.. code-block:: powershell - - sc.exe qc nova-compute - -The output will contain the ``BINARY_PATH_NAME`` with the service's command.
-The command will contain the path to the ``nova-compute.exe`` executable and -the path to its configuration file. Edit the configuration file and add the -following: - -.. code-block:: ini - - [DEFAULT] - debug = True - use_stderr = True - -This will help troubleshoot the service's issues. Next, run ``nova-compute`` -in PowerShell manually: - -.. code-block:: powershell - - &"C:\Program Files\Cloudbase Solutions\OpenStack\Nova\Python27\Scripts\nova-compute.exe" ` - --config-file "C:\Program Files\Cloudbase Solutions\OpenStack\Nova\etc\nova.conf" - -The reason why the service could not be started should be visible in the -output. - - -Live migration --------------- - -`This guide`__ offers a few tips for troubleshooting live migration issues. - -If live migration fails because the nodes have incompatible hardware, refer -to :ref:`config_index`. - -__ https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/manage/Use-live-migration-without-Failover-Clustering-to-move-a-virtual-machine - - -How to restart a service on Hyper-V ------------------------------------ - -Restarting an OpenStack service can easily be done through PowerShell: - -.. code-block:: powershell - - Restart-Service service-name - -or through cmd: - -.. code-block:: bat - - net stop service_name && net start service_name - -For example, the following command will restart the iSCSI initiator service: - -.. code-block:: powershell - - Restart-Service msiscsi diff --git a/doc/source/usage/index.rst b/doc/source/usage/index.rst deleted file mode 100644 index 798ac351..00000000 --- a/doc/source/usage/index.rst +++ /dev/null @@ -1,491 +0,0 @@ -=========== -Usage guide -=========== - -This section contains information on how to create Glance images for Hyper-V -compute nodes and how to use various Hyper-V features through image metadata -properties and Nova flavor extra specs. - - -Prepare images for use with Hyper-V ----------------------------------- - -Hyper-V currently supports only the VHD and VHDx file formats for virtual -machines. - -OpenStack Hyper-V images should have the following items installed: - -* cloud-init (Linux) or cloudbase-init (Windows) -* Linux Integration Services (on Linux type OSes) - -Images can be uploaded to `glance` using the `openstack` client: - -.. code-block:: bash - - openstack image create --name "VM_IMAGE_NAME" --property hypervisor_type=hyperv --public \ - --container-format bare --disk-format vhd --file /path/to/image - -.. note:: - - VHD and VHDx file sizes can be bigger than their maximum internal size; - as such, you need to boot instances using a flavor with a slightly bigger - disk size than the internal size of the disk files. - - -Generation 2 VM images -~~~~~~~~~~~~~~~~~~~~~~ - -Windows / Hyper-V Server 2012 R2 introduced a feature called -**Generation 2 VMs**, which adds support for Secure Boot, UEFI, -reduced boot times, etc. - -Starting with Kilo, the Hyper-V Driver supports Generation 2 VMs. - -Check the `original spec`__ for more details on its features, how to prepare -and create the glance images, and restrictions. - -Regarding restrictions, the original spec mentions that RemoteFX is not -supported with Generation 2 VMs, but starting with Windows / -Hyper-V Server 2016, this is a supported use case. - -.. important:: - - The images must be prepared for Generation 2 VMs before uploading to glance - (can be created and prepared in a Hyper-V Generation 2 VM). Generation 2 - VM images cannot be used in Generation 1 VMs and vice-versa.
In that case, the instances - will spawn and will be in the ``Running`` state, but they will **not** be - usable. - -__ https://specs.openstack.org/openstack/nova-specs/specs/kilo/implemented/hyper-v-generation-2-vms.html - - -UEFI Secure Boot ----------------- - -Secure Boot is a mechanism that starts the bootloader only if the bootloader's -signature has maintained integrity, assuring that only approved components are -allowed to run. This mechanism is dependent on UEFI. - -As it requires UEFI, this feature is only available to Generation 2 VMs, and -the guest OS must be supported by Hyper-V. Newer Hyper-V versions support -more OS types and versions, for example: - -* Windows / Hyper-V Server 2012 R2 supports only Windows guests -* Windows / Hyper-V Server 2016 supports Windows and Linux guests - -Check the following for a detailed list of supported -`Linux distributions and versions`__. - -The Hyper-V Driver supports this feature starting with OpenStack Liberty. - -.. important:: - The images must be prepared for Secure Boot before they're uploaded to - glance. For example, the VM on which the image is prepared must be a - Generation 2 VM with Secure Boot enabled. These images can be spawned - with Secure Boot enabled or disabled, while other images can only be - spawned with Secure Boot disabled. Otherwise, the instances will spawn - and will be in the ``Running`` state, but they will **not** be usable. - -UEFI Secure Boot instances are created by specifying the ``os_secure_boot`` -image metadata property, or the nova flavor extra spec ``os:secure_boot`` -(the flavor extra spec's value takes precedence). - -The acceptable values for the ``os_secure_boot`` image metadata property are: -``disabled, optional, required`` (``disabled`` by default). The ``optional`` -value means that the image is capable of Secure Boot, but it will require the -flavor extra spec ``os:secure_boot`` to be ``required`` in order to use this -feature. - -Additionally, the image metadata property ``os_type`` is mandatory when -enabling Secure Boot. Acceptable values: ``windows``, ``linux``. - -Finally, in deployments with compute nodes with different Hyper-V versions, -the ``hypervisor_version_requires`` image metadata property should be set -in order to ensure proper scheduling. The correct values are: - -* ``>=6.3`` for images targeting Windows / Hyper-V Server 2012 R2 or newer -* ``>=10.0`` for images targeting Windows / Hyper-V Server 2016 or newer - (Linux guests) - -Examples of how to create the glance image: - -.. code-block:: bash - - glance image-create --property hypervisor_type=hyperv \ - --property hw_machine_type="hyperv-gen2" \ - --property hypervisor_version_requires=">=6.3" \ - --property os_secure_boot=required --property os_type=windows \ - --name win-secure --disk-format vhd --container-format bare \ - --file path/to/windows.vhdx - - glance image-update --property os_secure_boot=optional <image> - glance image-update --property hypervisor_version_requires=">=10.0" <image> - glance image-update --property os_type=linux <image> - - nova flavor-key <flavor> set "os:secure_boot=required" - -__ https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/Supported-Linux-and-FreeBSD-virtual-machines-for-Hyper-V-on-Windows
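The precedence rules described above can be summarized with a small, hypothetical helper (a sketch of the documented behavior, not the driver's actual code):

.. code-block:: python

    # The flavor extra spec takes precedence over the image property, and
    # "optional" images only boot with Secure Boot when the flavor
    # explicitly requires it.
    def secure_boot_enabled(image_prop='disabled', flavor_spec=None):
        value = flavor_spec or image_prop
        if value == 'optional':
            return flavor_spec == 'required'
        return value == 'required'

    assert secure_boot_enabled('required') is True
    assert secure_boot_enabled('optional') is False
    assert secure_boot_enabled('optional', 'required') is True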
- -Shielded VMs ------------- - -Introduced in Windows / Hyper-V Server 2016, shielded virtual machines are -Generation 2 VMs, with virtual TPMs, and encrypted using BitLocker (memory, -disks, VM state, video, etc.). These VMs can only run on healthy Guarded -Hosts. Because of this, the shielded VMs have better protection against -malware or even compromised administrators, as they cannot tamper with, -inspect, or steal data from these virtual machines. - -This feature has been introduced in OpenStack in Newton. - -In order to use this feature in OpenStack, the Hyper-V compute nodes must -be prepared and configured as a Guarded Host beforehand. Additionally, the -Shielded VM images must be prepared for this feature before uploading them -into Glance. - -For information on how to create a Host Guardian Service and Guarded Host -setup, and how to create a Shielded VM template for Glance, you can check -`this article`__. - -__ https://cloudbase.it/hyperv-shielded-vms-part-1/ - -Finally, after the Shielded VM template has been created, it will have to be -uploaded to Glance, after which Shielded VM instances can be spawned through -Nova. You can read the `followup article`__ for details on how to do these -steps. - -__ https://cloudbase.it/hyper-v-shielded-vms-part-2/ - - -Setting Boot Order ------------------- - -Support for setting boot order for Hyper-V instances has been introduced in -Liberty, and it is only available for Generation 2 VMs. For Generation 1 VMs, -the spawned VM's boot order is changed only if the given image is an ISO, -booting from ISO first. - -The boot order can be specified when creating a new instance: - -.. code-block:: bash - - nova boot --flavor m1.tiny --nic net-name=private --block-device \ - source=image,id=<image_id>,dest=volume,size=2,shutdown=remove,bootindex=0 \ - my-new-vm - -For more details on block devices, including more details about setting the -boot order, you can check the `block device mapping docs`__. - -__ https://docs.openstack.org/nova/stein/user/block-device-mapping.html#block-device-mapping-v2 - - -RemoteFX --------- - -RemoteFX allows you to virtualize your GPUs and share them with Hyper-V VMs by -adding virtual graphics devices to them. This is especially useful for -GPU-intensive applications (CUDA, OpenCL, etc.) and for a richer RDP -experience. - -We have added support for RemoteFX in OpenStack in Kilo. - -Check `this article`__ for more details on RemoteFX's prerequisites, how to -configure the host and the ``nova-compute`` service, guest OS requirements, -and how to spawn RemoteFX instances in OpenStack. - -RemoteFX can be enabled during spawn, or it can be enabled / disabled through -cold resize. - -__ https://cloudbase.it/openstack-remotefx/ - - -Hyper-V vNUMA instances ------------------------ - -Hyper-V instances can have a vNUMA topology starting with Windows / Hyper-V -Server 2012. This feature improves the performance for instances with large -amounts of memory and for high-performance NUMA-aware applications. - -Support for Hyper-V vNUMA instances has been added in Liberty. - -Before spawning vNUMA instances, the Hyper-V host must be configured first. For -this, refer to :ref:`numa_setup`. - -Hyper-V only supports symmetric NUMA topologies, and the Hyper-V Driver will -raise an exception if an asymmetric one is given. - -Additionally, a Hyper-V VM cannot be configured with a NUMA topology and -Dynamic Memory at the same time. Because of this, the Hyper-V Driver will -always disable Dynamic Memory on VMs that require NUMA topology, even if the -configured ``dynamic_memory_ratio`` is higher than ``1.0``.
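As a hypothetical sketch of the symmetry rule above (not the driver's actual validation code), the check boils down to every vNUMA cell being identical:

.. code-block:: python

    # Every vNUMA cell must have the same vCPU count and memory size;
    # asymmetric topologies are rejected. Dynamic Memory is implicitly
    # disabled for any instance that requests a NUMA topology.
    def validate_symmetric_topology(cells):
        """cells: list of (vcpu_count, memory_mb) tuples."""
        if any(cell != cells[0] for cell in cells[1:]):
            raise ValueError("Hyper-V only supports symmetric NUMA topologies")

    validate_symmetric_topology([(4, 4096), (4, 4096)])    # accepted
    # validate_symmetric_topology([(4, 4096), (2, 2048)])  # would raise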
-
-For more details on this feature and how to use it in OpenStack, check the
-`original spec`__.
-
-**Note:** Since Hyper-V is responsible for fitting the instances' vNUMA
-topologies into the host's NUMA topology, there is a slight risk that an
-instance cannot be started after it has been stopped for a while, because it
-no longer fits into the host's NUMA topology. For example, let's consider
-the following scenario:
-
-Host A with 2 NUMA nodes (0, 1), 16 GB memory each. The host has the following
-instances:
-
-* **instance A:** 16 GB memory, spans 2 vNUMA nodes (8 GB each).
-* **instances B, C:** 6 GB memory each, spanning 1 vNUMA node.
-* **instances D, E:** 2 GB memory each, spanning 1 vNUMA node.
-
-Topology-wise, they would fit as follows:
-
-**NUMA node 0:** A(0), B, D
-**NUMA node 1:** A(1), C, E
-
-All instances are stopped, then the following instances are started in this
-order: B, D, E, C. The topology would look something like this:
-
-**NUMA node 0:** B
-**NUMA node 1:** D, E, C
-
-Starting A will fail, as NUMA node 1 will have 10 GB of memory used, leaving
-less than the 8 GB that A needs on that node.
-
-One way to mitigate this issue is to segregate instances spanning multiple
-NUMA nodes onto different compute nodes / availability zones than the
-regular instances.
-
-__ https://specs.openstack.org/openstack/nova-specs/specs/ocata/implemented/hyper-v-vnuma-enable.html
-
-
-Using Cinder Volumes
---------------------
-
-Identifying disks
-~~~~~~~~~~~~~~~~~
-
-When attaching multiple volumes to an instance, it's important to have a way
-in which you can safely identify them on the guest side.
-
-While Libvirt exposes the Cinder volume id as the disk serial id (visible in
-/dev/disk/by-id/), this is not possible in the case of Hyper-V.
-
-The mountpoints exposed by Nova (e.g. /dev/sd*) are not a reliable source
-either (which is mostly true for other Nova drivers as well).
-
-Starting with Queens, the Hyper-V driver includes disk address information in
-the instance metadata, accessible on the guest side through the metadata
-service. This also applies to untagged volume attachments.
-
-.. note::
-    The config drive should not be relied upon when fetching disk metadata
-    as it never gets updated after an instance is created.
-
-Here's an example:
-
-.. code-block:: bash
-
-    nova volume-attach cirros 1517bb04-38ed-4b4a-bef3-21bec7d38792
-    vm_fip="192.168.42.74"
-
-    cmd="curl -s 169.254.169.254/openstack/latest/meta_data.json"
-    ssh_opts=( -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" )
-    metadata=`ssh "${ssh_opts[@]}" "cirros@$vm_fip" $cmd`
-    echo $metadata | python -m json.tool
-
-    # Sample output
-    #
-    # {
-    #     "availability_zone": "nova",
-    #     "devices": [
-    #         {
-    #             "address": "0:0:0:0",
-    #             "bus": "scsi",
-    #             "serial": "1517bb04-38ed-4b4a-bef3-21bec7d38792",
-    #             "tags": [],
-    #             "type": "disk"
-    #         }
-    #     ],
-    #     "hostname": "cirros.novalocal",
-    #     "launch_index": 0,
-    #     "name": "cirros",
-    #     "project_id": "3a8199184dfc4821ab01f9cbd72f905e",
-    #     "uuid": "f0a09969-d477-4d2f-9ad3-3e561226d49d"
-    # }
-
-    # Now that we have the disk SCSI address, we may fetch its path.
-    file `find /dev/disk/by-path | grep "scsi-0:0:0:0"`
-
-    # Sample output
-    # /dev/disk/by-path/pci-0000:00:10.0-scsi-0:0:0:0: symbolic link to ../../sdb
-
-The volumes may be identified in a similar way in the case of Windows guests
-as well.
-
-
-Online volume extend
-~~~~~~~~~~~~~~~~~~~~
-
-The Hyper-V driver supports online Cinder volume resize.
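-
-For example, assuming a deployment whose Cinder API supports microversion
-3.42 (which permits extending attached, in-use volumes):
-
-.. code-block:: bash
-
-    # Extend an attached volume to 20 GB while the instance keeps running.
-    cinder --os-volume-api-version 3.42 extend <volume-id> 20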
-
-Still, there are a few cases in which this feature is not available:
-
-* SMB backed volumes
-* Some iSCSI backends where the online resize operation impacts connected
-  initiators. For example, when using the Cinder LVM driver and TGT, the
-  iSCSI targets are actually recreated during the process. The MS iSCSI
-  initiator will attempt to reconnect, but TGT will report that the target
-  does not exist, so no further reconnect attempts will be made.
-
-
-Disk QoS
---------
-
-In terms of QoS, Hyper-V allows IOPS limits to be set on virtual disk images,
-preventing instances from exhausting the storage resources.
-
-Support for setting disk IOPS limits in Hyper-V has been added in OpenStack
-in Kilo.
-
-The IOPS limits can be specified by number of IOPS, or number of bytes per
-second (IOPS has precedence). Keep in mind that Hyper-V sets IOPS in
-normalized IOPS allocation units (8 KB increments) and if the configured QoS
-policies are not a multiple of 8 KB, the Hyper-V Driver will round down to
-the nearest multiple (minimum 1 IOPS).
-
-QoS is set differently for Cinder volumes and Nova local disks.
-
-
-Cinder Volumes
-~~~~~~~~~~~~~~
-
-Cinder QoS specs can be either front-end (enforced on the consumer side, in
-this case Nova) or back-end (enforced on the Cinder side).
-
-The Hyper-V driver only allows setting IOPS limits for volumes exposed by
-Cinder SMB backends. For other Cinder backends (e.g. SANs exposing volumes
-through iSCSI or FC), backend QoS specs must be used.
-
-.. code-block:: bash
-
-    # alternatively, total_iops_sec can be specified instead.
-    cinder qos-create my-qos consumer=front-end total_bytes_sec=<limit>
-    cinder qos-associate my-qos <volume-type-id>
-
-    cinder create <size> --volume-type <volume-type-id>
-
-    # The QoS specs are applied when the volume is attached to a Hyper-V instance
-    nova volume-attach <instance-id> <volume-id>
-
-
-Nova instance local disks
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The QoS policy applies to all of the instance's disks (including ephemeral
-disks), and can be enabled at spawn, or enabled / disabled through cold
-resize.
-
-.. code-block:: bash
-
-    # alternatively, quota:disk_total_iops_sec can be used instead.
-    nova flavor-key <flavor> set quota:disk_total_bytes_sec=<limit>
-
-
-PCI devices
------------
-
-Windows / Hyper-V Server 2016 introduced Discrete Device Assignment, which
-allows users to attach PCI devices directly to Hyper-V VMs. The Hyper-V host
-must have SR-IOV support and have the PCI devices prepared before assignment.
-
-The Hyper-V Driver added support for this feature in OpenStack in Ocata.
-
-For preparing the PCI devices for assignment, refer to :ref:`pci_devices_setup`.
-
-The PCI devices must be whitelisted before they can be assigned. For this,
-refer to :ref:`pci_devices_config`.
-
-PCI devices can be attached to Hyper-V instances at spawn, or attached /
-detached through cold resize, using nova flavor extra specs:
-
-.. code-block:: bash
-
-    nova flavor-key <flavor> set "pci_passthrough:alias"="<alias>:<num_pci_devices>"
-
-
-Serial port configuration
--------------------------
-
-Serial ports are used to interact with an instance's console and / or read
-its output. This feature was introduced for the Hyper-V Driver in Kilo.
-
-For Hyper-V, the serial ports can be configured to be Read Only or
-Read / Write. This can be specified through the image metadata properties:
-
-* ``interactive_serial_port``: configure the given port as Read / Write.
-* ``logging_serial_port``: configure the given port as Read Only.
-
-Valid values: ``1,2``
-
-One port will always be configured as Read / Write, and by default, that
-port is ``1``.
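-
-For example, the following configures port 2 as the Read Only (logging) port,
-leaving port 1 as the Read / Write one (the image id is a placeholder):
-
-.. code-block:: bash
-
-    glance image-update <image-id> --property interactive_serial_port=1 \
-        --property logging_serial_port=2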
-
-
-Hyper-V VM vNIC attach / detach
--------------------------------
-
-When creating a new instance, users can specify how many NICs the instance
-will have, and to which neutron networks / ports they will be connected.
-Starting with Kilo, additional NICs can also be added to Hyper-V VMs after
-they have been created. This can be done through the command:
-
-.. code-block:: bash
-
-    # alternatively, --port-id can be specified.
-    nova interface-attach <instance> --net-id <net-id>
-
-However, there are a few restrictions that have to be taken into account in
-order for the operation to be successful. When attaching a new vNIC to an
-instance, the instance must be turned off, unless all the following conditions
-are met:
-
-* The compute node hosting the VM runs Windows / Hyper-V Server 2016 or newer.
-* The instance is a Generation 2 VM.
-
-If the conditions are met, the vNIC can be hot-plugged and the instance does
-not have to be turned off.
-
-The same restrictions apply when detaching a vNIC from a Hyper-V instance.
-Detaching interfaces can be done through the command:
-
-.. code-block:: bash
-
-    nova interface-detach <instance> <port-id>
-
-
-Nested virtualization
----------------------
-
-Nested virtualization has been introduced in Windows / Hyper-V Server 2016
-and support for it has been added to OpenStack in Pike. This feature allows
-you to create Hyper-V instances which can in turn create nested VMs of their
-own.
-
-In order to use this feature, the compute nodes must have the latest updates
-installed.
-
-At the moment, only Windows / Hyper-V Server 2016 or Windows 10 guests can
-benefit from this feature.
-
-Dynamic Memory is not supported for instances with nested virtualization
-enabled; thus, the Hyper-V Driver will always spawn such instances with
-Dynamic Memory disabled, even if the configured ``dynamic_memory_ratio`` is
-higher than 1.0.
-
-Disabling the security groups associated with the instance's neutron ports
-will enable MAC spoofing for the instance's NICs (Queens or newer, if
-``neutron-hyperv-agent`` is used), which is necessary if the nested VMs need
-access to the tenant or external network.
-
-Instances with nested virtualization enabled can be spawned by adding ``vmx``
-to the image metadata property ``hw_cpu_features`` or the nova flavor extra
-spec ``hw:cpu_features``, as shown in the example below.
-
-.. important::
-
-    This feature will not work on clustered compute nodes.
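-
-For example (the image id and flavor name below are placeholders):
-
-.. code-block:: bash
-
-    # Expose the vmx CPU feature through the image metadata property...
-    glance image-update <image-id> --property hw_cpu_features=vmx
-
-    # ... or through the nova flavor extra spec.
-    nova flavor-key <flavor> set "hw:cpu_features=vmx"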
diff --git a/etc/compute-hyperv-config-generator.conf b/etc/compute-hyperv-config-generator.conf
deleted file mode 100644
index f65fe8a8..00000000
--- a/etc/compute-hyperv-config-generator.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[DEFAULT]
-output_file = etc/compute-hyperv.conf.sample
-wrap_width = 80
-
-namespace = compute_hyperv
-namespace = os_win
-namespace = nova.conf
-namespace = oslo.log
-namespace = oslo.messaging
-namespace = oslo.concurrency
diff --git a/nova/__init__.py b/nova/__init__.py
deleted file mode 100644
index 3edd78c2..00000000
--- a/nova/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-__import__('pkg_resources').declare_namespace(__name__)
-
-import os
-
-os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
-
-# NOTE(rpodolyaka): import oslo_service first, so that it makes eventlet hub
-# use a monotonic clock to avoid issues with drifts of system time (see
-# LP 1510234 for details)
-import oslo_service # noqa
-
-import eventlet # noqa
diff --git a/nova/virt/__init__.py b/nova/virt/__init__.py
deleted file mode 100644
index 42067924..00000000
--- a/nova/virt/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-__import__('pkg_resources').declare_namespace(__name__)
diff --git a/nova/virt/compute_hyperv/__init__.py b/nova/virt/compute_hyperv/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/nova/virt/compute_hyperv/cluster/__init__.py b/nova/virt/compute_hyperv/cluster/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/nova/virt/compute_hyperv/cluster/driver.py b/nova/virt/compute_hyperv/cluster/driver.py
deleted file mode 100644
index 9f1fd769..00000000
--- a/nova/virt/compute_hyperv/cluster/driver.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A Hyper-V Cluster Nova Compute driver."""
-
-from compute_hyperv.nova.cluster import driver
-
-# NOTE: nova changed the way it imports drivers. All drivers must belong
-# in the nova.virt namespace.
-
-HyperVClusterDriver = driver.HyperVClusterDriver
diff --git a/nova/virt/compute_hyperv/driver.py b/nova/virt/compute_hyperv/driver.py
deleted file mode 100644
index fb3a2fed..00000000
--- a/nova/virt/compute_hyperv/driver.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A Hyper-V Nova Compute driver. -""" - -from compute_hyperv.nova import driver - -# NOTE: nova changed the way it imports drivers. All drivers must belong -# in the nova.virt namespace. - -HyperVDriver = driver.HyperVDriver diff --git a/openstack-common.conf b/openstack-common.conf deleted file mode 100644 index 23d666e2..00000000 --- a/openstack-common.conf +++ /dev/null @@ -1,6 +0,0 @@ -[DEFAULT] - -# The list of modules to copy from oslo-incubator.git - -# The base module to hold the copy of openstack.common -base=hyperv diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/cluster-distributed-locks-5f12252af6b3913b.yaml b/releasenotes/notes/cluster-distributed-locks-5f12252af6b3913b.yaml deleted file mode 100644 index 758fc8bc..00000000 --- a/releasenotes/notes/cluster-distributed-locks-5f12252af6b3913b.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - | - When using the cluster driver, a distributed lock backend will have to be - configured. -fixes: - - | - In order to fix race conditions that can occur when handling instance - failovers, the cluster driver is now using distributed locks. A - distributed lock backend (e.g. etcd, mysql, file based, etc) will have to - be configured. diff --git a/releasenotes/notes/drop-ovs-support-616ec2952580c93d.yaml b/releasenotes/notes/drop-ovs-support-616ec2952580c93d.yaml deleted file mode 100644 index a27ff72b..00000000 --- a/releasenotes/notes/drop-ovs-support-616ec2952580c93d.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - Windows OVS support is no longer maintained. The Neutron OVS agent no longer - starts on Windows either since Wallaby. Please consider using the - networking-hyperv Neutron ML2 agent instead. diff --git a/releasenotes/notes/drop-py-2-7-5cd36052d5c2e594.yaml b/releasenotes/notes/drop-py-2-7-5cd36052d5c2e594.yaml deleted file mode 100644 index 877ae4f9..00000000 --- a/releasenotes/notes/drop-py-2-7-5cd36052d5c2e594.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Python 2.7 support has been dropped. Last release of compute-hyperv - to support py2.7 is OpenStack Train. The minimum version of Python now - supported by compute-hyperv is Python 3.6. diff --git a/releasenotes/notes/hyper-v-server-2012-support-deprecated-02a956e3926351d6.yaml b/releasenotes/notes/hyper-v-server-2012-support-deprecated-02a956e3926351d6.yaml deleted file mode 100644 index 0b6395bf..00000000 --- a/releasenotes/notes/hyper-v-server-2012-support-deprecated-02a956e3926351d6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - Support for Windows / Hyper-V Server 2012 has been deprecated in Queens - in nova and will be removed in Rocky. The supported versions are Windows / - Hyper-V Server 2012 R2 or newer. 
diff --git a/releasenotes/notes/instance-evacuate-2c46e63e3a6ae9c4.yaml b/releasenotes/notes/instance-evacuate-2c46e63e3a6ae9c4.yaml deleted file mode 100644 index e237a483..00000000 --- a/releasenotes/notes/instance-evacuate-2c46e63e3a6ae9c4.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - The Hyper-V driver now supports the "instance evacuate" feature. diff --git a/releasenotes/notes/online-volume-resize-446d58c9f6f340b6.yaml b/releasenotes/notes/online-volume-resize-446d58c9f6f340b6.yaml deleted file mode 100644 index 05f40afd..00000000 --- a/releasenotes/notes/online-volume-resize-446d58c9f6f340b6.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - The Hyper-V driver now supports online volume resize (only applies to - iSCSI/FC disks). Warning: this won't work properly with some iSCSI - backends (e.g. LVM + tgt), which will drop connected initiators - during an online volume resize operation. diff --git a/releasenotes/notes/rbd-support-9bb0037f69249785.yaml b/releasenotes/notes/rbd-support-9bb0037f69249785.yaml deleted file mode 100644 index 63881ea7..00000000 --- a/releasenotes/notes/rbd-support-9bb0037f69249785.yaml +++ /dev/null @@ -1,5 +0,0 @@ -features: - - | - The standard Hyper-V Nova driver can now attach RBD volumes. Note that the - cluster driver doesn't support RBD volumes yet. The minimum required - Ceph version is Ceph 16 (Pacific). diff --git a/releasenotes/notes/update-device-metadata-7204fb0e85bad1e3.yaml b/releasenotes/notes/update-device-metadata-7204fb0e85bad1e3.yaml deleted file mode 100644 index ccf4496b..00000000 --- a/releasenotes/notes/update-device-metadata-7204fb0e85bad1e3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - The Hyper-V driver now properly provides instance disk metadata (even for - untagged disks), allowing the guests to safely identify disks. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 4c44f562..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'compute_hyperv' -copyright = '2017, Cloudbase Solutions' - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/compute-hyperv' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'compute-hyperv' -openstackdocs_bug_tag = '' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. 
-# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'compute_hypervReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'compute_hypervReleaseNotes.tex', - 'compute_hyperv Release Notes Documentation', - 'Cloudbase Solutions', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. 
List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'compute_hypervrereleasenotes', - 'compute_hyperv Release Notes Documentation', - ['Cloudbase Solutions'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'compute_hyperv ReleaseNotes', - 'compute_hyperv Release Notes Documentation', - 'Cloudbase Solutions', 'compute_hypervReleaseNotes', - 'OpenStack Nova Hyper-V Driver.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index dd3611eb..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -============================ -compute_hyperv Release Notes -============================ - -.. toctree:: - :maxdepth: 1 - - unreleased - zed - yoga - xena - wallaby - victoria - ussuri - train diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 58390039..00000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabc..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0..00000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6f..00000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b5659..00000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst deleted file mode 100644 index 1be85be3..00000000 --- a/releasenotes/source/xena.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Xena Series Release Notes -========================= - -.. 
release-notes:: - :branch: stable/xena diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst deleted file mode 100644 index 7cd5e908..00000000 --- a/releasenotes/source/yoga.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Yoga Series Release Notes -========================= - -.. release-notes:: - :branch: stable/yoga diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst deleted file mode 100644 index 9608c05e..00000000 --- a/releasenotes/source/zed.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -Zed Series Release Notes -======================== - -.. release-notes:: - :branch: stable/zed diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e8b76363..00000000 --- a/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr>=5.5.1 # Apache-2.0 - -os-brick>=4.3.1 # Apache-2.0 -os-win>=5.4.0 # Apache-2.0 -oslo.config>=8.6.0 # Apache-2.0 -oslo.log>=4.4.0 # Apache-2.0 -oslo.serialization>=4.1.0 # Apache-2.0 -oslo.service>=2.5.0 # Apache-2.0 -oslo.utils>=4.8.0 # Apache-2.0 -oslo.i18n>=5.0.1 # Apache-2.0 - -tooz>=1.58.0 # Apache-2.0 - -eventlet>=0.30.1 # MIT -python-barbicanclient>=4.5.2 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 4184f333..00000000 --- a/setup.cfg +++ /dev/null @@ -1,50 +0,0 @@ -[metadata] -name = compute-hyperv -summary = Hyper-V Nova Driver -description_file = - README.rst -license = Apache License, Version 2.0 -author = Cloudbase Solutions Srl -author_email = info@cloudbasesolutions.com -home_page = http://www.cloudbase.it/ -url = https://github.com/cloudbase/compute-hyperv -python_requires = >=3.6 -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: Microsoft :: Windows - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - Programming Language :: Python :: 3.6 - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 -keywords = openstack nova hyper-v compute - -[files] -packages = - compute_hyperv - nova - nova.virt.compute_hyperv - -[entry_points] -oslo.config.opts = - compute_hyperv = compute_hyperv.nova.conf:list_opts - -[compile_catalog] -directory = compute_hyperv/locale -domain = compute-hyperv - -[update_catalog] -domain = compute-hyperv -output_dir = compute_hyperv/locale/nova -input_file = compute_hyperv/locale/compute-hyperv.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = compute_hyperv/locale/compute-hyperv.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c3..00000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index dabe02ed..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking>=3.0.1,<3.1.0 # Apache-2.0 - -coverage>=5.2.1 # Apache-2.0 -ddt>=1.2.1 # MIT -mock>=3.0.0 # BSD -python-subunit>=1.4.0 # Apache-2.0/BSD -oslotest>=3.8.0 # Apache-2.0 -stestr>=2.0.0 # Apache-2.0 -testtools>=2.2.0 # MIT - -# placement functional tests -wsgi-intercept>=1.7.0 # MIT License diff --git a/tools/tox_install.sh b/tools/tox_install.sh deleted file mode 100755 index 2b86d39e..00000000 --- a/tools/tox_install.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# This repo depends on nova, but it does not exist on pypi. - -# This wrapper for tox's package installer will use the existing package -# if it exists, else use zuul-cloner if that program exists, else grab it -# from nova master via a hard-coded URL. That last case should only -# happen with devs running unit tests locally. - -# From the tox.ini config page: -# install_command=ARGV -# default: -# pip install {opts} {packages} - -ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner -BRANCH_NAME=master -GIT_BASE=${GIT_BASE:-https://git.openstack.org/} - -install_project() { - local project=$1 - local branch=${2:-$BRANCH_NAME} - - if [ -x "$ZUUL_CLONER" ]; then - echo "ZUUL CLONER" > /tmp/tox_install.txt - # Make this relative to current working directory so that - # git clean can remove it. We cannot remove the directory directly - # since it is referenced after $install_cmd -e - mkdir -p .tmp - PROJECT_DIR=$(/bin/mktemp -d -p $(pwd)/.tmp) - pushd $PROJECT_DIR - $ZUUL_CLONER --cache-dir \ - /opt/git \ - --branch $branch \ - http://git.openstack.org \ - openstack/$project - cd openstack/$project - $install_cmd -e . - popd - else - echo "PIP HARDCODE" > /tmp/tox_install.txt - local GIT_REPO="$GIT_BASE/openstack/$project" - SRC_DIR="$VIRTUAL_ENV/src/$project" - git clone --depth 1 --branch $branch $GIT_REPO $SRC_DIR - $install_cmd -U -e $SRC_DIR - fi -} - -set -e - -install_cmd="pip install -c$1" -shift - -install_project nova - -$install_cmd -U . -exit $? diff --git a/tox.ini b/tox.ini deleted file mode 100644 index cb73cdc3..00000000 --- a/tox.ini +++ /dev/null @@ -1,117 +0,0 @@ -[tox] -minversion = 2.0 -envlist = py3,pep8,pip-missing-reqs -skipsdist = True - -[testenv] -basepython = python3 -usedevelop = True -# tox is silly... these need to be separated by a newline.... -whitelist_externals = bash - find -# Note the hash seed is set to 0 until hyperv can be tested with a -# random hash seed successfully. 
-setenv = VIRTUAL_ENV={envdir} - PYTHONHASHSEED=0 - OS_TEST_PATH=./compute_hyperv/tests/unit - LANGUAGE=en_US -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} - -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -egit+https://github.com/openstack/nova#egg=nova -commands = - find . -type f -name "*.pyc" -delete - stestr run --slowest {posargs} -# there is also secret magic in pretty_tox.sh which lets you run in a fail only -# mode. To do this define the TRACE_FAILONLY environmental variable. - -[testenv:pep8] -commands = - flake8 {posargs} - -[testenv:cover] -basepython = python3 -# Also do not run test_coverage_ext tests while gathering coverage as those -# tests conflict with coverage. -setenv = - PYTHON=coverage run --source compute_hyperv --parallel-mode -commands = - coverage erase - stestr run {posargs} - coverage combine - coverage html --include='compute_hyperv/*' --omit='compute_hyperv/openstack/common/*' -d covhtml -i - coverage html -d cover - coverage xml -o cover/coverage.xml - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -deps = -r{toxinidir}/doc/requirements.txt - -egit+https://github.com/openstack/nova#egg=nova -commands = - # We cannot use -W here since nova docs contain links to the nova tree - # that leads to invalid links for this build. - sphinx-build -a -E -d doc/build/doctrees -b html doc/source doc/build/html - -[testenv:releasenotes] -deps = {[testenv:docs]deps} -commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[flake8] -# E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 -# The rest of the ignores are TODOs -# New from hacking 0.9: E129, E131, H407, H405 -# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -# We'll skip E402 so that we can still call certain functions before importing -# modules (e.g. eventlet.monkey_patch()). -# W504 skipped since you must choose either W503 or W504 (they conflict) - -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E402,H405,W504,W605 -exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools/xenserver* -# To get a list of functions that are more complex than 25, set max-complexity -# to 25 and run 'tox -epep8'. -# 37 is currently the most complex thing we have -# TODO(jogo): get this number down to 25 or so -max-complexity=38 -# H106 No vi headers -# H904 Delay string interpolations at logging calls. 
-enable-extensions = H904,H106 - -[flake8:local-plugins] -extension = - C312 = checks:no_translate_logs - N307 = checks:import_no_db_in_virt - N309 = checks:no_db_session_in_public_api - N310 = checks:use_timeutils_utcnow - N311 = checks:import_no_virt_driver_import_deps - N312 = checks:import_no_virt_driver_config_deps - N313 = checks:capital_cfg_help - N316 = checks:assert_true_instance - N317 = checks:assert_equal_type - N318 = checks:assert_equal_none - N320 = checks:no_setting_conf_directly_in_tests - N322 = checks:no_mutable_default_args - N323 = checks:check_explicit_underscore_import - N324 = checks:use_jsonutils - N325 = checks:CheckForStrUnicodeExc - N326 = checks:CheckForTransAdd - N332 = checks:check_api_version_decorator - N333 = checks:check_oslo_namespace_imports - N334 = checks:assert_true_or_false_with_in - N335 = checks:assert_raises_regexp - N336 = checks:dict_constructor_with_list_copy - N337 = checks:no_import_translation_in_tests - N338 = checks:assert_equal_in -paths = ./compute_hyperv/hacking - -[testenv:pip-missing-reqs] -# do not install test-requirements as that will pollute the virtualenv for -# determining missing packages -# this also means that pip-missing-reqs must be installed separately, outside -# of the requirements.txt files -deps = pip_missing_reqs - -rrequirements.txt -commands=pip-missing-reqs -d --ignore-file=compute_hyperv/tests/* compute_hyperv