From 04e053c5cbcb45075f3276511b3bce583287bada Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 20 Feb 2024 22:49:19 +0900 Subject: [PATCH] Retire PowerVMStacker SIG: Remove Project Content Depends-on: https://review.opendev.org/c/openstack/project-config/+/909535 Change-Id: Icb1894348ef7b1602a3181dad3162df6d6ad53af --- .gitignore | 29 - .stestr.conf | 3 - .zuul.yaml | 7 - CONTRIBUTING.rst | 19 - HACKING.rst | 4 - LICENSE | 176 -- README.rst | 334 +-- babel.cfg | 2 - devstack/README.rst | 126 - devstack/local.conf.aio-ovs-ssp | 69 - devstack/local.conf.aio-sea-localdisk | 63 - devstack/local.conf.compute | 57 - devstack/local.conf.control | 42 - devstack/override-defaults | 3 - devstack/plugin.sh | 144 -- devstack/powervm-functions.sh | 38 - devstack/settings | 28 - doc/requirements.txt | 4 - doc/source/conf.py | 84 - doc/source/devref/development_environment.rst | 55 - doc/source/devref/index.rst | 48 - doc/source/devref/project_structure.rst | 117 - doc/source/devref/testing.rst | 64 - doc/source/devref/usage.rst | 212 -- doc/source/index.rst | 64 - doc/source/policies/bugs.rst | 26 - doc/source/policies/code-reviews.rst | 13 - doc/source/policies/contributing.rst | 1 - doc/source/policies/index.rst | 39 - doc/source/readme.rst | 1 - doc/source/specs/index.rst | 12 - doc/source/specs/newton/index.rst | 7 - .../specs/newton/lb_and_ovs_support.rst | 183 -- .../specs/newton/powervm-sriov-nova.rst | 350 --- doc/source/specs/ocata/image_cache.rst | 179 -- doc/source/specs/ocata/index.rst | 7 - doc/source/specs/pike/fileio_cinder.rst | 142 -- doc/source/specs/pike/fileio_driver.rst | 120 - doc/source/specs/pike/index.rst | 7 - .../pike/srr-capability-dynamic-toggle.rst | 145 -- doc/source/specs/rocky/device-passthrough.rst | 414 --- doc/source/specs/rocky/index.rst | 7 - doc/source/specs/template.rst | 316 --- doc/source/support-matrix.ini | 654 ----- doc/source/support-matrix.rst | 41 - lower-constraints.txt | 186 -- nova/__init__.py | 18 - nova/virt/__init__.py | 18 - nova/virt/powervm_ext/__init__.py | 0 nova/virt/powervm_ext/driver.py | 33 - nova_powervm/__init__.py | 0 nova_powervm/conf/__init__.py | 23 - nova_powervm/conf/powervm.py | 260 -- nova_powervm/hacking/__init__.py | 0 nova_powervm/hacking/checks.py | 21 - nova_powervm/locale/de/nova-powervm.po | 425 ---- nova_powervm/locale/es/nova-powervm.po | 425 ---- nova_powervm/locale/fr/nova-powervm.po | 427 ---- nova_powervm/locale/it/nova-powervm.po | 425 ---- nova_powervm/locale/ja/nova-powervm.po | 423 ---- nova_powervm/locale/ko/nova-powervm.po | 425 ---- nova_powervm/locale/nova-powervm.pot | 348 --- nova_powervm/locale/pt-BR/nova-powervm.po | 425 ---- nova_powervm/locale/ru/nova-powervm.po | 425 ---- nova_powervm/locale/zh-Hans/nova-powervm.po | 425 ---- nova_powervm/locale/zh-Hant/nova-powervm.po | 425 ---- nova_powervm/tests/__init__.py | 0 nova_powervm/tests/conf/__init__.py | 0 nova_powervm/tests/conf/test_conf.py | 163 -- nova_powervm/tests/virt/__init__.py | 0 nova_powervm/tests/virt/powervm/__init__.py | 93 - .../tests/virt/powervm/disk/__init__.py | 0 .../tests/virt/powervm/disk/fake_adapter.py | 60 - .../tests/virt/powervm/disk/test_driver.py | 88 - .../virt/powervm/disk/test_imagecache.py | 103 - .../tests/virt/powervm/disk/test_localdisk.py | 447 ---- .../tests/virt/powervm/disk/test_ssp.py | 625 ----- nova_powervm/tests/virt/powervm/fixtures.py | 196 -- .../tests/virt/powervm/nvram/__init__.py | 0 .../tests/virt/powervm/nvram/fake_api.py | 67 - .../tests/virt/powervm/nvram/test_manager.py | 97 - 
.../tests/virt/powervm/nvram/test_swift.py | 319 --- .../tests/virt/powervm/tasks/__init__.py | 0 .../tests/virt/powervm/tasks/test_image.py | 67 - .../tests/virt/powervm/tasks/test_network.py | 416 --- .../tests/virt/powervm/tasks/test_slot.py | 55 - .../tests/virt/powervm/tasks/test_storage.py | 407 --- .../tests/virt/powervm/tasks/test_vm.py | 267 -- .../tests/virt/powervm/test_driver.py | 2219 ----------------- nova_powervm/tests/virt/powervm/test_event.py | 393 --- nova_powervm/tests/virt/powervm/test_host.py | 103 - nova_powervm/tests/virt/powervm/test_image.py | 62 - .../tests/virt/powervm/test_live_migration.py | 335 --- nova_powervm/tests/virt/powervm/test_media.py | 248 -- nova_powervm/tests/virt/powervm/test_mgmt.py | 192 -- nova_powervm/tests/virt/powervm/test_slot.py | 171 -- nova_powervm/tests/virt/powervm/test_vif.py | 968 ------- nova_powervm/tests/virt/powervm/test_vm.py | 913 ------- .../tests/virt/powervm/volume/__init__.py | 0 .../tests/virt/powervm/volume/test_driver.py | 109 - .../tests/virt/powervm/volume/test_fileio.py | 193 -- .../tests/virt/powervm/volume/test_gpfs.py | 40 - .../tests/virt/powervm/volume/test_iscsi.py | 669 ----- .../tests/virt/powervm/volume/test_local.py | 40 - .../tests/virt/powervm/volume/test_nfs.py | 40 - .../tests/virt/powervm/volume/test_npiv.py | 694 ------ .../tests/virt/powervm/volume/test_rbd.py | 194 -- .../tests/virt/powervm/volume/test_vscsi.py | 601 ----- nova_powervm/version.py | 17 - nova_powervm/virt/__init__.py | 0 nova_powervm/virt/powervm/__init__.py | 0 nova_powervm/virt/powervm/disk/__init__.py | 0 nova_powervm/virt/powervm/disk/driver.py | 425 ---- nova_powervm/virt/powervm/disk/imagecache.py | 93 - nova_powervm/virt/powervm/disk/localdisk.py | 364 --- nova_powervm/virt/powervm/disk/ssp.py | 470 ---- nova_powervm/virt/powervm/driver.py | 1920 -------------- nova_powervm/virt/powervm/event.py | 257 -- nova_powervm/virt/powervm/exception.py | 144 -- nova_powervm/virt/powervm/host.py | 114 - nova_powervm/virt/powervm/i18n.py | 21 - nova_powervm/virt/powervm/image.py | 72 - nova_powervm/virt/powervm/live_migration.py | 478 ---- nova_powervm/virt/powervm/media.py | 342 --- nova_powervm/virt/powervm/mgmt.py | 176 -- nova_powervm/virt/powervm/nvram/__init__.py | 0 nova_powervm/virt/powervm/nvram/api.py | 69 - nova_powervm/virt/powervm/nvram/manager.py | 205 -- nova_powervm/virt/powervm/nvram/swift.py | 323 --- nova_powervm/virt/powervm/slot.py | 188 -- nova_powervm/virt/powervm/tasks/__init__.py | 0 nova_powervm/virt/powervm/tasks/base.py | 40 - nova_powervm/virt/powervm/tasks/image.py | 82 - nova_powervm/virt/powervm/tasks/network.py | 311 --- nova_powervm/virt/powervm/tasks/slot.py | 71 - nova_powervm/virt/powervm/tasks/storage.py | 553 ---- nova_powervm/virt/powervm/tasks/vm.py | 322 --- nova_powervm/virt/powervm/vif.py | 845 ------- nova_powervm/virt/powervm/vm.py | 833 ------- nova_powervm/virt/powervm/volume/__init__.py | 82 - nova_powervm/virt/powervm/volume/driver.py | 321 --- nova_powervm/virt/powervm/volume/fileio.py | 189 -- nova_powervm/virt/powervm/volume/gpfs.py | 24 - nova_powervm/virt/powervm/volume/iscsi.py | 478 ---- nova_powervm/virt/powervm/volume/local.py | 24 - nova_powervm/virt/powervm/volume/nfs.py | 26 - nova_powervm/virt/powervm/volume/npiv.py | 744 ------ nova_powervm/virt/powervm/volume/rbd.py | 181 -- nova_powervm/virt/powervm/volume/volume.py | 300 --- nova_powervm/virt/powervm/volume/vscsi.py | 392 --- openstack-common.conf | 4 - requirements.txt | 17 - setup.cfg | 46 - setup.py | 29 - 
sonar-project.properties | 13 - test-requirements.txt | 15 - tox.ini | 99 - 157 files changed, 6 insertions(+), 32980 deletions(-) delete mode 100644 .gitignore delete mode 100644 .stestr.conf delete mode 100644 .zuul.yaml delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE delete mode 100644 babel.cfg delete mode 100644 devstack/README.rst delete mode 100644 devstack/local.conf.aio-ovs-ssp delete mode 100644 devstack/local.conf.aio-sea-localdisk delete mode 100644 devstack/local.conf.compute delete mode 100644 devstack/local.conf.control delete mode 100644 devstack/override-defaults delete mode 100755 devstack/plugin.sh delete mode 100644 devstack/powervm-functions.sh delete mode 100644 devstack/settings delete mode 100644 doc/requirements.txt delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/devref/development_environment.rst delete mode 100644 doc/source/devref/index.rst delete mode 100644 doc/source/devref/project_structure.rst delete mode 100644 doc/source/devref/testing.rst delete mode 100644 doc/source/devref/usage.rst delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/policies/bugs.rst delete mode 100644 doc/source/policies/code-reviews.rst delete mode 100644 doc/source/policies/contributing.rst delete mode 100644 doc/source/policies/index.rst delete mode 100644 doc/source/readme.rst delete mode 100644 doc/source/specs/index.rst delete mode 100644 doc/source/specs/newton/index.rst delete mode 100644 doc/source/specs/newton/lb_and_ovs_support.rst delete mode 100644 doc/source/specs/newton/powervm-sriov-nova.rst delete mode 100644 doc/source/specs/ocata/image_cache.rst delete mode 100644 doc/source/specs/ocata/index.rst delete mode 100644 doc/source/specs/pike/fileio_cinder.rst delete mode 100644 doc/source/specs/pike/fileio_driver.rst delete mode 100644 doc/source/specs/pike/index.rst delete mode 100644 doc/source/specs/pike/srr-capability-dynamic-toggle.rst delete mode 100644 doc/source/specs/rocky/device-passthrough.rst delete mode 100644 doc/source/specs/rocky/index.rst delete mode 100644 doc/source/specs/template.rst delete mode 100644 doc/source/support-matrix.ini delete mode 100644 doc/source/support-matrix.rst delete mode 100644 lower-constraints.txt delete mode 100644 nova/__init__.py delete mode 100644 nova/virt/__init__.py delete mode 100644 nova/virt/powervm_ext/__init__.py delete mode 100644 nova/virt/powervm_ext/driver.py delete mode 100644 nova_powervm/__init__.py delete mode 100644 nova_powervm/conf/__init__.py delete mode 100644 nova_powervm/conf/powervm.py delete mode 100644 nova_powervm/hacking/__init__.py delete mode 100644 nova_powervm/hacking/checks.py delete mode 100644 nova_powervm/locale/de/nova-powervm.po delete mode 100644 nova_powervm/locale/es/nova-powervm.po delete mode 100644 nova_powervm/locale/fr/nova-powervm.po delete mode 100644 nova_powervm/locale/it/nova-powervm.po delete mode 100644 nova_powervm/locale/ja/nova-powervm.po delete mode 100644 nova_powervm/locale/ko/nova-powervm.po delete mode 100644 nova_powervm/locale/nova-powervm.pot delete mode 100644 nova_powervm/locale/pt-BR/nova-powervm.po delete mode 100644 nova_powervm/locale/ru/nova-powervm.po delete mode 100644 nova_powervm/locale/zh-Hans/nova-powervm.po delete mode 100644 nova_powervm/locale/zh-Hant/nova-powervm.po delete mode 100644 nova_powervm/tests/__init__.py delete mode 100644 nova_powervm/tests/conf/__init__.py delete mode 100644 nova_powervm/tests/conf/test_conf.py delete mode 100644 
nova_powervm/tests/virt/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/fake_adapter.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/test_driver.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/test_imagecache.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/test_localdisk.py delete mode 100644 nova_powervm/tests/virt/powervm/disk/test_ssp.py delete mode 100644 nova_powervm/tests/virt/powervm/fixtures.py delete mode 100644 nova_powervm/tests/virt/powervm/nvram/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/nvram/fake_api.py delete mode 100644 nova_powervm/tests/virt/powervm/nvram/test_manager.py delete mode 100644 nova_powervm/tests/virt/powervm/nvram/test_swift.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/test_image.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/test_network.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/test_slot.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/test_storage.py delete mode 100644 nova_powervm/tests/virt/powervm/tasks/test_vm.py delete mode 100644 nova_powervm/tests/virt/powervm/test_driver.py delete mode 100644 nova_powervm/tests/virt/powervm/test_event.py delete mode 100644 nova_powervm/tests/virt/powervm/test_host.py delete mode 100644 nova_powervm/tests/virt/powervm/test_image.py delete mode 100644 nova_powervm/tests/virt/powervm/test_live_migration.py delete mode 100644 nova_powervm/tests/virt/powervm/test_media.py delete mode 100644 nova_powervm/tests/virt/powervm/test_mgmt.py delete mode 100644 nova_powervm/tests/virt/powervm/test_slot.py delete mode 100644 nova_powervm/tests/virt/powervm/test_vif.py delete mode 100644 nova_powervm/tests/virt/powervm/test_vm.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/__init__.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_driver.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_fileio.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_gpfs.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_iscsi.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_local.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_nfs.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_npiv.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_rbd.py delete mode 100644 nova_powervm/tests/virt/powervm/volume/test_vscsi.py delete mode 100644 nova_powervm/version.py delete mode 100644 nova_powervm/virt/__init__.py delete mode 100644 nova_powervm/virt/powervm/__init__.py delete mode 100644 nova_powervm/virt/powervm/disk/__init__.py delete mode 100644 nova_powervm/virt/powervm/disk/driver.py delete mode 100644 nova_powervm/virt/powervm/disk/imagecache.py delete mode 100644 nova_powervm/virt/powervm/disk/localdisk.py delete mode 100644 nova_powervm/virt/powervm/disk/ssp.py delete mode 100644 nova_powervm/virt/powervm/driver.py delete mode 100644 nova_powervm/virt/powervm/event.py delete mode 100644 nova_powervm/virt/powervm/exception.py delete mode 100644 nova_powervm/virt/powervm/host.py delete mode 100644 nova_powervm/virt/powervm/i18n.py delete mode 100644 nova_powervm/virt/powervm/image.py delete mode 100644 nova_powervm/virt/powervm/live_migration.py delete mode 100644 
nova_powervm/virt/powervm/media.py delete mode 100644 nova_powervm/virt/powervm/mgmt.py delete mode 100644 nova_powervm/virt/powervm/nvram/__init__.py delete mode 100644 nova_powervm/virt/powervm/nvram/api.py delete mode 100644 nova_powervm/virt/powervm/nvram/manager.py delete mode 100644 nova_powervm/virt/powervm/nvram/swift.py delete mode 100644 nova_powervm/virt/powervm/slot.py delete mode 100644 nova_powervm/virt/powervm/tasks/__init__.py delete mode 100644 nova_powervm/virt/powervm/tasks/base.py delete mode 100644 nova_powervm/virt/powervm/tasks/image.py delete mode 100644 nova_powervm/virt/powervm/tasks/network.py delete mode 100644 nova_powervm/virt/powervm/tasks/slot.py delete mode 100644 nova_powervm/virt/powervm/tasks/storage.py delete mode 100644 nova_powervm/virt/powervm/tasks/vm.py delete mode 100644 nova_powervm/virt/powervm/vif.py delete mode 100644 nova_powervm/virt/powervm/vm.py delete mode 100644 nova_powervm/virt/powervm/volume/__init__.py delete mode 100644 nova_powervm/virt/powervm/volume/driver.py delete mode 100644 nova_powervm/virt/powervm/volume/fileio.py delete mode 100644 nova_powervm/virt/powervm/volume/gpfs.py delete mode 100644 nova_powervm/virt/powervm/volume/iscsi.py delete mode 100644 nova_powervm/virt/powervm/volume/local.py delete mode 100644 nova_powervm/virt/powervm/volume/nfs.py delete mode 100644 nova_powervm/virt/powervm/volume/npiv.py delete mode 100644 nova_powervm/virt/powervm/volume/rbd.py delete mode 100644 nova_powervm/virt/powervm/volume/volume.py delete mode 100644 nova_powervm/virt/powervm/volume/vscsi.py delete mode 100644 openstack-common.conf delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 sonar-project.properties delete mode 100644 test-requirements.txt delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index d2b65cde..00000000 --- a/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Add patterns in here to exclude files created by tools integrated with this -# repository, such as test frameworks from the project's recommended workflow, -# rendered documentation and package builds. -# -# Don't add patterns to exclude files created by preferred personal tools -# (editors, IDEs, your operating system itself even). 
These should instead be -# maintained outside the repository, for example in a ~/.gitignore file added -# with: -# -# git config --global core.excludesfile '~/.gitignore' - -# Bytecompiled Python -*.py[cod] - -# Packages -*.egg-info - -# Unit test / coverage reports -.coverage -cover/ -.stestr/ -.tox/ - -# Sphinx -doc/build/ - -# pbr generates these -AUTHORS -ChangeLog diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 85535f45..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./nova_powervm/tests -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index c5652cf9..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- project: - templates: - - check-requirements - - openstack-lower-constraints-jobs - - openstack-python-jobs - - openstack-python36-jobs - - periodic-stable-jobs diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index d898aacc..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -Contributing to Nova-PowerVM -============================ -If you would like to contribute to the development of OpenStack, -you must follow the steps in the "If you're a developer" -section of this page: - - https://wiki.openstack.org/wiki/How_To_Contribute - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/nova-powervm diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 80903c08..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -Nova-PowerVM Style Commandments -=============================== - -- Follow the Nova HACKING.rst diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index 4205584d..b1f2b8ea 100644 --- a/README.rst +++ b/README.rst @@ -1,329 +1,7 @@ -=================== -PowerVM Nova Driver -=================== +The contents of this repository are still available in the Git source +code management system. To see the contents of this repository before it +reached its end of life, please check out the previous commit with +"git checkout HEAD^1". -The IBM PowerVM hypervisor provides virtualization on POWER hardware. PowerVM -admins can see benefits in their environments by making use of OpenStack. -This driver (along with a Neutron ML2 compatible agent and Ceilometer agent) -provides the capability for operators of PowerVM to use OpenStack natively. - - -Problem Description -=================== - -As ecosystems continue to evolve around the POWER platform, a single OpenStack -driver does not meet all of the needs for the various hypervisors. The -standard libvirt driver provides support for KVM on POWER systems. This nova -driver provides PowerVM support to OpenStack environment. - -This driver meets the following: - -* Built within the community - -* Fits the OpenStack model - -* Utilizes automated functional and unit tests - -* Enables use of PowerVM systems through the OpenStack APIs - -* Allows attachment of volumes from Cinder over supported protocols - - -This driver makes the following use cases available for PowerVM: - -* As a deployer, all of the standard lifecycle operations (start, stop, - reboot, migrate, destroy, etc.) should be supported on a PowerVM based - instance. - -* As a deployer, I should be able to capture an instance to an image. - -* VNC console to instances deployed. - - -Usage -===== - -To use the driver, install the nova-powervm project on your NovaLink-based -PowerVM system. The nova-powervm project has a minimal set of configuration. -See the configuration options section of the dev-ref for more information. - -It is recommended that operators also make use of the networking-powervm -project. The project ensures that the network bridge supports the VLAN-based -networks required for the workloads. 
- -There is also a ceilometer-powervm project that can be included. - -Future work will be done to include PowerVM into the various OpenStack -deployment models. - - -Overview of Architecture -======================== - -The driver enables the following: - -* Provide deployments that work with the OpenStack model. - -* Driver is implemented using a new version of the PowerVM REST API. - -* Ephemeral disks are supported either with Virtual I/O Server (VIOS) - hosted local disks or via Shared Storage Pools (a PowerVM cluster file - system). - -* Volume support is provided via Cinder through supported protocols for the - Hypervisor (virtual SCSI and N-Port ID Virtualization). - -* Live migration support is available when using Shared Storage Pools or boot - from volume. - -* Network integration is supported via the ML2 compatible Neutron Agent. This - is the openstack/networking-powervm project. - -* Automated Functional Testing is provided to validate changes from the broader - OpenStack community against the PowerVM driver. - -* Thorough unit, syntax, and style testing is provided and enforced for the - driver. - -The intention is that this driver follows the OpenStack Nova model. - -The driver is being promoted into the nova core project in stages, the first of -which is represented by blueprint `powervm-nova-compute-driver`_. The -coexistence of these two incarnations of the driver raises some `Upgrade -Considerations`_. - -.. _`powervm-nova-compute-driver`: https://blueprints.launchpad.net/nova/+spec/powervm-nova-compute-driver - - -Data Model Impact ------------------ - -* The evacuate API is supported as part of the PowerVM driver. It optionally - allows for the NVRAM data to be stored to a Swift database. However this - does not impact the data model itself. It simply provides a location to - optionally store the VM's NVRAM metadata in the event of a rebuild, - evacuate, shelve, migration or resize. - - -REST API Impact ---------------- - -No REST API impacts. - - -Security Impact ---------------- - -No known security impacts. - - -Notifications Impact --------------------- - -No new notifications. The driver does expect that the Neutron agent will -return an event when the VIF plug has occurred, assuming that Neutron is -the network service. - - -Other End User Impact ---------------------- - -The administrator may notice new logging messages in the nova compute logs. - - -Performance Impact ------------------- - -The driver has a similar deployment speed and agility to other hypervisors. -It has been tested with up to 10 concurrent deploys with several hundred VMs -on a given server. - -Most operations are comparable in speed. Deployment, attach/detach volumes, -lifecycle, etc... are quick. - -Due to the nature of the project, any performance impacts are limited to the -Compute Driver. The API processes for instance are not impacted. - - -Other Deployer Impact ---------------------- - -The cloud administrator will need to refer to documentation on how to -configure OpenStack for use with a PowerVM hypervisor. - -A 'powervm' configuration group is used to contain all the PowerVM specific -configuration settings. Existing configuration file attributes will be -reused as much as possible (e.g. vif_plugging_timeout). This reduces the number -of PowerVM specific items that will be needed. - -It is the goal of the project to only require minimal additional attributes. -The deployer may specify additional attributes to fit their configuration. 
- - -Developer Impact ----------------- - -The code for this driver is currently contained within a powervm project. -The driver is within the /nova/virt/powervm_ext/ package and extends the -nova.virt.driver.ComputeDriver class. - -The code interacts with PowerVM through the pypowervm library. This python -binding is a wrapper to the PowerVM REST API. All hypervisor operations -interact with the PowerVM REST API via this binding. The driver is -maintained to support future revisions of the PowerVM REST API as needed. - -For ephemeral disk support, either a Virtual I/O Server hosted local disk or a -Shared Storage Pool (a PowerVM clustered file system) is supported. For -volume attachments, the driver supports Cinder-based attachments via -protocols supported by the hypervisor (e.g. Fibre Channel). - -For networking, the networking-powervm project provides Neutron ML2 Agents. -The agents provide the necessary configuration on the Virtual I/O Server for -networking. The PowerVM Nova driver code creates the VIF for the client VM, -but the Neutron agent creates the VIF for VLANs. - -Automated functional testing is provided through a third party continuous -integration system. It monitors for incoming Nova change sets, runs a set -of functional tests (lifecycle operations) against the incoming change, and -provides a non-gating vote (+1 or -1). - -Developers should not be impacted by these changes unless they wish to try the -driver. - - -Community Impact ----------------- - -The intent of this project is to bring another driver to OpenStack that -aligns with the ideals and vision of the community. The intention is to -promote this to core Nova. - - -Alternatives ------------- - -No alternatives appear viable to bring PowerVM support into the OpenStack -community. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - adreznec - efried - kyleh - thorst - -Other contributors: - multiple - - -Dependencies -============ - -* Utilizes the PowerVM REST API specification for management. Will - utilize future versions of this specification as it becomes available: - http://ibm.co/1lThV9R - -* Builds on top of the `pypowervm library`_. This is a prerequisite to - utilizing the driver. - -.. _pypowervm library: https://github.com/powervm/pypowervm - - -Upgrade Considerations -====================== - -Prior to Ocata, only the out-of-tree nova_powervm driver existed. The in-tree -driver is introduced in Ocata. - -Namespaces ----------- - -In Liberty and Mitaka, the namespace of the out-of-tree driver is -``nova_powervm.virt.powervm``. In Newton, it was moved to -``nova.virt.powervm``. In Ocata, the new in-tree driver occupies the -``nova.virt.powervm`` namespace, and the out-of-tree driver is moved to -``nova.virt.powervm_ext``. Ocata consumers have the option of using the -in-tree driver, which will provide limited functionality until it is fully -integrated; or the out-of-tree driver, which provides full functionality. -Refer to the documentation for the ``nova.conf`` settings required to load -the desired driver. - -Live Migrate Data Object ------------------------- - -In order to use live migration prior to Ocata, it was necessary to run the -customized nova_powervm conductor to bring in the ``PowerVMLiveMigrateData`` -object. In Ocata, this object is included in core nova, so no custom conductor -is necessary. 
- - -Testing -======= - -Tempest Tests -------------- - -Since the tempest tests should be implementation agnostic, the existing -tempest tests should be able to run against the PowerVM driver without issue. - -Tempest tests that require function that the platform does not yet support -(e.g. iSCSI or Floating IPs) will not pass. These should be ommitted from -the Tempest test suite. - -A `sample Tempest test configuration`_ for the PowerVM driver has been provided. - -Thorough unit tests exist within the project to validate specific functions -within this implementation. - -.. _`sample Tempest test configuration`: https://github.com/powervm/powervm-ci/tree/master/tempest - - -Functional Tests ----------------- - -A third party functional test environment has been created. It monitors -for incoming nova change sets. Once it detects a new change set, it will -execute the existing lifecycle API tests. A non-gating vote (+1 or -1) will -be provided with information provided (logs) based on the result. - - -API Tests ---------- - -Existing APIs should be valid. All testing is planned within the functional -testing system and via unit tests. - - -Documentation Impact -==================== - -User Documentation ------------------- - -See the dev-ref for documentation on how to configure, contribute, use, etc. -this driver implementation. - - -Developer Documentation ------------------------ - -The existing Nova developer documentation should typically suffice. However, -until merge into Nova, we will maintain a subset of dev-ref documentation. - - -References -========== - -* PowerVM REST API Specification (may require newer versions as they - become available): http://ibm.co/1lThV9R - -* PowerVM Virtualization Introduction and Configuration: - http://www.redbooks.ibm.com/abstracts/sg247940.html - -* PowerVM Best Practices: http://www.redbooks.ibm.com/abstracts/sg248062.html +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on OFTC. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index a83afc00..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,126 +0,0 @@ -======================== -Installing with DevStack -======================== - -What is DevStack? --------------------------- - -DevStack is a script to quickly create an OpenStack development environment. - -Find out more `here `_. - - -What are DevStack plugins? --------------------------- - -DevStack plugins act as project-specific extensions of DevStack. They allow external projects to -execute code directly in the DevStack run, supporting configuration and installation changes as -part of the normal local.conf and stack.sh execution. For NovaLink, we have DevStack plugins for -each of our three projects - nova-powervm, networking-powervm, and ceilometer-powervm. These -plugins, with the appropriate local.conf settings for your environment, will allow you to simply -clone down DevStack, configure, run stack.sh, and end up with a working OpenStack/Novalink PowerVM -environment with no other scripting required. - -More details can be `found here. `_ - - -How to use the NovaLink DevStack plugins: ------------------------------------------ - -1. Download DevStack:: - - $ git clone https://git.openstack.org/openstack-dev/devstack /opt/stack/devstack - -2. 
Set up your local.conf file to pull in our projects: - 1. If you have an existing DevStack local.conf, modify it to pull in this project by adding:: - - [[local|localrc]] - enable_plugin nova-powervm http://git.openstack.org/openstack/nova-powervm - - and following the instructions for networking-powervm and ceilometer-powervm - as needed for your environment. - - 2. If you're setting up DevStack for the first time, example files are available - in the nova-powervm project to provide reference on using this driver with the - corresponding networking-powervm and ceilometer-powervm drivers. Following these - example files will enable the appropriate drivers and services for each node type. - Example config files for all-in-one, compute, and control nodes - `can be found here. `_ - - The nova-powervm project provides different sample local.conf files as a - starting point for devstack. - - * local.conf.aio-sea-localdisk - - * Runs on the NovaLink VM of the PowerVM system - * Provides a full 'all in one' devstack VM - * Uses Shared Ethernet Adapter networking (networking-powervm) - * Uses localdisk disk driver - - * local.conf.aio-ovs-ssp - - * Runs on the NovaLink VM of the PowerVM system - * Provides a full 'all in one' devstack VM - * Uses Open vSwitch networking (neutron) - * Uses Shared Storage Pool disk driver - - * local.conf.control - - * Can run on any devstack capable machine (POWER or x86) - * Provides the controller node for devstack. Typically paired with the local.conf.compute - - * local.conf.compute - - * Runs on the NovaLink VM of the PowerVM system - * Provides the compute node for a devstack. Typically paired with the local.conf.control - -3. See our devrefs and plugin references for the configuration options for each driver, - then configure the installation in local.conf as needed for your environment. - - * nova-powervm - * http://nova-powervm.readthedocs.org/en/latest/devref/index.html - * https://github.com/openstack/nova-powervm/blob/master/devstack/README.rst - - * networking-powervm - * http://networking-powervm.readthedocs.io/en/latest/devref/index.html - * https://github.com/openstack/networking-powervm/blob/master/devstack/README.rst - - * ceilometer-powervm - * http://ceilometer-powervm.readthedocs.org/en/latest/devref/index.html - * https://github.com/openstack/ceilometer-powervm/blob/master/devstack/README.rst - -4. For nova-powervm, changing the DISK_DRIVER settings for your environment will be required. - The default configuration for other settings will be sufficient for most installs. :: - - [[local|localrc]] - ... - DISK_DRIVER = - VOL_GRP_NAME = - CLUSTER_NAME = - - [[post-config|$NOVA_CONF]] - [powervm] - ... - -5. A few notes: - - * By default this will pull in the latest/trunk versions of all the projects. If you want to - run a stable version instead, you can either check out that stable branch in the DevStack - repo (git checkout stable/liberty) which is the preferred method, or you can do it on a - project by project basis in the local.conf file as needed. - - * If you need any special services enabled for your environment, you can also specify those - in your local.conf file. In our example files we demonstrate enabling and disabling services - (n-cpu, q-agt, etc) required for our drivers. - -6. Run ``stack.sh`` from DevStack:: - - $ cd /opt/stack/devstack - $ FORCE=yes ./stack.sh - - ``FORCE=yes`` is needed on Ubuntu 15.10 since only Ubuntu LTS releases are officially supported - by DevStack. 
If you're running a control-only node on a different, supported OS version you can - skip using ``FORCE=yes``. - -7. At this point DevStack will run through stack.sh, and barring any DevStack issues, you should - end up with a standard link to your Horizon portal at the end of the stack run. Congratulations! diff --git a/devstack/local.conf.aio-ovs-ssp b/devstack/local.conf.aio-ovs-ssp deleted file mode 100644 index b304d12e..00000000 --- a/devstack/local.conf.aio-ovs-ssp +++ /dev/null @@ -1,69 +0,0 @@ -# This is an example devstack local.conf for an all-in-one stack using -# Open vSwitch networking. - -[[local|localrc]] -LOGFILE=/opt/stack/logs/stack.sh.log -SCREEN_LOGDIR=~/screen_log/ -LOGDAYS=1 -LOG_COLOR=True - -ADMIN_PASSWORD=admin -MYSQL_PASSWORD=mysql -RABBIT_PASSWORD=rabbit -SERVICE_PASSWORD=admin -SERVICE_TOKEN=service - -MULTI_HOST=0 -HOST_NAME=$(hostname) - -# Networking configuration. Update these values based on your network. -PUBLIC_INTERFACE= -FLOATING_RANGE= -FIXED_RANGE= -NETWORK_GATEWAY= -PUBLIC_NETWORK_GATEWAY= -Q_FLOATING_ALLOCATION_POOL= -HOST_IP= - -# ML2 Configuration -Q_ML2_TENANT_NETWORK_TYPE=vlan,vxlan,flat -Q_ML2_PLUGIN_TYPE_DRIVERS=vlan,vxlan,flat - -# Forces nova to use config drive -FORCE_CONFIG_DRIVE=True - -# TODO: The default version for etcd3 is 3.1.7. Power is not supported for this version. -# Using the 3.2.0 RC until 3.2.0 is released at which point this can be removed. -ETCD_VERSION=v3.2.0-rc.1 -ETCD_SHA256="c2d846326586afe169e6ca81266815196d6c14bc023f9c7d0c9d622f3c14505c" - -# Use the common SSP pool on the system. -DISK_DRIVER=ssp - -# Enable plugins -enable_plugin nova-powervm https://git.openstack.org/openstack/nova-powervm.git -enable_plugin neutron https://git.openstack.org/openstack/neutron - -# Enable services -enable_service n-novnc neutron neutron-api neutron-agent neutron-l3 neutron-dhcp neutron-metadata-agent -disable_service cinder n-net ceilometer-aipmi q-agt q-svc q-l3 q-dhcp q-meta - -[[post-config|$NOVA_CONF]] -[DEFAULT] -debug=False -default_log_levels=pypowervm=DEBUG,nova_powervm=DEBUG,nova=DEBUG,iamqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN -use_rootwrap_daemon = True - -[powervm] -use_rmc_ipv6_scheme=False - -[[post-config|$NEUTRON_CONF]] -[DEFAULT] -debug=False -verbose=False -default_log_levels=pypowervm=DEBUG,neutron=DEBUG,iamqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN - -[[post-config|$KEYSTONE_CONF]] -[DEFAULT] -debug=False - diff --git a/devstack/local.conf.aio-sea-localdisk b/devstack/local.conf.aio-sea-localdisk deleted file mode 100644 index f5637831..00000000 --- a/devstack/local.conf.aio-sea-localdisk +++ /dev/null @@ -1,63 +0,0 @@ -[[local|localrc]] -LOGFILE=/opt/stack/logs/stack.sh.log -SCREEN_LOGDIR=~/screen_log/ -LOGDAYS=1 -LOG_COLOR=True -DATA_DIR=/var/stack -ADMIN_PASSWORD=admin -MYSQL_PASSWORD=mysql -RABBIT_PASSWORD=rabbit -SERVICE_PASSWORD=admin -SERVICE_TOKEN=service - -MULTI_HOST=0 -HOST_NAME=$(hostname) - -# Networking Configuration -Q_PLUGIN=ml2 -Q_ML2_TENANT_NETWORK_TYPE=vlan -Q_ML2_PLUGIN_TYPE_DRIVERS=vlan -Q_USE_PROVIDERNET_FOR_PUBLIC=False -ENABLE_TENANT_VLANS=True -PHYSICAL_NETWORK=default -TENANT_VLAN_RANGE=1000:2000 -Q_AGENT=pvm_sea -NEUTRON_AGENT=pvm_sea -Q_ML2_PLUGIN_MECHANISM_DRIVERS=pvm_sea -ML2_L3_PLUGIN= -Q_USE_PROVIDER_NETWORKING=False -NEUTRON_CREATE_INITIAL_NETWORKS=False -NEUTRON_CORE_PLUGIN=ml2 -Q_PLUGIN_CONF_FILE=etc/neutron/plugins/ml2/ml2_conf.ini - -# Forces nova to use config drive
-FORCE_CONFIG_DRIVE=True - -# localdisk or ssp. localdisk requires VOL_GRP_NAME. Set to the -# volume group that will host the volume groups. Must not be rootvg. -DISK_DRIVER=localdisk -VOL_GRP_NAME=devstackvg - -# TODO: The default version for etcd3 is 3.1.7. Power is not supported for this version. -# Using a 3.2.0 RC until 3.2.0 is released at which point this can be removed. -ETCD_VERSION=v3.2.0-rc.1 -ETCD_SHA256="c2d846326586afe169e6ca81266815196d6c14bc023f9c7d0c9d622f3c14505c" - -# Enable plugins -enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git -enable_plugin ceilometer-powervm https://git.openstack.org/openstack/ceilometer-powervm.git -enable_plugin nova-powervm https://git.openstack.org/openstack/nova-powervm.git -enable_plugin networking-powervm https://git.openstack.org/openstack/networking-powervm.git -enable_plugin neutron https://git.openstack.org/openstack/neutron - -# Enable services -enable_service n-novnc neutron neutron-api pvm-q-sea-agt -disable_service cinder n-net neutron-metering neutron-l3 neutron-dhcp neutron-agent - -[[post-config|/$Q_PLUGIN_CONF_FILE]] -[ml2_type_vlan] -network_vlan_ranges=default:1:4094 - -[ml2] -tenant_network_types=vlan -extension_drivers=port_security diff --git a/devstack/local.conf.compute b/devstack/local.conf.compute deleted file mode 100644 index fadf8000..00000000 --- a/devstack/local.conf.compute +++ /dev/null @@ -1,57 +0,0 @@ -[[local|localrc]] -LOGFILE=/opt/stack/logs/stack.sh.log -SCREEN_LOGDIR=~/screen_log/ -LOGDAYS=1 - -ADMIN_PASSWORD=labstack -MYSQL_PASSWORD=mysql -RABBIT_PASSWORD=rabbit -SERVICE_PASSWORD=admin -SERVICE_TOKEN=service - -MULTI_HOST=1 -HOST_IP=192.168.42.12 #Change this for each compute node -HOST_NAME=$(hostname) -SERVICE_HOST=192.168.42.11 #Change this to your controller IP - -MYSQL_HOST=$SERVICE_HOST -RABBIT_HOST=$SERVICE_HOST -GLANCE_HOSTPORT=$SERVICE_HOST:9292 -KEYSTONE_AUTH_HOST=$SERVICE_HOST -KEYSTONE_SERVICE_HOST=$SERVICE_HOST - -FLAT_INTERFACE=eth0 -Q_PLUGIN=ml2 -Q_ML2_TENANT_NETWORK_TYPE=vlan -Q_ML2_PLUGIN_TYPE_DRIVERS=vlan -ENABLE_TENANT_VLANS=True -PHYSICAL_NETWORK=default -TENANT_VLAN_RANGE=1000:1999 - -# TODO: Set disk driver details for your environment -# DISK_DRIVER: localdisk or ssp. localdisk requires VOL_GRP_NAME. Set to the -# volume group that will host the volume groups. Must not be rootvg. 
-DISK_DRIVER=localdisk -VOL_GRP_NAME=devstackvg - -NOVA_VNC_ENABLED=True -NOVNCPROXY_BASE_URL="http://$SERVICE_HOST:6080/vnc_auto.html" -VNCSERVER_LISTEN=$HOST_IP -VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN - -# Set enabled services (pvm-q-agt and pvm-ceilometer-acompute started by their plugins) -ENABLED_SERVICES=n-cpu,neutron,n-api-meta - -# Enable plugins -enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git -enable_plugin nova-powervm https://git.openstack.org/openstack/nova-powervm.git -enable_plugin networking-powervm https://git.openstack.org/openstack/networking-powervm.git -enable_plugin ceilometer-powervm https://git.openstack.org/openstack/ceilometer-powervm.git - -# Disable services -disable_service ceilometer-acentral ceilometer-collector ceilometer-api - -[[post-config|$NOVA_CONF]] -[DEFAULT] -debug=False -default_log_levels=nova_powervm=DEBUG,nova=DEBUG,pypowervm=INFO,iamqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN diff --git a/devstack/local.conf.control b/devstack/local.conf.control deleted file mode 100644 index dfe3ab81..00000000 --- a/devstack/local.conf.control +++ /dev/null @@ -1,42 +0,0 @@ -[[local|localrc]] -LOGFILE=/opt/stack/logs/stack.sh.log -SCREEN_LOGDIR=~/screen_log/ -LOGDAYS=1 - -ADMIN_PASSWORD=admin -MYSQL_PASSWORD=mysql -RABBIT_PASSWORD=rabbit -SERVICE_PASSWORD=admin -SERVICE_TOKEN=service - -MULTI_HOST=1 -HOST_NAME=$(hostname) - -FLOATING_RANGE=192.168.2.0/24 -FIXED_RANGE=10.11.12.0/24 -NETWORK_GATEWAY=10.11.12.1 -PUBLIC_NETWORK_GATEWAY=192.168.2.1 -Q_FLOATING_ALLOCATION_POOL=start=192.168.2.225,end=192.168.2.250 -FLAT_INTERFACE=eth0 -Q_PLUGIN=ml2 -Q_ML2_TENANT_NETWORK_TYPE=vlan -Q_ML2_PLUGIN_TYPE_DRIVERS=vlan -ENABLE_TENANT_VLANS=True -PHYSICAL_NETWORK=default -TENANT_VLAN_RANGE=1000:1999 - -# Enable services -enable_service n-novnc neutron q-svc q-l3 q-dhcp q-meta -disable_service n-net n-cpu q-agt c-vol - -# Enable plugins -enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git -enable_plugin networking-powervm https://git.openstack.org/openstack/networking-powervm.git - -# Disable ceilometer-acompute, as it's not needed on a control-only node -disable_service ceilometer-acompute - -[[post-config|$NOVA_CONF]] -[DEFAULT] -debug=False -default_log_levels=nova=DEBUG,iamqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN diff --git a/devstack/override-defaults b/devstack/override-defaults deleted file mode 100644 index 67027994..00000000 --- a/devstack/override-defaults +++ /dev/null @@ -1,3 +0,0 @@ -# Plug-in overrides - -VIRT_DRIVER=powervm diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100755 index 5a74e76a..00000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash -# -# plugin.sh - Devstack extras script to install and configure the nova compute -# driver for powervm - -# This driver is enabled in override-defaults with: -# VIRT_DRIVER=${VIRT_DRIVER:-powervm} - -# The following entry points are called in this order for nova-powervm: -# -# - install_nova_powervm -# - configure_nova_powervm -# - start_nova_powervm -# - stop_nova_powervm -# - cleanup_nova_powervm - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- - -# Set up base directories -NOVA_DIR=${NOVA_DIR:-$DEST/nova} -NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova} -NOVA_CONF=${NOVA_CONF:-NOVA_CONF_DIR/nova.conf} - -# nova-powervm directories 
-NOVA_POWERVM_DIR=${NOVA_POWERVM_DIR:-${DEST}/nova-powervm} -NOVA_POWERVM_PLUGIN_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})) - -# Support entry points installation of console scripts -if [[ -d $NOVA_DIR/bin ]]; then - NOVA_BIN_DIR=$NOVA_DIR/bin -else - NOVA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Source functions -source $NOVA_POWERVM_PLUGIN_DIR/powervm-functions.sh - -# Entry Points -# ------------ - -# configure_nova_powervm() - Configure the system to use nova_powervm -function configure_nova_powervm { - - # Default configuration - iniset $NOVA_CONF DEFAULT compute_driver $PVM_DRIVER - iniset $NOVA_CONF DEFAULT instance_name_template $INSTANCE_NAME_TEMPLATE - iniset $NOVA_CONF DEFAULT compute_available_monitors $COMPUTE_MONITORS - iniset $NOVA_CONF DEFAULT compute_monitors ComputeDriverCPUMonitor - iniset $NOVA_CONF DEFAULT force_config_drive $FORCE_CONFIG_DRIVE - iniset $NOVA_CONF DEFAULT injected_network_template $INJECTED_NETWORK_TEMPLATE - iniset $NOVA_CONF DEFAULT flat_injected $FLAT_INJECTED - iniset $NOVA_CONF DEFAULT use_ipv6 $USE_IPV6 - iniset $NOVA_CONF DEFAULT firewall_driver $FIREWALL_DRIVER - - # PowerVM specific configuration - iniset $NOVA_CONF powervm disk_driver $DISK_DRIVER - if [[ -n $VOL_GRP_NAME ]]; then - iniset $NOVA_CONF powervm volume_group_name $VOL_GRP_NAME - fi - if [[ -n $CLUSTER_NAME ]]; then - iniset $NOVA_CONF powervm cluster_name $CLUSTER_NAME - fi -} - -# install_nova_powervm() - Install nova_powervm and necessary dependencies -function install_nova_powervm { - # Install the nova-powervm package - setup_develop $NOVA_POWERVM_DIR -} - -# start_nova_powervm() - Start the nova_powervm process -function start_nova_powervm { - # Check that NovaLink is installed and running - check_novalink_install - - # This function intentionally functionless as the - # compute service will start normally -} - -# stop_nova_powervm() - Stop the nova_powervm process -function stop_nova_powervm { - # This function intentionally left blank as the - # compute service will stop normally - : -} - -# cleanup_nova_powervm() - Cleanup the nova_powervm process -function cleanup_nova_powervm { - # This function intentionally left blank - : -} - -# Core Dispatch -# ------------- -if is_service_enabled nova-powervm; then - if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - # Install NovaLink if set - if [[ "$INSTALL_NOVALINK" = "True" ]]; then - echo_summary "Installing NovaLink" - install_novalink - fi - fi - - if [[ "$1" == "stack" && "$2" == "install" ]]; then - # Perform installation of nova-powervm - echo_summary "Installing nova-powervm" - install_nova_powervm - - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - # Lay down configuration post install - echo_summary "Configuring nova-powervm" - configure_nova_powervm - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize and start the nova-powervm/nova-compute service - echo_summary "Starting nova-powervm" - start_nova_powervm - fi - - if [[ "$1" == "unstack" ]]; then - # Shut down nova-powervm/nova-compute - echo_summary "Stopping nova-powervm" - stop_nova_powervm - fi - - if [[ "$1" == "clean" ]]; then - # Remove any lingering configuration data - # clean.sh first calls unstack.sh - echo_summary "Cleaning up nova-powervm and associated data" - cleanup_nova_powervm - fi -fi - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/devstack/powervm-functions.sh b/devstack/powervm-functions.sh deleted file mode 100644 index 2901ecae..00000000 --- 
a/devstack/powervm-functions.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# devstack/powervm-functions.sh -# Functions to control the installation and configuration of the PowerVM compute services - -# TODO (adreznec) Uncomment when public NovaLink PPA available -# NOVALINK_PPA=${NOVALINK_PPA:-TBD} - -function check_novalink_install { - echo_summary "Checking NovaLink installation" - if ! ( is_package_installed pvm-novalink ); then - echo "WARNING: You are using the NovaLink drivers, but NovaLink is not installed on this system." - fi - - # The user that nova runs as should be a member of the **pvm_admin** group - if ! getent group $PVM_ADMIN_GROUP >/dev/null; then - sudo groupadd $PVM_ADMIN_GROUP - fi - add_user_to_group $STACK_USER $PVM_ADMIN_GROUP -} - -function install_novalink { - echo_summary "Installing NovaLink" - if is_ubuntu; then - # Set up the NovaLink PPA - # TODO (adreznec) Uncomment when public NovaLink PPA available - # echo "deb ${NOVALINK_PPA} ${DISTRO} main" | sudo tee /etc/apt/sources.list.d/novalink-${DISTRO}.list - # echo "deb-src ${NOVALINK_PPA} ${DISTRO} main" | sudo tee --append /etc/apt/sources.list.d/novalink-${DISTRO}.list - - NO_UPDATE_REPOS=FALSE - REPOS_UPDATED=FALSE - else - die $LINENO "NovaLink is currently supported only on Ubuntu platforms" - fi - - install_package pvm-novalink - echo_summary "NovaLink install complete" -} diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 9230dbfc..00000000 --- a/devstack/settings +++ /dev/null @@ -1,28 +0,0 @@ -# Devstack settings -# These defaults can be overridden in the localrc section of the local.conf file - -# Add nova-powervm to enabled services -enable_service nova-powervm - -# NovaLink install/upgrade settings -INSTALL_NOVALINK=$(trueorfalse False INSTALL_NOVALINK) -PVM_ADMIN_GROUP=${PVM_ADMIN_GROUP:-pvm_admin} - -# Nova settings -PVM_DRIVER=powervm_ext.driver.PowerVMDriver -INSTANCE_NAME_TEMPLATE=${INSTANCE_NAME_TEMPLATE:-"%(display_name).13s-%(uuid).8s-pvm"} -COMPUTE_MONITORS=${COMPUTE_MONITORS:-nova.compute.monitors.all_monitors} -FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-True} -INJECTED_NETWORK_TEMPLATE=${INJECTED_NETWORK_TEMPLATE:-$DEST/nova/nova/virt/interfaces.template} -FLAT_INJECTED=${FLAT_INJECTED:-true} -# This is required to be true to support the PowerVM RMC management network -USE_IPV6=${USE_IPV6:-True} -FIREWALL_DRIVER=${FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - -# PowerVM settings -# DISK_DRIVER : 'localdisk' or 'ssp' (the default here) -DISK_DRIVER=${DISK_DRIVER:-ssp} -# VOL_GRP_NAME only required for localdisk driver -# VOL_GRP_NAME=${VOL_GRP_NAME:-devstackvg} -# CLUSTER_NAME used by SSP driver -# CLUSTER_NAME=${CLUSTER_NAME:-devstack_cluster} diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 49b050e2..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD -sphinx!=1.6.6,!=1.6.7,>=1.6.2,!=2.1.0;python_version>='3.4' # BSD -openstackdocstheme>=1.19.0 # Apache-2.0 -sphinx-feature-classification>=0.2.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index f2907666..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,84 +0,0 @@ -# nova-powervm documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file.
-# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, as shown here. -sys.path.insert(0, os.path.abspath('../')) - -# -- General configuration ------------------------------------------------ - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'openstackdocstheme', - 'sphinx_feature_classification.support_matrix' -] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'nova-powervm' -copyright = u'2015, IBM' - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'openstackdocs' - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'IBM', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', - '%s' % project, - u'%s Documentation' % project, - u'IBM', 1) -] - - -# -- Options for openstackdocstheme --------------------------------------- - -repository_name = 'openstack/nova-powervm' -bug_project = 'nova-powervm' -bug_tag = '' diff --git a/doc/source/devref/development_environment.rst b/doc/source/devref/development_environment.rst deleted file mode 100644 index dc525609..00000000 --- a/doc/source/devref/development_environment.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Setting Up a Development Environment -==================================== - -This page describes how to set up a working Python development -environment that can be used in developing Nova-PowerVM.
- -These instructions assume you're already familiar with Git and Gerrit, the -code repository and code review tools used by OpenStack. If you aren't, -please see `this Git tutorial`_ for an introduction to using Git and -`this guide`_ for a tutorial on using Gerrit and Git for code contribution -to OpenStack projects. - -.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started -.. _this guide: http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Getting the code ---------------- - -Grab the code:: - - git clone https://git.openstack.org/openstack/nova-powervm - cd nova-powervm - -Setting up your environment --------------------------- - -The purpose of this project is to provide the 'glue' between OpenStack -Compute (Nova) and PowerVM. The `pypowervm`_ project is used to control -PowerVM systems. - -It is recommended that you also clone the OpenStack Nova project and -pypowervm into your development environment. - -Running the tox test targets will automatically pull these in -via the requirements. - -Additional project requirements may be found in the requirements.txt file. - -.. _pypowervm: https://github.com/powervm/pypowervm diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst deleted file mode 100644 index 129bb406..00000000 --- a/doc/source/devref/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Developer Guide -=============== - -In the Developer Guide, you will find information on how to develop for -Nova-PowerVM and how it interacts with Nova compute. You will also find -information on the setup and usage of Nova-PowerVM. - - -Internals and Programming -------------------------- -.. toctree:: - :maxdepth: 3 - - project_structure - development_environment - usage - - -Testing -------- -.. toctree:: - :maxdepth: 3 - - testing - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/devref/project_structure.rst b/doc/source/devref/project_structure.rst deleted file mode 100644 index 2d60097b..00000000 --- a/doc/source/devref/project_structure.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Source Code Structure -===================== - -Since nova-powervm strives to be integrated into the upstream Nova project, -the source code structure matches that of a standard driver.
- -:: - - nova_powervm/ - virt/ - powervm/ - disk/ - tasks/ - volume/ - ... - tests/ - virt/ - powervm/ - disk/ - tasks/ - volume/ - ... - -nova_powervm/virt/powervm -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The main directory for the overall driver. Provides the driver -implementation, image support, and some high level classes to interact with -the PowerVM system (ex. host, vios, vm, etc...) - -The driver attempts to utilize `TaskFlow`_ for major actions such as spawn. -This allows the driver to create atomic elements (within the tasks) to -drive operations against the system (with revert capabilities). - -.. _TaskFlow: https://wiki.openstack.org/wiki/TaskFlow - -nova_powervm/virt/powervm/disk -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The disk folder contains the various 'nova ephemeral' disk implementations. -These are basic images that do not involve Cinder. - -Two disk implementations exist currently. - -* localdisk - supports Virtual I/O Server Volume Groups. This configuration - uses any Volume Group on the system, allowing operators to make use of the - physical disks local to their system. Images will be cached on the same - volume group as the VMs. The cached images will be periodically cleaned up - by the Nova imagecache manager, at a rate determined by the ``nova.conf`` - setting ``image_cache_manager_interval``. Also supports file-backed ephemeral - storage, which is specified by using the ``QCOW VG - default`` volume group. - Note: Resizing instances with file-backed ephemeral storage is not currently - supported. - -* Shared Storage Pool - utilizes PowerVM's distributed storage. As such this - implementation allows operators to make use of live migration capabilities. - -The standard interface between these two implementations is defined in -driver.py. This ensures that the nova-powervm compute driver does not need -to know the specifics about which disk implementation it is using. - -nova_powervm/virt/powervm/tasks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The task folder contains `TaskFlow`_ classes. These implementations simply -wrap around other methods, providing logical units that the compute -driver can use when building a string of actions. - -For instance, spawning an instance may require several atomic tasks: - - Create VM - - Plug Networking - - Create Disk from Glance - - Attach Disk to VM - - Power On - -The tasks in this directory encapsulate this. If anything fails, they have -corresponding reverts. The logic to perform these operations is contained -elsewhere; these are simple wrappers that enable embedding into TaskFlow -(a short sketch of such a task appears at the end of this page). - -.. _TaskFlow: https://wiki.openstack.org/wiki/TaskFlow - -nova_powervm/virt/powervm/volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The volume folder contains the Cinder volume connectors. A volume connector -is the code that connects a Cinder volume (which is visible to the host) to -the Virtual Machine. - -The PowerVM Compute Driver has an interface for the volume connectors defined -in this folder's `driver.py`. - -The PowerVM Compute Driver provides two implementations for Fibre Channel -attached disks. - - * Virtual SCSI (vSCSI): The disk is presented to a Virtual I/O Server and - the data is passed through to the VM through a virtualized SCSI - connection. - - * N-Port ID Virtualization (NPIV): The disk is presented directly to the - VM. The VM will have virtual Fibre Channel connections to the disk, and - the Virtual I/O Server will not have the disk visible to it.
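As a rough illustration of the wrapper pattern described in the tasks section
above, the following minimal sketch shows how such a task might be shaped.
This is a hypothetical example, not code from the driver; ``create_lpar`` and
``delete_lpar`` stand in for the real logic that lives elsewhere::

    from taskflow import task

    class CreateVM(task.Task):
        """Hypothetical revertible wrapper around 'create the VM' logic."""

        def __init__(self, adapter, instance):
            super(CreateVM, self).__init__(name='crt_vm')
            self.adapter = adapter
            self.instance = instance

        def execute(self):
            # Thin wrapper: delegate to the real creation logic (assumed
            # helper), so the flow stays a readable string of actions.
            return create_lpar(self.adapter, self.instance)

        def revert(self, result, flow_failures, **kwargs):
            # If a later task in the flow fails, undo the creation.
            delete_lpar(self.adapter, self.instance)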
diff --git a/doc/source/devref/testing.rst b/doc/source/devref/testing.rst deleted file mode 100644 index 6ee09b17..00000000 --- a/doc/source/devref/testing.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Running Nova-PowerVM Tests -========================== - -This page describes how to run the Nova-PowerVM tests. This page assumes you -have already set up a working Python environment for Nova-PowerVM development. - -With `tox` -~~~~~~~~~~ - -Nova-PowerVM, like other OpenStack projects, uses `tox`_ for managing the virtual -environments for running test cases. It uses `Testr`_ for managing the running -of the test cases. - -Tox handles the creation of a series of `virtualenvs`_ that target specific -versions of Python. - -Testr handles the parallel execution of a series of test cases as well as -the tracking of long-running tests. - -For more information on the standard tox-based test infrastructure used by -OpenStack and how to do some common test/debugging procedures with Testr, -see this wiki page: - - https://wiki.openstack.org/wiki/Testr - -.. _Testr: https://wiki.openstack.org/wiki/Testr -.. _tox: http://tox.readthedocs.org/en/latest/ -.. _virtualenvs: https://pypi.org/project/virtualenv/ -
PEP8 and Unit Tests -+++++++++++++++++++ - -Running pep8 and unit tests is as easy as executing this in the root -directory of the Nova-PowerVM source code:: - - tox - -To run only pep8:: - - tox -e pep8 - -To restrict the pep8 check to only the files altered by the latest patch:: - - tox -e pep8 HEAD~1 - -To run only the unit tests:: - - tox -e py27,py34 diff --git a/doc/source/devref/usage.rst b/doc/source/devref/usage.rst deleted file mode 100644 index ec3ac4da..00000000 --- a/doc/source/devref/usage.rst +++ /dev/null @@ -1,212 +0,0 @@ -.. - Copyright 2015, 2016 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Usage -===== - -To make use of the PowerVM drivers, a PowerVM system set up with `NovaLink`_ is -required. The nova-powervm driver should be installed on the management VM. - -.. _NovaLink: http://www-01.ibm.com/common/ssi/cgi-bin/ssialias?infotype=AN&subtype=CA&htmlfid=897/ENUS215-262&appname=USN - -**Note:** Installing the NovaLink software creates the ``pvm_admin`` group. In -order to function properly, the user executing the Nova compute service must -be a member of this group. Use the ``usermod`` command to add the user.
For -example, to add the user ``stacker`` to the ``pvm_admin`` group, execute:: - - sudo usermod -a -G pvm_admin stacker - -The user must re-login for the change to take effect. - -The NovaLink architecture is such that the compute driver runs directly on the -PowerVM system. No external management element (e.g. Hardware Management -Console or PowerVC) is needed. Management of the virtualization is driven -through a thin virtual machine running on the PowerVM system. - -Configuration of the PowerVM system and NovaLink is required ahead of time. If -the operator is using volumes or Shared Storage Pools, these must also be -configured ahead of time. - - -Configuration File Options --------------------------- -After nova-powervm has been installed, the user must enable PowerVM as the -compute driver. To do so, set the ``compute_driver`` value in the ``nova.conf`` -file to ``compute_driver = powervm_ext.driver.PowerVMDriver``. - -The standard nova configuration options are supported. In particular, to use -PowerVM SR-IOV vNIC for networking, the ``pci_passthrough_whitelist`` option -must be set. See the `networking-powervm usage devref`_ for details. - -.. _`networking-powervm usage devref`: http://networking-powervm.readthedocs.io/en/latest/devref/usage.html - -Additionally, a ``[powervm]`` section is used to provide additional -customization to the driver. - -By default, no additional inputs are needed. The base configuration allows for -a Nova driver to support ephemeral disks to a local volume group (only -one can be on the system in the default config). Connecting Fibre Channel -hosted disks via Cinder will use the Virtual SCSI connections through the -Virtual I/O Servers. - -Operators may change the disk driver (nova-based disks, NOT Cinder) via the -``disk_driver`` property. - -All of these values are under the ``[powervm]`` section. The tables are broken -out into logical sections. - -To generate a sample config file for ``[powervm]`` run:: - - oslo-config-generator --namespace nova_powervm > nova_powervm_sample.conf - -The ``[powervm]`` section of the sample can then be edited and pasted into the -full nova.conf file. - -VM Processor Options -~~~~~~~~~~~~~~~~~~~~ -+--------------------------------------+------------------------------------------------------------+ -| Configuration option = Default Value | Description | -+======================================+============================================================+ -| proc_units_factor = 0.1 | (FloatOpt) Factor used to calculate the processor units | -| | per vcpu. Valid values are: 0.05 - 1.0 | -+--------------------------------------+------------------------------------------------------------+ -| uncapped_proc_weight = 64 | (IntOpt) The processor weight to assign to newly created | -| | VMs. Value should be between 1 and 255. Represents the | -| | relative share of the uncapped processor cycles the | -| | Virtual Machine will receive when unused processor cycles | -| | are available. | -+--------------------------------------+------------------------------------------------------------+ - - -Disk Options -~~~~~~~~~~~~ -+--------------------------------------+------------------------------------------------------------+ -| Configuration option = Default Value | Description | -+======================================+============================================================+ -| disk_driver = localdisk | (StrOpt) The disk driver to use for PowerVM disks.
Valid | -| | options are: localdisk, ssp | -| | | -| | If localdisk is specified and only one non-rootvg Volume | -| | Group exists on one of the Virtual I/O Servers, then no | -| | further config is needed. If multiple volume groups exist,| -| | then further specification can be done via the | -| | volume_group_name option. | -| | | -| | Live migration is not supported with a localdisk config. | -| | | -| | If ssp is specified, then a Shared Storage Pool will be | -| | used. If only one SSP exists on the system, no further | -| | configuration is needed. If multiple SSPs exist, then the | -| | cluster_name property must be specified. Live migration | -| | can be done within a SSP cluster. | -+--------------------------------------+------------------------------------------------------------+ -| cluster_name = None | (StrOpt) Cluster hosting the Shared Storage Pool to use | -| | for storage operations. If none specified, the host is | -| | queried; if a single Cluster is found, it is used. Not | -| | used unless disk_driver option is set to ssp. | -+--------------------------------------+------------------------------------------------------------+ -| volume_group_name = None | (StrOpt) Volume Group to use for block device operations. | -| | Must not be rootvg. If disk_driver is localdisk, and more | -| | than one non-rootvg volume group exists across the | -| | Virtual I/O Servers, then this attribute must be specified.| -+--------------------------------------+------------------------------------------------------------+ - - -Volume Options -~~~~~~~~~~~~~~ -+--------------------------------------+------------------------------------------------------------+ -| Configuration option = Default Value | Description | -+======================================+============================================================+ -| fc_attach_strategy = vscsi | (StrOpt) The Fibre Channel Volume Strategy defines how FC | -| | Cinder volumes should be attached to the Virtual Machine. | -| | The options are: npiv or vscsi. | -| | | -| | It should be noted that if NPIV is chosen, the WWPNs will | -| | not be active on the backing fabric during the deploy. | -| | Some Cinder drivers will operate without issue. Others | -| | may query the fabric and thus will fail attachment. It is | -| | advised that if an issue occurs using NPIV, the operator | -| | fall back to vscsi based deploys. | -+--------------------------------------+------------------------------------------------------------+ -| vscsi_vios_connections_required = 1 | (IntOpt) Indicates a minimum number of Virtual I/O Servers | -| | that are required to support a Cinder volume attach with | -| | the vSCSI volume connector. | -+--------------------------------------+------------------------------------------------------------+ -| ports_per_fabric = 1 | (IntOpt) (NPIV only) The number of physical ports that | -| | should be connected directly to the Virtual Machine, per | -| | fabric. | -| | | -| | Example: 2 fabrics and ports_per_fabric set to 2 will | -| | result in 4 NPIV ports being created, two per fabric. If | -| | multiple Virtual I/O Servers are available, will attempt | -| | to span ports across I/O Servers. | -+--------------------------------------+------------------------------------------------------------+ -| fabrics = A | (StrOpt) (NPIV only) Unique identifier for each physical | -| | FC fabric that is available. This is a comma separated | -| | list. If there are two fabrics for multi-pathing, then | -| | this could be set to A,B. 
| -| | | -| | The fabric identifiers are used for the | -| | 'fabric_<identifier>_port_wwpns' key. | -+--------------------------------------+------------------------------------------------------------+ -| fabric_<identifier>_port_wwpns | (StrOpt) (NPIV only) A comma delimited list of all the | -| | physical FC port WWPNs that support the specified fabric. | -| | Is tied to the NPIV 'fabrics' key. | -+--------------------------------------+------------------------------------------------------------+ - - -Config Drive Options -~~~~~~~~~~~~~~~~~~~~ -+--------------------------------------+------------------------------------------------------------+ -| Configuration option = Default Value | Description | -+======================================+============================================================+ -| vopt_media_volume_group = root_vg | (StrOpt) The volume group on the system that should be | -| | used to store the config drive metadata that will be | -| | attached to the VMs. | -+--------------------------------------+------------------------------------------------------------+ -| vopt_media_rep_size = 1 | (IntOpt) The size of the media repository (in GB) for the | -| | metadata for config drive. Only used if the media | -| | repository needs to be created. | -+--------------------------------------+------------------------------------------------------------+ -| image_meta_local_path = /tmp/cfgdrv/ | (StrOpt) The location where the config drive ISO files | -| | should be built. | -+--------------------------------------+------------------------------------------------------------+ - -LPAR Detailed Settings -~~~~~~~~~~~~~~~~~~~~~~ -Fine-grained control over LPAR settings can be achieved by setting PowerVM-specific -properties (``extra-specs``) on the flavors being used to instantiate a VM. For the -complete list of PowerVM properties, see the `IBM PowerVC documentation`_. - -.. _`IBM PowerVC documentation`: https://www.ibm.com/support/knowledgecenter/en/SSXK2N_1.4.2/com.ibm.powervc.standard.help.doc/powervc_pg_flavorsextraspecs_hmc.html - -For example, to create a VM with one VCPU and 0.7 entitlement (0.7 of the physical -CPU resource), a user could use a flavor created as follows:: - - openstack flavor create --vcpus 1 --ram 6144 --property \ - powervm:proc_units=0.7 pvm-6-1-0.7 - -In the example above, the ``powervm:proc_units`` property was used to specify CPU -entitlement for the VM. - -Remarks For IBM i Users -~~~~~~~~~~~~~~~~~~~~~~~ -By default, all VMs are created as ``AIX/Linux`` type LPARs. In order to create an -IBM i VM (LPAR type ``OS400``), the user must add an ``os_distro`` property with the value -``ibmi`` to the Glance image being used to create the instance. For example, -to add the property to sample image ``i5OSR730``, execute:: - - openstack image set --property os_distro=ibmi i5OSR730 diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 38ee2870..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the - License for the specific language governing permissions and limitations - under the License. - -Welcome to nova-powervm's documentation! -======================================== - -This project provides a Nova-compatible compute driver for `PowerVM`_ systems. - -The project aims to integrate into OpenStack's Nova project. Initial -development is occurring in a separate project until it has matured and met the -Nova core team's requirements. As such, all development practices should -mirror those of the Nova project. - -Documentation on Nova can be found at the `Nova Devref`_. - -.. _`PowerVM`: http://www-03.ibm.com/systems/power/software/virtualization/ -.. _`Nova Devref`: https://docs.openstack.org/nova/latest/ - -Overview --------- - -.. toctree:: - :maxdepth: 1 - - readme - support-matrix - -Policies --------- - -.. toctree:: - :maxdepth: 1 - - policies/index - -Devref ------- - -.. toctree:: - :maxdepth: 1 - - devref/index - -Specifications --------------- - -.. toctree:: - :maxdepth: 1 - - specs/template - specs/index diff --git a/doc/source/policies/bugs.rst b/doc/source/policies/bugs.rst deleted file mode 100644 index 1934e4f1..00000000 --- a/doc/source/policies/bugs.rst +++ /dev/null @@ -1,26 +0,0 @@ -Nova-PowerVM Bugs -================= - -Nova-PowerVM maintains all of its bugs in `Launchpad <https://bugs.launchpad.net/nova-powervm>`_. -All of the current open Nova-PowerVM bugs can be found at that link. - -Bug Triage Process ------------------- - -The process of bug triaging consists of the following steps: - -1. Check if a bug was filed for the correct component (project). If not, either change the project - or mark it as "Invalid". -2. Add appropriate tags. Even if the bug is not valid or is a duplicate of another one, it still - may help bug submitters and corresponding sub-teams. -3. Check if a similar bug was filed before. If so, mark it as a duplicate of the previous bug. -4. Check if the bug description is complete, i.e. it has enough information for developers to - reproduce it. If it does not, ask the submitter to provide more info and mark the bug as - "Incomplete". -5. Depending on ease of reproduction (or if the issue can be spotted in the code), mark it as - "Confirmed". -6. Assign the importance. Bugs that obviously break core and widely used functionality should be - assigned "High" or "Critical" importance. The same applies to bugs that were filed for gate - failures. -7. (Optional). Add comments explaining the issue and possible strategies for fixing/working around - the bug. diff --git a/doc/source/policies/code-reviews.rst b/doc/source/policies/code-reviews.rst deleted file mode 100644 index a78d6894..00000000 --- a/doc/source/policies/code-reviews.rst +++ /dev/null @@ -1,13 +0,0 @@ -Code Reviews -============ - -Code reviews are a critical component of all OpenStack projects. Code reviews provide a -way to enforce a level of consistency across the project, and also allow for the careful -onboarding of contributions from new contributors. - -Code Review Practices ---------------------- -Nova-PowerVM follows the `code review guidelines `_ as -set forth for all OpenStack projects. It is expected that all reviewers follow the guidelines -set forth on that page. - diff --git a/doc/source/policies/contributing.rst b/doc/source/policies/contributing.rst deleted file mode 100644 index b1cd2f37..00000000 --- a/doc/source/policies/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -..
include:: ../../../CONTRIBUTING.rst diff --git a/doc/source/policies/index.rst b/doc/source/policies/index.rst deleted file mode 100644 index 2e4470df..00000000 --- a/doc/source/policies/index.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. - Copyright 2015 IBM - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Nova-PowerVM Policies -===================== - -In the Policies Guide, you will find documented policies for developing with -Nova-PowerVM. This includes the processes we use for blueprints and specs, -bugs, contributor onboarding, and other procedural items. - -Policies --------- -.. toctree:: - :maxdepth: 3 - - bugs - contributing - code-reviews - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/readme.rst b/doc/source/readme.rst deleted file mode 100644 index a6210d3d..00000000 --- a/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/doc/source/specs/index.rst b/doc/source/specs/index.rst deleted file mode 100644 index ca555912..00000000 --- a/doc/source/specs/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -Nova-PowerVM Specifications -=========================== - -Contents: - -.. toctree:: - :maxdepth: 2 - :glob: - :reversed: - - */index - diff --git a/doc/source/specs/newton/index.rst b/doc/source/specs/newton/index.rst deleted file mode 100644 index 91c41633..00000000 --- a/doc/source/specs/newton/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Newton Specifications -===================== - -.. toctree:: - :glob: - - * diff --git a/doc/source/specs/newton/lb_and_ovs_support.rst b/doc/source/specs/newton/lb_and_ovs_support.rst deleted file mode 100644 index 0398447a..00000000 --- a/doc/source/specs/newton/lb_and_ovs_support.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -Linux Bridge and OVS VIF Support -================================ - -`Launchpad BluePrint`_ - -.. _`Launchpad BluePrint` : https://blueprints.launchpad.net/nova-powervm/+spec/powervm-addl-vif-types - -Currently the PowerVM driver requires a PowerVM-specific Neutron agent. This -blueprint will add support for additional agent types - specifically the -Open vSwitch and Linux Bridge agents provided by Neutron. - -Problem description -=================== - -PowerVM has support for virtualizing an Ethernet port using the Virtual I/O -Server and Shared Ethernet. This is provided by the networking-powervm -Shared Ethernet Agent. This agent provides key PowerVM use cases such as I/O -redundancy. - -There is a subset of operators who have asked for VIF support in line with -other hypervisors. This would be support for the Neutron Linux Bridge agent -and Open vSwitch agent.
While these agents do not provide use cases such as -I/O redundancy, they do enable operators to utilize common upstream networking -solutions when deploying PowerVM with OpenStack. - - -Use Cases ---------- - -An operator should be able to deploy an environment using Linux Bridge or -Open vSwitch Neutron agents. In order to do this, the physical I/O must be -assigned to the NovaLink partition on the PowerVM system (the partition with -virtualization admin authority). - -A user should be able to do the standard VIF use cases with either of these -agents: - -* Add NIC -* Remove NIC -* Security Groups -* Multiple Network Types (flat, VLAN, VXLAN) -* Bandwidth limiting - -The existing Neutron agents should be used without any changes from PowerVM. -All of the changes that should occur will be in nova-powervm. Any limitations -of the agents themselves will be limitations of the PowerVM implementation. - -There is one exception to the use case support. The Open vSwitch support will -enable live migration. There is no plan for Linux Bridge live migration -support. - - -Proposed change -=============== - -* Create a parent VIF driver for NovaLink based I/O. This will hold the code - that is common between the Linux Bridge VIFs and OVS VIFs. There will be - common code due to both needing to run on the NovaLink management VM - (a rough sketch of this layering appears at the end of this section). - -* The VIF drivers should create a Trunk VEA on the NovaLink partition for - each VIF. It will be given a unique channel of communication to the VM. - The device will be named according to the Neutron device name. - -* The OVS VIF driver will use the nova linux_net code to set the metadata on - the trunk adapter. - -* Live migration will suspend the VIF on the target host until it has been - treated. Treating means ensuring that the communication to the VM is on - a unique channel (its own VLAN on a vSwitch). - -* A private PowerVM virtual switch named 'NovaLinkVEABridge' will be created - to support the private communication between the trunk adapters and the - VMs. - -* Live migration on the source will need to clean up the remaining trunk - adapter for Open vSwitch that is left around on the management VM. - -It should be noted that Hybrid VIF plugging will not be supported. Instead, -PowerVM will use the conntrack integration in Ubuntu 16.04/OVS 2.5 to support -the OVSFirewallDriver. As of OVS 2.5, that allows the firewall function -without needing Hybrid VIF plugging. - -Alternatives ------------- - -None. - - -Security impact ---------------- - -None. - - -End user impact ---------------- - -None. - - -Performance Impact ------------------- - -Performance will not be impacted for the deployment of VMs. However, -end user performance may change as this is a new networking technology. Both -the Linux Bridge and Open vSwitch support should operate with performance -characteristics similar to other platforms that support these technologies. - - -Deployer impact ---------------- - -The deployer will need to do the following: - -* Attach an Ethernet I/O Card to the NovaLink partition. Configure the ports - in accordance with the Open vSwitch or Linux Bridge Neutron Agent's - requirements. -* Run the agent on their NovaLink management VM. - -No major changes are anticipated outside of this. The Shared Ethernet -Adapter Neutron agent will not work in conjunction with this on the same -system.
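To make the proposed driver layering more concrete, here is a minimal,
hypothetical sketch of the parent NovaLink VIF driver described above. The
class and helper names are assumptions for illustration only, not the final
implementation::

    class PvmNovaLinkVifDriver(object):
        """Hypothetical parent for the Linux Bridge and OVS VIF drivers."""

        def plug(self, instance, vif):
            # Create a trunk VEA on the NovaLink partition, named after the
            # Neutron device and placed on the private 'NovaLinkVEABridge'
            # virtual switch for a unique channel to the VM.
            trunk = self._crt_trunk_vea(instance, vif)
            self._post_plug(trunk, vif)

        def _crt_trunk_vea(self, instance, vif):
            # Assumed helper wrapping the pypowervm trunk adapter creation.
            raise NotImplementedError()

        def _post_plug(self, trunk, vif):
            # Subclasses layer on agent specifics (e.g. the OVS driver sets
            # metadata on the trunk adapter via nova's linux_net code).
            raise NotImplementedError()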
- - -Developer impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - thorst - -Other contributors: - kriskend - tjakobs - -Work Items ----------- - -See Proposed Change - - -Dependencies -============ - -* NovaLink core changes will be needed with regard to the live migration flows. - This requires NovaLink 1.0.0.3 or later. - - -Testing -======= - -Testing will be done on live systems. Future work will be done to integrate -into the PowerVM Third-Party CI; however, this will not be done initially, as the -LB and OVS agents are already heavily tested. The SEA agent continues to need to be -tested. - - -Documentation Impact -==================== - -Deployer documentation will be built around how to configure this. - - -References -========== - -`Neutron Networking Guide`_ - -.. _`Neutron Networking Guide`: https://docs.openstack.org/newton/networking-guide/ diff --git a/doc/source/specs/newton/powervm-sriov-nova.rst b/doc/source/specs/newton/powervm-sriov-nova.rst deleted file mode 100644 index 8eeadd75..00000000 --- a/doc/source/specs/newton/powervm-sriov-nova.rst +++ /dev/null @@ -1,350 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================= -Nova support for SR-IOV VIF Types -================================= - -https://blueprints.launchpad.net/nova-powervm/+spec/powervm-sriov-nova - -This blueprint will address nova-powervm support for SR-IOV, with the SR-IOV -VF attached to the VM via PowerVM vNIC. SR-IOV support -was added in the Juno release of OpenStack; this blueprint will fit -this scenario's implementation into it. - -A separate `blueprint for networking-powervm`_ has been made available for -design elements regarding networking-powervm. - -These blueprints will be implemented during the Newton cycle of OpenStack -development. Referring to the Newton schedule, development should be completed -during newton-3. - -Refer to the glossary section for an explanation of terms. - -.. _`blueprint for networking-powervm`: https://review.openstack.org/#/c/322210/ - -Problem Description -=================== -The OpenStack PowerVM drivers currently support the networking aspect of PowerVM -virtualization using Shared Ethernet Adapter, Open vSwitch and Linux Bridge. -There is a need for supporting SR-IOV ports with redundancy/failover and -migration. It is possible to associate a SR-IOV VF to a VM directly, but this path -will not be supported by this design. Such a setup would not provide migration -support anyway. Support for this configuration will be added in the future. This -path also does not utilize the advantages of hardware level virtualization offered -by the SR-IOV architecture. - -Users should be able to manage a VM with a SR-IOV vNIC as a network interface. -This management should include migration of a VM with a SR-IOV vNIC attached to it. - -PowerVM has a feature called vNIC which is tied in with SR-IOV. By using -vNIC the following use cases are supported: -- Fail over I/O to a different I/O Server and physical function -- Live Migration with SR-IOV, without significant intervention -The vNIC is exposed to the VM, and the MAC address of the client vNIC will -match the neutron port. - -In summary, this blueprint will address support of SR-IOV in nova-powervm for -these scenarios: - -1. Ability to attach/detach a SR-IOV VF to a VM as a network interface using - vNIC intermediary during and after deployment, including migration.
-2. Ability to provide redundancy/failover support across VFs from Physical Ports - within or across SR-IOV cards using vNIC intermediary. -3. Ability to associate a VLAN with vNIC backed by SR-IOV VF. - -The ability to associate a SR-IOV VF directly to a VM will be done in the future. - -Refer to the separate `blueprint for networking-powervm`_ for changes in the -networking-powervm component. This blueprint will focus on changes to -nova-powervm only. - -Use Cases ---------- -1. Attach vNIC backed by SR-IOV VF(s) to a VM during boot time -2. Attach vNIC backed by SR-IOV VF(s) to a VM after it is deployed -3. Detach vNIC backed by SR-IOV VF(s) from a VM -4. When a VM with vNIC backed by SR-IOV is deleted, perform detach and cleanup -5. Live migrate a VM if using vNIC backed SR-IOV support -6. Provide redundancy/failover support of vNIC backed by SR-IOV VF attached to - a VM during both deploy and post deploy scenarios. - -Proposed changes -================ -The changes will be made in two areas: - -1. **Compute virt driver.** -The PowerVM compute driver is in nova.virt.powervm.driver.PowerVMDriver and it will -be enhanced for SR-IOV vNIC support. A dictionary is maintained in the virt driver -vif code to map between vif type and vif driver class. Based on the vif type of the vif -object that needs to be plugged, the appropriate vif driver will be invoked. This -dictionary will be modified to include the new vif driver class and its vif type -(pvm_sriov). - -The PCI Claims process expects to be able to "claim" a VF from the -``pci_passthrough_devices`` list each time a vNIC is plugged, and return it to -the pool on unplug. Thus the ``get_available_resource`` API will be enhanced to -populate this device list with a suitable number of VFs. - -2. **VIF driver.** -The PowerVM VIF driver is in nova_powervm.virt.powervm.vif.PvmVifDriver. A VIF -driver to attach the network interface via vNIC (PvmVnicSriovVifDriver), with plug/unplug -methods, will be implemented. The plug and unplug methods will use pypowervm -code to create VF/vNIC server/vNIC clients and attach/detach them. The Neutron port -carries binding:vif_type and binding:vnic_type attributes. The vif type for this -implementation will be pvm_sriov. The vnic_type will be 'direct'. - -A VIF driver (PvmVFSriovVifDriver) for VFs directly attached to the VM will be -implemented in the future. - -Deployment of a VM with a SR-IOV vNIC will involve picking Physical Port(s), -VIOS(es) and a VM, and invoking the pypowervm library. Similarly, attachment of the -same to an existing VM will be implemented. RMC will be required. Evacuation and -migration of the VM will be supported with changes to the compute virt driver and VIF -driver via the pypowervm library. - -Physical Port information will be derived from the port label attribute of physical -ports on SR-IOV adapters. The port label attribute of physical ports will have to be -updated with 'physical network' names during configuration of the environment. -During attachment of a SR-IOV backed vNIC to a VM, the physical network attribute of -the neutron network will be matched against the port labels of physical ports to gather a -list of physical ports. - -**Failover/redundancy:** VIF plug during deploy (or attach of a network interface -to a VM) will pass more than one Physical Port and VIOS(es) (as stated above in the -deploy scenario) to the pypowervm library to create the vNIC on the VIOS with redundancy. It -should be noted that failover is handled automatically by the platform when a -vNIC is backed by multiple VFs.
The redundancy level will be controlled by an -``AGENT`` option ``vnic_required_vfs`` in the ML2 configuration file (see the -`blueprint for networking-powervm`_). It will have a default of 2. - -**Quality of Service:** Each VF backing a vNIC can be configured with a capacity -value, dictating the minimum percentage of the physical port's total bandwidth -that will be available to that VF. The ML2 configuration file allows a -``vnic_vf_capacity`` option in the ``AGENT`` section to set the capacity for all -vNIC-backing VFs. If omitted, the platform defaults to the capacity granularity -for each physical port. See the `blueprint for networking-powervm`_ for -details of the configuration option; and see section 1.3.3 of the `IBM Power -Systems SR-IOV Technical Overview and Introduction -<https://www.redbooks.ibm.com/redpapers/pdfs/redp5065.pdf>`_ for details on VF -capacity. - -For the future implementation of VF-to-VM direct attach of SR-IOV, the -request will include the physical network name. PvmVFSriovVifDriver can look up the -devname(s) associated with it from the port label, get the physical port information -and create a SR-IOV logical port on the corresponding VM. -It may also include a configuration option to allow the user to dictate how many -ports to attach. Using the NIB technique, users can set up redundancy. - -For VF-to-vNIC-to-VM attach of a SR-IOV port to a VM, the corresponding neutron -network object will include the physical network name; PvmVnicSriovVifDriver can -look up the devname(s) associated with it from the port label and get the physical port -information. Along with the adapter ID and physical port ID, VIOS information will -be added and a vNIC dedicated port on the corresponding VM will be created. - -For the migration scenario, physical network names should match on the source and -destination compute nodes, and accordingly in the physical port labels. On the -destination, vNICs will be rebuilt based on the SR-IOV port configuration. The -platform decides how to reconstruct the vNIC on the destination in terms of -number and distribution of backing VFs, etc. - -Alternatives ------------- -None - -Security impact ---------------- -None - -Other end user impact ---------------------- -None - -Performance impact ------------------- -Since the number of VMs deployed on the host will depend on the number of VFs -offered by the SR-IOV cards in the environment, scale tests will be limited in -VM density. - -Deployer impact ---------------- -1. SR-IOV cards must be configured in ``Sriov`` mode. This can be done via the - ``pvmctl`` command, e.g.: - - ``pvmctl sriov update -i phys_loc=U78C7.001.RCH0004-P1-C1 -s mode=Sriov`` - -2. SR-IOV physical ports must be labeled with the name of the neutron physical - network to which they are cabled. This can be done via the ``pvmctl`` - command, e.g.: - - ``pvmctl sriov update --port-loc U78C7.001.RCH0004-P1-C1-T1 -s label=prod_net`` - -3. The ``pci_passthrough_whitelist`` option in the nova configuration file must - include entries for each neutron physical network to be enabled for vNIC. - Only the ``physical_network`` key is required. For example: - - ``pci_passthrough_whitelist = [{"physical_network": "default"}, {"physical_network": "prod_net"}]`` - -Configuration is also required on the networking side - see the `blueprint for -networking-powervm`_ for details. - -**To deploy a vNIC to a VM,** the neutron port(s) must be pre-created with vnic -type ``direct``, e.g.: - - ``neutron port-create --vnic-type direct`` - -Developer impact ----------------- -None - -Dependencies ------------- - -#. SR-IOV cards and SR-IOV-capable hardware -#.
Updated levels of system firmware and the Virtual I/O Server operating system -#. An updated version of the NovaLink PowerVM feature -#. pypowervm library - https://github.com/powervm/pypowervm - -Implementation -============== - -Assignee(s) ------------ -- Eric Fried (efried) -- Sridhar Venkat (svenkat) -- Eric Larese (erlarese) -- Esha Seth (eshaseth) -- Drew Thorstensen (thorst) - -Work Items ----------- -nova-powervm changes: - -- Updates to the PowerVM compute driver to support attachment of SR-IOV VF via vNIC. -- VIF driver for SR-IOV VF connected to VM via vNIC. -- Migration of VM with SR-IOV VF connected to VM via vNIC. This involves live - migration, cold migration and evacuation. -- Failover/redundancy support for SR-IOV VF(s) connected to VM via vNIC(s). - -A VIF driver for SR-IOV VF connected to the VM directly will be a future work item. - -Testing -======= -1. Unit test: All developed code will be accompanied by structured unit tests. These -tests validate granular function logic. - -2. Function test: Function testing will be performed using the CI infrastructure. Changes -implemented for this blueprint will be tested via the existing CI framework -used by the IBM team. The CI framework needs to be enhanced with SR-IOV hardware. -The tests can be executed in batch mode, probably as nightly jobs. - -Documentation impact -==================== -All use-cases need to be documented in the developer docs that accompany -nova-powervm. - -References -========== -1. This blog describes how to work with SR-IOV and vNIC (without redundancy/ - failover) using the HMC interface: http://chmod666.org/index.php/a-first-look-at-sriov-vnic-adapters/ - -2. These describe vNIC and its usage with SR-IOV. - - - https://www.ibm.com/developerworks/community/wikis/home?lang=en_us#!/wiki/Power%20Systems/page/vNIC%20-%20Introducing%20a%20New%20PowerVM%20Virtual%20Networking%20Technology - - https://www.ibm.com/developerworks/community/wikis/home?lang=en_us#!/wiki/Power%20Systems/page/Introduction%20to%20SR-IOV%20FAQs - - https://www.ibm.com/developerworks/community/wikis/home?lang=en_us#!/wiki/Power%20Systems/page/Introduction%20to%20vNIC%20FAQs - - https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/Power%20Systems/page/vNIC%20Frequently%20Asked%20Questions - -3. These describe SR-IOV in OpenStack. - - - https://wiki.openstack.org/wiki/Nova-neutron-sriov - - http://docs.openstack.org/mitaka/networking-guide/adv-config-sriov.html - -4. This blueprint addresses SR-IOV attach/detach function in nova: https://review.openstack.org/#/c/139910/ - -5. networking-powervm blueprint for the same work: https://review.openstack.org/#/c/322210/ - -6. This is a detailed description of the SR-IOV implementation in PowerVM: https://www.redbooks.ibm.com/redpapers/pdfs/redp5065.pdf - -7. This provides an overall view of SR-IOV support in nova: https://blueprints.launchpad.net/nova/+spec/pci-passthrough-sriov - -8. Attach/detach of SR-IOV ports to VM with respect to libvirt. Provided here - for comparison purposes: https://review.openstack.org/#/c/139910/ - -9. SR-IOV PCI passthrough reference: https://wiki.openstack.org/wiki/SR-IOV-Passthrough-For-Networking - -10. pypowervm: https://github.com/powervm/pypowervm - -Glossary -======== -:SR-IOV: Single Root I/O Virtualization, used for virtual environments where VMs - need direct access to the network interface without any hypervisor overhead. - -:Physical Port: Represents a physical port on the SR-IOV adapter. This is not the same - as a Physical Function.
A Physical Port can have many physical functions - associated with it. To clarify further, if a Physical Port supports RoCE, then - it will have two Physical Functions. In other words, one Physical Function per - protocol that the port supports. - -:Virtual Function (VF): Represents a virtual port belonging to a Physical Port - (PF). Either directly or indirectly (using vNIC), a Virtual Function (VF) is - connected to a VM. This is otherwise called a SR-IOV logical port. - -:Dedicated SR-IOV: This is equivalent to any regular Ethernet card and it - can be used with SEA. A logical port of a physical port can be assigned as a - backing device for SEA. - -:Shared SR-IOV: Direct VF-to-VM attach is not supported in the Newton release. But an SR-IOV - card in sriov mode is what will be used for vNIC as described in this - blueprint. Also, a SR-IOV card in Sriov mode can have a promiscuous VF assigned to - the VIOS and configured for SEA (said configuration to be done outside of the - auspices of OpenStack), which can then be used just like any other SEA - configuration, and is supported (as described in the next item below). - -:Shared Ethernet Adapter: Alternate technique to provide a network interface to a - VM. - - This involves attachment to a physical interface on the PowerVM host and one or - many virtual interfaces that are connected to VMs. A VF of a PF in a SR-IOV based - environment can be the physical interface of a Shared Ethernet Adapter. Existing - support for this configuration in nova-powervm and networking-powervm will - continue. - -:vNIC: A vNIC is an intermediary between the VF of a PF and the VM. It resides on the VIOS - and connects to a VF on one end and a vNIC client adapter inside a VM on the other. This is - mainly to support migration of VMs across hosts. - -:vNIC failover/redundancy: Multiple vNIC servers (connected to as many VFs that - belong to as many PFs, either on the same SR-IOV card or across cards) connected to the same - VM as one network interface. Failure of one vNIC/VF/PF path will result in - activation of another such path. - -:VIOS: A partition in PowerVM systems dedicated to I/O operations. In the - context of this blueprint, the vNIC server will be created on the VIOS. For redundancy - management purposes, a PowerVM system may employ more than one VIOS - partition. - -:VM migration types: - - - **Live Migration:** migration of a VM while both host and VM are alive. - - **Cold Migration:** migration of a VM while the host is alive and the VM is down. - - **Evacuation:** migration of a VM while the host is down (the VM is down as well). - - **Rebuild:** recreation of a VM. - -:pypowervm: A Python library that runs on the PowerVM management VM and allows - virtualization control of the system. This is similar to the Python library - for libvirt. - -History -======= - -============ =========== -Release Name Description -============ =========== -Newton Introduced -============ =========== diff --git a/doc/source/specs/ocata/image_cache.rst b/doc/source/specs/ocata/image_cache.rst deleted file mode 100644 index a8483812..00000000 --- a/doc/source/specs/ocata/image_cache.rst +++ /dev/null @@ -1,179 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License.
- - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================== -Image Cache Support for localdisk driver -======================================== - -https://blueprints.launchpad.net/nova-powervm/+spec/image-cache-powervm - -The image cache allows for a nova driver to pull an image from glance once, -then use a local copy of that image for future VM creation. This saves -bandwidth between the compute host and glance. It also improves VM -deployment speed and reduces the stress on the overall infrastructure. - - -Problem description -=================== - -Deploy times on PowerVM can be high when using the localdisk driver. This is -partially due to not having linked clones. The image cache offers a way to -reduce those deploy times by transferring the image to the host once; -subsequent deploys will then reuse that image rather than streaming it from glance. - -There are complexities with this, of course. The cached images take up disk space, -but the overall image cache design from core Nova takes that into account. The value -of using the nova image cache design is that it has hooks in the code to help solve -these problems. - - -Use Cases ---------- - -* As an end user, subsequent deploys of the same image should go faster - - -Proposed change -=============== - -Create a subclass of nova.virt.imagecache.ImageManager in the nova-powervm -project. It should implement the necessary methods of the cache: - -* _scan_base_images -* _age_and_verify_cached_images -* _get_base -* update - -(A skeletal sketch of this subclass appears after the Deployer impact section -below.) - -The nova-powervm driver will need to be updated to utilize the cache. This -includes: - -* Implementing the manage_image_cache method -* Adding the has_imagecache capability - -The localdisk driver within nova-powervm will be updated to have the -following logic. It will check the volume group backing the instance. If the -volume group has a disk with the name 'i_<image_uuid>', it will -simply copy that disk into a new disk named after the UUID of the instance. -Otherwise, it will create a disk with the name 'i_<image_uuid>' -that contains the image. - -The image cache manager's purpose is simply to clean out old images that are -no longer needed by any instances. - -Further extension, not part of this blueprint, can be done to manage overall -disk space in the volume group to make sure that the image cache is not -overwhelming the backing disks. - -Alternatives ------------- - -* Leave as is; all deploys remain potentially slow -* Implement support for linked clones. This is an eventual goal, but - the image cache is still needed in this case as it will also manage the - root disk image. - - -Security impact ---------------- - -None - - -End user impact ---------------- - -None - - -Performance Impact ------------------- - -Performance of subsequent deploys of the same image should be faster. -The deploys will have improved image copy times and reduced network -bandwidth requirements. - -Performance of single deploys using different images will be slower. - - -Deployer impact ---------------- - -This change will take effect without any deployer impact immediately after -merging. The deployer will not need to take any specific upgrade actions to -make use of it; however, the deployer may need to tune the image cache to make -sure it is not using too much disk space. - -A conf option may be added to force the image cache off if deemed necessary. -This will be based on operator feedback in the event that we need a way -to reduce disk usage.
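To illustrate the shape of the proposed subclass, here is a skeletal,
hypothetical sketch. The base class spelling follows current Nova
(``nova.virt.imagecache.ImageCacheManager``), and the method bodies are
placeholders, not the actual design::

    from nova.virt import imagecache

    class PowerVMImageManager(imagecache.ImageCacheManager):
        """Hypothetical skeleton of the PowerVM image cache manager."""

        def _scan_base_images(self, base_dir):
            # Identify cached 'i_<image_uuid>' disks in the volume group.
            pass

        def _age_and_verify_cached_images(self, context, all_instances,
                                          base_dir):
            # Drop cached images that no remaining instance references.
            pass

        def update(self, context, all_instances):
            # Entry point invoked by the compute manager's periodic task.
            self._age_and_verify_cached_images(context, all_instances, None)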
- - -Developer impact ---------------- -None - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - tjakobs - -Other contributors: - None - -Work Items ---------- - -* Implement the image cache code for the PowerVM driver - -* Include support for the image cache in the PowerVM driver. Tolerate it - for other disk drivers, such as SSP. - - -Dependencies -============ - -None - - -Testing -======= - -* Unit tests for all code - -* Deployment tests in local environments to verify speed increases - - -Documentation Impact -==================== - -The deployer docs will be updated to reflect this. - - -References -========== - -None - - -History -======= - -Optional section intended to be used each time the spec is updated to describe -new design. - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Newton - - Introduced diff --git a/doc/source/specs/ocata/index.rst b/doc/source/specs/ocata/index.rst deleted file mode 100644 index 1ca52ca2..00000000 --- a/doc/source/specs/ocata/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Ocata Specifications -==================== - -.. toctree:: - :glob: - - * diff --git a/doc/source/specs/pike/fileio_cinder.rst b/doc/source/specs/pike/fileio_cinder.rst deleted file mode 100644 index 5658e453..00000000 --- a/doc/source/specs/pike/fileio_cinder.rst +++ /dev/null @@ -1,142 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================= -File I/O Cinder Connector -========================= - -https://blueprints.launchpad.net/nova-powervm/+spec/file-io-cinder-connector - -There are several Cinder drivers that support having the file system mounted -locally and then connecting a file in to the VM as a volume (ex. GPFS, NFS, -etc.). There is the ability to support this type of volume in PowerVM, if the -user has mounted the file system to the NovaLink. This blueprint adds support to -the PowerVM driver to support such Cinder volumes. - - -Problem description -=================== - -The PowerVM driver supports Fibre Channel and iSCSI based volumes. It does not -currently support volumes that are presented on a file system as files. - -The recent release of PowerVM NovaLink has added support for this in the REST -API. This blueprint looks to take advantage of that support. - - -Use Cases ---------- - -* As a user, I want to attach a volume that is backed by a file based Cinder - volume (ex. NFS or GPFS). - -* As a user, I want to detach a volume that is backed by a file based Cinder - volume (ex. NFS or GPFS). - - -Proposed change -=============== - -Add nova_powervm/virt/powervm/volume/fileio.py. This would extend the existing -volume drivers. It would store the LUN ID on the scsi bus. - -This does not support traditional VIOS. Like the iSCSI change, it would -require running through the NovaLink partition. - - -Alternatives ------------- - -None - - -Security impact ---------------- - -None. - -One may consider the permissions of the file presented by Cinder. The Cinder -driver's BDM will provide a path to a file. The hypervisor will map that file -as the root user. So file permissions of the volume should not be a concern. -This seems consistent with the other hypervisors utilizing these types of -Cinder drivers.
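To sketch the shape of the proposed connector (class and attribute names here are illustrative assumptions, not the final implementation), the driver would build a file-backed storage element from the path supplied in the Cinder connection info and map it onto the instance's vSCSI bus via the pypowervm tasks cited in the references below::

    from pypowervm.tasks import scsi_mapper
    from pypowervm.wrappers import storage as pvm_stor

    class FileVolumeAdapter(object):
        # Hypothetical sketch of the fileio volume connector.

        def _connect_volume(self):
            # The Cinder driver's BDM provides a path to the backing file.
            path = self.connection_info['data']['device_path']
            # Build a file-backed storage element and map it to the VM
            # through the NovaLink partition.
            fio = pvm_stor.FileIO.bld(self.adapter, path)
            scsi_mapper.add_vscsi_mapping(
                self.host_uuid, self.vios_uuid, self.vm_uuid, fio)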
- -End user impact --------------- -None - - -Performance Impact ------------------- -None - - -Deployer impact ---------------- -The deployer must set up the backing Cinder driver and connect the file systems to -the NovaLink partition in their environment. - - -Developer impact ----------------- -None - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - thorst - -Other contributors: - shyama - -Work Items ----------- - -* Create a nova-powervm fileio cinder volume connector. Create associated UT. - -* Validate with the GPFS cinder backend. - - -Dependencies -============ - -* pypowervm 1.0.0.4 or higher - - -Testing -======= - -Unit Testing is obvious. - -Manual testing will be driven via connecting to a GPFS back-end. - -CI environments will be evaluated to determine if there is a way to add this -to the current CI infrastructure. - - -Documentation Impact -==================== - -None. Will update the nova-powervm dev-ref to reflect that 'file I/O drivers' -are supported, but the support matrix doesn't go into detail about which cinder -drivers work with nova drivers. - - -References -========== - -* pypowervm add storage element to scsi mapping: https://github.com/powervm/pypowervm/blob/release/1.0.0.4/pypowervm/tasks/scsi_mapper.py#L49 - -* pypowervm file storage element: https://github.com/powervm/pypowervm/blob/release/1.0.0.4/pypowervm/wrappers/storage.py#L689 diff --git a/doc/source/specs/pike/fileio_driver.rst b/doc/source/specs/pike/fileio_driver.rst deleted file mode 100644 index c7e283f4..00000000 --- a/doc/source/specs/pike/fileio_driver.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============== -File I/O Driver -=============== - -https://blueprints.launchpad.net/nova-powervm/+spec/file-io-driver - -The PowerVM driver currently uses logical volumes for localdisk ephemeral -storage. This blueprint will add support for using file-backed disks as a -localdisk ephemeral storage option. - - -Problem description -=================== - -The PowerVM driver only supports logical volumes for localdisk ephemeral -storage. It does not currently support storage that is presented as a file. - - -Use Cases ---------- - -* As a user, I want to have the instance ephemeral storage backed by a file. - - -Proposed change -=============== - -Add nova_powervm/virt/powervm/disk/fileio.py. This would extend the existing -disk driver. Use the DISK_DRIVER powervm conf option to select file I/O. -Will utilize the nova.conf option instances_path. - - -Alternatives ------------- - -None - - -Security impact ---------------- - -None - - -End user impact ---------------- - -None - - -Performance Impact ------------------- - -Performance may change as the backing storage methods of VMs will be different. - - -Deployer impact ---------------- - -The deployer must set the DISK_DRIVER conf option to fileio and ensure that -the instances_path conf option is set in order to utilize the changes described -in the blueprint. - - -Developer impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - tjakobs - -Other contributors: - None - -Work Items ----------- - -* Create a nova-powervm fileio driver. Create associated UT.
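For illustration only (the helper, the file name, and the exact nova.conf rendering below are assumptions; only the DISK_DRIVER and instances_path options come from this spec), selecting the driver and locating the backing file might look like::

    # nova.conf (per this spec):
    #   [powervm]
    #   disk_driver = fileio
    # instances_path is the standard nova option.

    import os

    def ephemeral_disk_path(instances_path, instance):
        # One file-backed ephemeral disk per instance, kept under the
        # configured instances_path directory.
        return os.path.join(instances_path, instance.uuid, 'ephemeral.img')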
- - -Dependencies -============ - -NovaLink 1.0.0.5 - - -Testing -======= - -* Unit tests for all code - -* Manual testing will be driven using a File I/O ephemeral disk. - - -Documentation Impact -==================== - - -Will update the nova-powervm dev-ref to include File I/O as an additional -ephemeral disk option. - - -References -========== - -None diff --git a/doc/source/specs/pike/index.rst b/doc/source/specs/pike/index.rst deleted file mode 100644 index a83bfef9..00000000 --- a/doc/source/specs/pike/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Pike Specifications -=================== - -.. toctree:: - :glob: - - * diff --git a/doc/source/specs/pike/srr-capability-dynamic-toggle.rst b/doc/source/specs/pike/srr-capability-dynamic-toggle.rst deleted file mode 100644 index 2deea57a..00000000 --- a/doc/source/specs/pike/srr-capability-dynamic-toggle.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================== -Allow dynamic enable/disable of SRR capability -============================================== - -https://blueprints.launchpad.net/nova-powervm/+spec/srr-capability-dynamic-toggle - -Currently, to enable or disable the SRR capability on a VM, the VM needs to be -in the shut-off state. We should be able to toggle this field dynamically -so that shutting down the VM is not needed. - -Problem description -=================== - -The simplified remote restart (SRR) capability governs whether a VM can be -rebuilt (remote restarted) on a different host when the host on which the -VM resides is down. Currently this attribute can be changed only when the VM -is in the shut-off state. This blueprint addresses that by enabling the -simplified remote restart capability to be toggled dynamically (while the VM -is still active). - - -Use Cases ---------- - -The end user would like to: -- Enable the srr capability on the VM without shutting it down so that any -workloads on the VM are unaffected. -- Disable the srr capability for a VM which need not be rebuilt to another -host while the VM is still up and running. - - -Proposed change -=============== -The SRR capability is a VM level attribute and can be changed using -the resize operation. In the case of a resize operation for an active VM: -- Check whether the hypervisor supports dynamic toggling of the srr capability. -- If it is supported, proceed with updating the srr capability if it has been -changed. -- Issue a warning if updating the srr capability is not supported. - - -Alternatives ------------- - -None - - -Security impact ---------------- - -None - - -End user impact ---------------- - -None - - -Performance Impact ------------------- - -A change in srr capability is not likely to happen very frequently, so this -should not have a major impact. When the change happens the impact on the -performance of any other component (the VM, the compute service, the REST -service, etc.) should be negligible. - - -Deployer impact ---------------- - -The end user will be able to dynamically toggle the srr capability for the -VM. The changes can be utilized immediately once they are deployed.
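A minimal sketch of the resize-time flow described above, assuming hypothetical helpers (``_get_srr``, ``_set_srr``, ``_host_supports_dynamic_srr``) and a flavor extra spec such as ``powervm:srr_capability``; none of these names are mandated by this spec::

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def _maybe_toggle_srr(self, instance, flavor, vm_is_active):
        desired = flavor.extra_specs.get('powervm:srr_capability')
        if desired is None or desired == self._get_srr(instance):
            return
        if vm_is_active and not self._host_supports_dynamic_srr():
            # Dynamic toggle unsupported: warn rather than fail the resize.
            LOG.warning('Hypervisor does not support dynamically toggling '
                        'the SRR capability; shut off the VM first.')
            return
        self._set_srr(instance, desired)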
- - -Developer impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - - -Primary assignee: - manasmandlekar - -Other contributors: - shyvenug - -Work Items ----------- -NA - -Dependencies -============ - -Need to work with the PowerVM platform team to ensure that the srr toggle -capability is exposed for the Compute driver to consume. - - -Testing -======= - -Testing the change requires a full OpenStack environment with -Compute resources configured. -- Ensure the srr state for a VM can be toggled when it is up and running. -- Ensure the srr state for a VM can be toggled when it is shut off. -- Perform rebuild operations to ensure that the capability is indeed -getting utilized. - - -Documentation Impact -==================== - -None - - -References -========== - -None - - -History -======= - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Pike - - Introduced diff --git a/doc/source/specs/rocky/device-passthrough.rst b/doc/source/specs/rocky/device-passthrough.rst deleted file mode 100644 index 24c4dc6f..00000000 --- a/doc/source/specs/rocky/device-passthrough.rst +++ /dev/null @@ -1,414 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================== -Device Passthrough -================== - -https://blueprints.launchpad.net/nova-powervm/+spec/device-passthrough - -Provide a generic way to identify hardware devices such as GPUs and attach them -to VMs. - -Problem description -=================== - -Deployers want to be able to attach accelerators and other adapters to their -VMs. Today in Nova this is possible only in very restricted circumstances. The -goal of this blueprint is to enable generic passthrough of devices for -consumers of the nova-powervm driver. - -While these efforts may enable more, and should be extensible going forward, -the primary goal for the current release is to pass through entire physical -GPUs. That is, we are not attempting to pass through: - -* Physical functions, virtual functions, regions, etc. I.e. granularity smaller - than "whole adapter". This requires device type-specific support at the - platform level to perform operations such as discovery/inventorying, - configuration, and attach/detach. -* Devices with "a wire out the back" - i.e. those which are physically - connected to anything (networks, storage, etc.) external to the host. These - will require the operator to understand and be able to specify/select - specific connection parameters for proper placement. - -Use Cases ---------- -As an admin, I wish to be able to configure my host and flavors to allow -passthrough of whole physical GPUs to VMs. - -As a user, I wish to make use of appropriate flavors to create VMs with GPUs -attached. - -Proposed change -=============== - -Device Identification and Whitelisting --------------------------------------- -The administrator can identify and allow (explicitly) or deny (by omission) -passthrough of devices by way of a YAML file per compute host. - -.. note:: **Future:** We may someday figure out a way to support a config file - on the controller. This would allow e.g. cloud-wide whitelisting and - specification for particular device types by vendor/product ID, which - could then be overridden (or not) by the files on the compute nodes. - -The path to the config will be hardcoded as ``/etc/nova/inventory.yaml``.
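By way of illustration (the schema is described in the sections that follow; all values below are invented), such a file might contain::

    my_gpus:
      identification:
        vendor_id: "10DE"
        class: "0302"
      resource_class: CUSTOM_GPU
      traits:
        - CUSTOM_CAPABILITY_WHIZBANG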
- -The file shall contain paragraphs, each of which will: - -* Identify zero or more devices based on information available on the - ``IOSlot`` NovaLink REST object. In pypowervm, given a ManagedSystem wrapper - ``sys_w``, a list of ``IOSlot`` wrappers is available via - ``sys_w.asio_config.io_slots``. See `identification`_. Any device not - identified by any paragraph in the file is denied for passthrough. But see - the `allow`_ section for future plans around supporting explicit denials. -* Name the resource class to associate with the resource provider inventory unit - by which the device will be exposed in the driver. If not specified, - ``CUSTOM_IOSLOT`` is used. See `resource_class`_. -* List traits to include on the resource provider in addition to those generated - automatically. See `traits`_. - -A `formal schema`_ is proposed for review. - -.. _formal schema: https://review.openstack.org/#/c/579289/3/nova_powervm/virt/powervm/passthrough_schema.yaml - -Here is a summary description of each section. - -Name -~~~~ -Each paragraph will be introduced by a key which is a human-readable name for -the paragraph. The name has no programmatic significance other than to separate -paragraphs. Each paragraph's name must be unique within the file. - -identification -~~~~~~~~~~~~~~ -Each paragraph will have an ``identification`` section, which is an object -containing one or more keys corresponding to ``IOSlot`` properties, as follows: - - ================ ==================== ===================================== - YAML key IOSlot property Description - ================ ==================== ===================================== - vendor_id pci_vendor_id \X{4} (four uppercase hex digits) - device_id pci_dev_id \X{4} " - subsys_vendor_id pci_subsys_vendor_id \X{4} " - subsys_device_id pci_subsys_dev_id \X{4} " - class pci_class \X{4} " - revision_id pci_rev_id \X{2} (two uppercase hex digits) - drc_index drc_index \X{8} (eight uppercase hex digits) - drc_name drc_name String (physical location code) - ================ ==================== ===================================== - -The values are expected to match those produced by ``pvmctl ioslot list -d -`` for a given property. - -The ``identification`` section is required, and must contain at least one of -the above keys. - -When multiple keys are provided in a paragraph, they are matched with ``AND`` -logic. - -.. note:: It is a stretch goal of this blueprint to allow wildcards in (some - of) the values. E.g. ``drc_name: U78CB.001.WZS0JZB-P1-*`` would - allow everything on the ``P1`` planar of the ``U78CB.001.WZS0JZB`` - enclosure. If we get that far, a spec amendment will be proposed with - the specifics (what syntax, which fields, etc.). - -allow -~~~~~ -.. note:: The ``allow`` section will not be supported initially, but is - documented here because we thought through what it should look like. - In the initial implementation, any device encompassed by a paragraph - is allowed for passthrough. - -Each paragraph will support a boolean ``allow`` keyword. - -If omitted, the default is ``true`` - i.e. devices identified by this -paragraph's ``identification`` section are permitted for passthrough. (Note, -however, that devices not encompassed by the union of all the -``identification`` paragraphs in the file are denied for passthrough.) - -If ``allow`` is ``false``, the only other section allowed is -``identification``, since the rest don't make sense. 
- -A given device can only be represented once across all ``allow=true`` -paragraphs (implicit or explicit); an "allowed" device found more than once -will result in an error. - -A given device can be represented zero or more times across all ``allow=false`` -paragraphs. - -We will first apply the ``allow=true`` paragraphs to construct a preliminary -list of devices; and then apply each ``allow=false`` paragraph and remove -explicitly denied devices from that list. - -.. note:: Again, we're not going to support the ``allow`` section at all - initially. It will be a stretch goal to add it as part of this - release, or it may be added in a subsequent release. - -resource_class -~~~~~~~~~~~~~~ -If ``allow`` is omitted or ``true``, an optional ``resource_class`` key is -supported. Its string value allows the author to designate the resource class -to be used for the inventory unit representing the device on the resource -provider. If omitted, ``CUSTOM_IOSLOT`` will be used as the default. - -.. note:: **Future:** We may be able to get smarter about dynamically - defaulting the resource class based on inspecting the device - metadata. For now, we have to rely on the author of the config file - to tell us what kind of device we're looking at. - -traits -~~~~~~ -If ``allow`` is omitted or ``true``, an optional ``traits`` subsection is -supported. Its value is an array of strings, each of which is the name of a -trait to be added to the resource providers of each device represented by this -paragraph. If the ``traits`` section is included, it must have at least one -value in the list. (If no additional traits are desired, omit the section.) - -The values must be valid trait names (either standard from ``os-traits`` or -custom, matching ``CUSTOM_[A-Z0-9_]*``). These will be in addition to the -traits automatically added by the driver - see `Generated Traits`_ below. -Traits which conflict with automatically-generated traits will result in an -error: the driver must be the single source of truth for the traits it -generates. - -Traits may be used to indicate any static attribute of a device - for example, -a capability (``CUSTOM_CAPABILITY_WHIZBANG``) not otherwise indicated by -`Generated Traits`_. - -Resource Providers ------------------- -The driver shall create nested resource providers, one per device (slot), as -children of the compute node provider generated by Nova. - -.. TODO: Figure out how NVLink devices appear and how to handle them - ideally - by hiding them and automatically attaching them with their corresponding - device. - -The provider name shall be generated as ``PowerVM IOSlot %(drc_index)08X`` e.g. -``PowerVM IOSlot 1C0FFEE1``. We shall let the placement service generate the -UUID. This naming scheme allows us to identify the full set of providers we -"own". This includes identifying providers we may have created on a previous -iteration (potentially in a different process) which now need to be purged -(e.g. because the slot no longer exists on the system). It also helps us -provide a clear migration path in the future, if, for example, Cyborg takes -over generating these providers. It also paves the way for providers -corresponding to things smaller than a slot; e.g. PFs might be namespaced -``PowerVM PF %(drc_index)08X``. 
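The naming scheme is mechanical in both directions; a short sketch (not part of the spec itself) of generating a provider name and recovering the DRC index, which the ``spawn`` section below relies on::

    PREFIX = 'PowerVM IOSlot'

    def provider_name(drc_index):
        # e.g. 0x1C0FFEE1 -> 'PowerVM IOSlot 1C0FFEE1'
        return '%s %08X' % (PREFIX, drc_index)

    def drc_index_from_name(name):
        # The DRC index is the last eight characters of the name.
        return int(name[-8:], 16) if name.startswith(PREFIX) else None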
- -Inventory -~~~~~~~~~ -Each device RP shall have an inventory of:: - - total: 1 - reserved: 0 - min_unit: 1 - max_unit: 1 - step_size: 1 - allocation_ratio: 1.0 - -of the `resource_class`_ specified in the config file for the paragraph -matching this device (``CUSTOM_IOSLOT`` by default). - -.. note:: **Future:** Some day we will provide SR-IOV VFs, vGPUs, FPGA - regions/functions, etc. At that point we will conceivably have - inventory of multiple units of multiple resource classes, etc. - -Generated Traits -~~~~~~~~~~~~~~~~ -The provider for a device shall be decorated with the following -automatically-generated traits: - -* ``CUSTOM_POWERVM_IOSLOT_VENDOR_ID_%(vendor_id)04X`` -* ``CUSTOM_POWERVM_IOSLOT_DEVICE_ID_%(device_id)04X`` -* ``CUSTOM_POWERVM_IOSLOT_SUBSYS_VENDOR_ID_%(subsys_vendor_id)04X`` -* ``CUSTOM_POWERVM_IOSLOT_SUBSYS_DEVICE_ID_%(subsys_device_id)04X`` -* ``CUSTOM_POWERVM_IOSLOT_CLASS_%(class)04X`` -* ``CUSTOM_POWERVM_IOSLOT_REVISION_ID_%(revision_id)02X`` -* ``CUSTOM_POWERVM_IOSLOT_DRC_INDEX_%(drc_index)08X`` -* ``CUSTOM_POWERVM_IOSLOT_DRC_NAME_%(drc_name)s`` where ``drc_name`` is - normalized via ``os_traits.normalize_name``. - -In addition, the driver shall decorate the provider with any `traits`_ -specified in the config file paragraph identifying this device. If that -paragraph specifies any of the above generated traits, an exception shall be -raised (we'll blow up the compute service). - -update_provider_tree -~~~~~~~~~~~~~~~~~~~~ -The above provider tree structure/data shall be provided to Nova by overriding -the ``ComputeDriver.update_provider_tree`` method. The algorithm shall be as -follows: - -* Parse the config file. -* Discover devices (``GET /ManagedSystem``, pull out - ``.asio_config.io_slots``). -* Merge the config data with the discovered devices to produce a list of - devices to pass through, along with inventory of the appropriate resource - class name, and traits (generated and specified). -* Ensure the tree contains entries according to this calculated passthrough - list, with appropriate inventory and traits. -* Set-subtract the names of the providers in the calculated passthrough list - from those in the provider tree whose names are prefixed with ``PowerVM - IOSlot`` and delete the resulting "orphans". - -This is in addition to the standard ``update_provider_tree`` contract of -ensuring appropriate ``VCPU``, ``MEMORY_MB``, and ``DISK_GB`` resources on the -compute node provider. - -.. note:: It is a stretch goal of this blueprint to implement caching and/or - other enhancements to the above algorithm to optimize performance by - minimizing the need to call PowerVM REST and/or process whitelist - files every time. - -Flavor Support --------------- -Existing Nova support for generic resource specification via flavor extra specs -should "just work". For example, a flavor requesting two GPUs might look like:: - - resources:VCPU=1 - resources:MEMORY_MB=2048 - resources:DISK_GB=100 - resources1:CUSTOM_GPU=1 - traits1:CUSTOM_POWERVM_IOSLOT_VENDOR_ID_G00D=required - traits1:CUSTOM_POWERVM_IOSLOT_DEVICE_ID_F00D=required - resources2:CUSTOM_GPU=1 - traits2:CUSTOM_POWERVM_IOSLOT_DRC_INDEX_1C0FFEE1=required - -PowerVMDriver -------------- - -spawn -~~~~~ -During ``spawn``, we will query placement to retrieve the resource provider -records listed in the ``allocations`` parameter. Any provider names which are -prefixed with ``PowerVM IOSlot`` will be parsed to extract the DRC index (the -last eight characters of the provider name).
The corresponding slots will be -extracted from the ``ManagedSystem`` payload and added to the -``LogicalPartition`` payload for the instance as it is being created. - -destroy -~~~~~~~ -IOSlots are detached automatically when we ``DELETE`` the ``LogicalPartition``, -so no changes should be required here. - -Live Migration -~~~~~~~~~~~~~~ -Since we can't migrate the state of an active GPU, we will block live migration -of a VM with an attached IOSlot. - -.. _`Cold Migration`: - -Cold Migration, Rebuild, Remote Restart -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -We should get these for free, but need to make sure they're tested. - -Hot plug/unplug -~~~~~~~~~~~~~~~ -This is not in the scope of the current effort. For now, attaching/detaching -devices to/from existing VMs can only be accomplished via resize (`Cold -Migration`_). - -Alternatives ------------- -Use Nova's PCI passthrough subsystem. We've all agreed this sucks and is not -the way forward. - -Use oslo.config instead of a YAML file. Experience with the -``[pci]passthrough_whitelist`` has led us to conclude that config format is too -restrictive/awkward. The direction for Nova (as discussed in the Queens PTG in -Denver) will be toward some kind of YAML format; we're going to be the pioneers -on this front. - -Security impact ---------------- -It is the operator's responsibility to ensure that the passthrough YAML config -file has appropriate permissions, and lists only devices which do not -themselves pose a security risk if attached to a malicious VM. - -End user impact ---------------- -Users get acceleration for their workloads \o/ - -Performance Impact ------------------- - -Discovery -~~~~~~~~~ -For the `update_provider_tree`_ flow, we're adding the step of loading and -parsing the passthrough YAML config file. This should be negligible compared to -e.g. retrieving the ``ManagedSystem`` object (which we're already doing, so no -impact there). - -spawn/destroy -~~~~~~~~~~~~~ -There's no impact from the community side. It may take longer to create or -destroy a LogicalPartition with attached IOSlots. - -Deployer impact ---------------- -None. - -Developer impact ----------------- -None. - -Upgrade impact --------------- -None. - -Implementation -============== - -Assignee(s) ------------ -Primary assignee: - efried - -Other contributors: - edmondsw, mdrabe - -Work Items ----------- -See `Proposed change`_. - - -Dependencies -============ -os-traits 0.9.0 to pick up the ``normalize_name`` method. - -Testing -======= -Testing this in the CI will be challenging, given that we are not likely to -score GPUs for all of our nodes. - -We will likely need to rely on manual testing and PowerVC to cover the code -paths described under `PowerVMDriver`_ with a handful of various device -configurations. - - -Documentation Impact -==================== -* Add a section to our support matrix for generic device passthrough. -* User documentation for: - * How to build the passthrough YAML file. - * How to construct flavors accordingly. - -References -========== -None. - - -History -======= - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Rocky - - Introduced diff --git a/doc/source/specs/rocky/index.rst b/doc/source/specs/rocky/index.rst deleted file mode 100644 index ce48b5d9..00000000 --- a/doc/source/specs/rocky/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Rocky Specifications -==================== - -.. 
toctree:: - :glob: - - * diff --git a/doc/source/specs/template.rst b/doc/source/specs/template.rst deleted file mode 100644 index ee36e65b..00000000 --- a/doc/source/specs/template.rst +++ /dev/null @@ -1,316 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Example Spec - The title of your blueprint -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/nova-powervm/+spec/example - -Introduction paragraph -- why are we doing anything? A single paragraph of -prose that operators can understand. The title and this first paragraph -should be used as the subject line and body of the commit message -respectively. - -Some notes about the nova-powervm spec and blueprint process: - -* Not all blueprints need a spec. For more information see - https://docs.openstack.org/nova/latest/contributor/blueprints.html#specs - -* The aim of this document is first to define the problem we need to solve, - and second to agree on the overall approach to solve that problem. - -* This is not intended to be extensive documentation for a new feature. - For example, there is no need to specify the exact configuration changes, - nor the exact details of any DB model changes. But you should still define - that such changes are required, and be clear on how that will affect - upgrades. - -* You should aim to get your spec approved before writing your code. - While you are free to write prototypes and code before getting your spec - approved, it's possible that the outcome of the spec review process leads - you towards a fundamentally different solution than you first envisaged. - -* But, API changes are held to a much higher level of scrutiny. - As soon as an API change merges, we must assume it could be in production - somewhere, and as such, we then need to support that API change forever. - To avoid getting that wrong, we do want lots of details about API changes - upfront. - -Some notes about using this template: - -* Your spec should be in ReSTructured text, like this template. - -* Please wrap text at 79 columns. - -* The filename in the git repository should match the launchpad URL, for - example: https://blueprints.launchpad.net/nova-powervm/+spec/awesome-thing - should be named awesome-thing.rst - -* Please do not delete any of the sections in this template. If you have - nothing to say for a whole section, just write: None - -* For help with syntax, see http://sphinx-doc.org/rest.html - -* To test out your formatting, build the docs using tox and see the generated - HTML file in doc/build/html/specs/ - -* If you would like to provide a diagram with your spec, ascii diagrams are - required. http://asciiflow.com/ is a very nice tool to assist with making - ascii diagrams. The reason for this is that the tool used to review specs is - based purely on plain text. Plain text will allow review to proceed without - having to look at additional files which cannot be viewed in gerrit. It - will also allow inline feedback on the diagram itself. - -* If your specification proposes any changes to the Nova REST API such - as changing parameters which can be returned or accepted, or even - the semantics of what happens when a client calls into the API, then - you should add the APIImpact flag to the commit message.
Specifications with - the APIImpact flag can be found with the following query: - - https://review.openstack.org/#/q/status:open+project:openstack/nova-powervm+message:apiimpact,n,z - -Problem description -=================== - -A detailed description of the problem. What problem is this blueprint -addressing? - -Use Cases ---------- - -What use cases does this address? What impact on actors does this change have? -Ensure you are clear about the actors in each use case: Developer, End User, -Deployer etc. - -Proposed change -=============== - -Here is where you cover the change you propose to make in detail. How do you -propose to solve this problem? - -If this is one part of a larger effort make it clear where this piece ends. In -other words, what's the scope of this effort? - -At this point, if you would like to just get feedback on if the problem and -proposed change fit in nova-powervm, you can stop here and post this for review -to get preliminary feedback. If so, please say: -Posting to get preliminary feedback on the scope of this spec. - -Alternatives ------------- - -What other ways could we do this thing? Why aren't we using those? This doesn't -have to be a full literature review, but it should demonstrate that thought has -been put into why the proposed solution is an appropriate one. - -Security impact ---------------- - -Describe any potential security impact on the system. Some of the items to -consider include: - -* Does this change touch sensitive data such as tokens, keys, or user data? - -* Does this change alter the API in a way that may impact security, such as - a new way to access sensitive information or a new way to login? - -* Does this change involve cryptography or hashing? - -* Does this change require the use of sudo or any elevated privileges? - -* Does this change involve using or parsing user-provided data? This could - be directly at the API level or indirectly such as changes to a cache layer. - -* Can this change enable a resource exhaustion attack, such as allowing a - single API interaction to consume significant server resources? Some examples - of this include launching subprocesses for each connection, or entity - expansion attacks in XML. - -For more detailed guidance, please see the OpenStack Security Guidelines as -a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These -guidelines are a work in progress and are designed to help you identify -security best practices. For further information, feel free to reach out -to the OpenStack Security Group at openstack-security@lists.openstack.org. - - -End user impact ---------------- - -How would the end user be impacted by this change? The "End User" is defined -as the users of the deployed cloud. - - -Performance Impact ------------------- - -Describe any potential performance impact on the system, for example -how often will new code be called, and is there a major change to the calling -pattern of existing code. - -Examples of things to consider here include: - -* A small change in a utility function or a commonly used decorator can have a - large impact on performance. - -* Calls which result in database queries (whether direct or via conductor) - can have a profound impact on performance when called in critical sections of - the code. - -* Will the change include any locking, and if so what considerations are there - on holding the lock?
- - -Deployer impact ---------------- - -Discuss things that will affect how you deploy and configure OpenStack -that have not already been mentioned, such as: - -* What config options are being added? Are the default values ones which will - work well in real deployments? - -* Is this a change that takes immediate effect after it's merged, or is it - something that has to be explicitly enabled? - -* If this change is a new binary, how would it be deployed? - -* Please state anything that those doing continuous deployment, or those - upgrading from the previous release, need to be aware of. Also describe - any plans to deprecate configuration values or features. - - -Developer impact ----------------- - -Discuss things that will affect other developers working on the driver or -OpenStack in general. - -Upgrade impact --------------- - -Describe any potential upgrade impact on the system, such as: - -* If this change adds a new feature to the compute host that the controller - services rely on, the controller services may need to check the minimum - compute service version in the deployment before using the new feature. For - example, in Ocata, the FilterScheduler did not use the Placement API until - all compute services were upgraded to at least Ocata. - -* Nova supports N-1 version *nova-compute* services for rolling upgrades. Does - the proposed change need to consider older code running that may impact how - the new change functions, for example, by changing or overwriting global - state in the database? This is generally most problematic when making changes - that involve multiple compute hosts, like move operations such as migrate, - resize, unshelve and evacuate. - - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. - -Primary assignee: - - -Other contributors: - - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. - - -Dependencies -============ - -* Include specific references to specs and/or blueprints in nova-powervm, or - in other projects, that this one either depends on or is related to. For - example, a dependency on pypowervm changes should be documented here. - -* If this requires functionality of another project that is not currently used - by nova-powervm document that fact. - -* Does this feature require any new library dependencies or code otherwise not - included in OpenStack? Or does it depend on a specific version of library? - - -Testing -======= - -Please discuss the important scenarios needed to test here, as well as -specific edge cases we should be ensuring work correctly. For each -scenario please specify if this requires specialized hardware, a full -OpenStack environment, or can be simulated inside the nova-powervm tree. - -Please discuss how the change will be tested. We especially want to know what -tempest tests will be added. It is assumed that unit test coverage will be -added so that doesn't need to be mentioned explicitly, but discussion of why -you think unit tests are sufficient and we don't need to add more tempest -tests would need to be included.
- -Is this untestable in gate given current limitations (specific hardware / -software configurations available)? If so, are there mitigation plans (3rd -party testing, gate enhancements, etc.). - - -Documentation Impact -==================== - -Which audiences are affected most by this change, and which documentation -titles on nova-powervm.readthedocs.io should be updated because of this change? -Don't repeat details discussed above, but reference them here in the context of -documentation for multiple audiences. For example, the Operations Guide targets -cloud operators, and the End User Guide would need to be updated if the change -offers a new feature available through the CLI or dashboard. If a config option -changes or is deprecated, note here that the documentation needs to be updated -to reflect this specification's change. - -References -========== - -Please add any useful references here. You are not required to have any -reference. Moreover, this specification should still make sense when your -references are unavailable. Examples of what you could include are: - -* Links to mailing list or IRC discussions - -* Links to notes from a summit session - -* Links to relevant research, if appropriate - -* Related specifications as appropriate (e.g. if it's an EC2 thing, link the - EC2 docs) - -* Anything else you feel it is worthwhile to refer to - - -History -======= - -Optional section intended to be used each time the spec is updated to describe -new design, API or any database schema updates. Useful to let the reader -understand what has happened over time. - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Rocky - - Introduced diff --git a/doc/source/support-matrix.ini b/doc/source/support-matrix.ini deleted file mode 100644 index d195ba74..00000000 --- a/doc/source/support-matrix.ini +++ /dev/null @@ -1,654 +0,0 @@ -# Copyright (C) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# For information about the format of this file, refer to the documentation -# for sphinx-feature-classification: - -[driver.powervm] -title=PowerVM - -[operation.attach-volume] -title=Attach block volume to instance -status=optional -notes=The attach volume operation provides a means to hotplug - additional block storage to a running instance. This allows - storage capabilities to be expanded without interruption of - service. In a cloud model it would be more typical to just - spin up a new instance with large storage, so the ability to - hotplug extra storage is for those cases where the instance - is considered to be more of a pet than cattle. Therefore - this operation is not considered to be mandatory to support. -cli=nova volume-attach -driver.powervm=complete - -[operation.attach-tagged-volume] -title=Attach tagged block device to instance -status=optional -notes=Attach a block device with a tag to an existing server instance. See - "Device tags" for more information.
-cli=nova volume-attach [--tag ] -driver.powervm=missing - -[operation.detach-volume] -title=Detach block volume from instance -status=optional -notes=See notes for attach volume operation. -cli=nova volume-detach -driver.powervm=complete - -[operation.extend-volume] -title=Extend block volume attached to instance -status=optional -notes=The extend volume operation provides a means to extend - the size of an attached volume. This allows volume size - to be expanded without interruption of service. - In a cloud model it would be more typical to just - spin up a new instance with large storage, so the ability to - extend the size of an attached volume is for those cases - where the instance is considered to be more of a pet than cattle. - Therefore this operation is not considered to be mandatory to support. -cli=cinder extend -driver.powervm=partial -driver-notes.powervm=Not supported for rbd volumes. - -[operation.attach-interface] -title=Attach virtual network interface to instance -status=optional -notes=The attach interface operation provides a means to hotplug - additional interfaces to a running instance. Hotplug support - varies between guest OSes and some guests require a reboot for - new interfaces to be detected. This operation allows interface - capabilities to be expanded without interruption of service. - In a cloud model it would be more typical to just spin up a - new instance with more interfaces. -cli=nova interface-attach -driver.powervm=complete - -[operation.attach-tagged-interface] -title=Attach tagged virtual network interface to instance -status=optional -notes=Attach a virtual network interface with a tag to an existing - server instance. See "Device tags" for more information. -cli=nova interface-attach [--tag ] -driver.powervm=missing - -[operation.detach-interface] -title=Detach virtual network interface from instance -status=optional -notes=See notes for attach-interface operation. -cli=nova interface-detach -driver.powervm=complete - -[operation.maintenance-mode] -title=Set the host in a maintenance mode -status=optional -notes=This operation allows a host to be placed into maintenance - mode, automatically triggering migration of any running - instances to an alternative host and preventing new - instances from being launched. This is not considered - to be a mandatory operation to support. - The driver methods to implement are "host_maintenance_mode" and - "set_host_enabled". -cli=nova host-update -driver.powervm=complete - -[operation.evacuate] -title=Evacuate instances from a host -status=optional -notes=A possible failure scenario in a cloud environment is the outage - of one of the compute nodes. In such a case the instances of the down - host can be evacuated to another host. It is assumed that the old host - is unlikely ever to be powered back on, otherwise the evacuation - attempt will be rejected. When the instances get moved to the new - host, their volumes get re-attached and the locally stored data is - dropped. That happens in the same way as a rebuild. - This is not considered to be a mandatory operation to support. -cli=nova evacuate ;nova host-evacuate -driver.powervm=complete - -[operation.rebuild] -title=Rebuild instance -status=optional -notes=A possible use case is when additional attributes need to be set - on the instance; nova will purge all existing data from the system - and remake the VM with the given information such as 'metadata' and - 'personalities'. This is not considered to be a mandatory - operation to support.
-cli=nova rebuild -driver.powervm=complete - -[operation.get-guest-info] -title=Guest instance status -status=mandatory -notes=Provides realtime information about the power state of the guest - instance. Since the power state is used by the compute manager for - tracking changes in guests, this operation is considered mandatory to - support. -cli= -driver.powervm=complete - -[operation.get-host-uptime] -title=Guest host uptime -status=optional -notes=Returns the host uptime since power on; it's used to report - hypervisor status. -cli= -driver.powervm=complete - -[operation.get-host-ip] -title=Guest host ip -status=optional -notes=Returns the IP of this host; it's used when doing - resize and migration. -cli= -driver.powervm=complete - -[operation.live-migrate] -title=Live migrate instance across hosts -status=optional -notes=Live migration provides a way to move an instance off one - compute host, to another compute host. Administrators may use - this to evacuate instances from a host that needs to undergo - maintenance tasks, though of course this may not help if the - host is already suffering a failure. In general instances are - considered cattle rather than pets, so it is expected that an - instance is liable to be killed if host maintenance is required. - It is technically challenging for some hypervisors to provide - support for the live migration operation, particularly those - built on the container based virtualization. Therefore this - operation is not considered mandatory to support. -cli=nova live-migration ;nova host-evacuate-live -driver.powervm=complete - -[operation.force-live-migration-to-complete] -title=Force live migration to complete -status=optional -notes=Live migration provides a way to move a running instance to another - compute host. But it can sometimes fail to complete if an instance has - a high rate of memory or disk page access. - This operation provides the user with an option to assist the progress - of the live migration. The mechanism used to complete the live - migration depends on the underlying virtualization subsystem - capabilities. If libvirt/qemu is used and the post-copy feature is - available and enabled then the force complete operation will cause - a switch to post-copy mode. Otherwise the instance will be suspended - until the migration is completed or aborted. -cli=nova live-migration-force-complete -driver.powervm=missing - -[operation.launch] -title=Launch instance -status=mandatory -notes=Importing pre-existing running virtual machines on a host is - considered out of scope of the cloud paradigm. Therefore this - operation is mandatory to support in drivers. -cli= -driver.powervm=complete - -[operation.pause] -title=Stop instance CPUs (pause) -status=optional -notes=Stopping an instance's CPUs can be thought of as roughly - equivalent to suspend-to-RAM. The instance is still present - in memory, but execution has stopped. The problem, however, - is that there is no mechanism to inform the guest OS that - this takes place, so upon unpausing, its clocks will no - longer report correct time. For this reason hypervisor vendors - generally discourage use of this feature and some do not even - implement it. Therefore this operation is considered optional - to support in drivers. -cli=nova pause -driver.powervm=missing - -[operation.reboot] -title=Reboot instance -status=optional -notes=It is reasonable for a guest OS administrator to trigger a - graceful reboot from inside the instance.
A host initiated - graceful reboot requires guest co-operation and a non-graceful - reboot can be achieved by a combination of stop+start. Therefore - this operation is considered optional. -cli=nova reboot -driver.powervm=complete - -[operation.rescue] -title=Rescue instance -status=optional -notes=The rescue operation starts an instance in a special - configuration whereby it is booted from a special root - disk image. The goal is to allow an administrator to - recover the state of a broken virtual machine. In general - the cloud model considers instances to be cattle, so if - an instance breaks the general expectation is that it be - thrown away and a new instance created. Therefore this - operation is considered optional to support in drivers. -cli=nova rescue -driver.powervm=complete - -[operation.resize] -title=Resize instance -status=optional -notes=The resize operation allows the user to change a running - instance to match the size of a different flavor from the one - it was initially launched with. There are many different - flavor attributes that potentially need to be updated. In - general it is technically challenging for a hypervisor to - support the alteration of all relevant config settings for a - running instance. Therefore this operation is considered - optional to support in drivers. -cli=nova resize -driver.powervm=complete - -[operation.resume] -title=Restore instance -status=optional -notes=See notes for the suspend operation -cli=nova resume -driver.powervm=missing - -[operation.set-admin-password] -title=Set instance admin password -status=optional -notes=Provides a mechanism to (re)set the password of the administrator - account inside the instance operating system. This requires that the - hypervisor has a way to communicate with the running guest operating - system. Given the wide range of operating systems in existence it is - unreasonable to expect this to be practical in the general case. The - configdrive and metadata service both provide a mechanism for setting - the administrator password at initial boot time. In the case where this - operation were not available, the administrator would simply have to - login to the guest and change the password in the normal manner, so - this is just a convenient optimization. Therefore this operation is - not considered mandatory for drivers to support. -cli=nova set-password -driver.powervm=missing - -[operation.snapshot] -title=Save snapshot of instance disk -status=optional -notes=The snapshot operation allows the current state of the - instance root disk to be saved and uploaded back into the - glance image repository. The instance can later be booted - again using this saved image. This is in effect making - the ephemeral instance root disk into a semi-persistent - storage, in so much as it is preserved even though the guest - is no longer running. In general though, the expectation is - that the root disks are ephemeral so the ability to take a - snapshot cannot be assumed. Therefore this operation is not - considered mandatory to support. -cli=nova image-create -driver.powervm=complete - -[operation.suspend] -title=Suspend instance -status=optional -notes=Suspending an instance can be thought of as roughly - equivalent to suspend-to-disk. The instance no longer - consumes any RAM or CPUs, with its live running state - having been preserved in a file on disk. It can later - be restored, at which point it should continue execution - where it left off.
As with stopping instance CPUs, it suffers from the fact - that the guest OS will typically be left with a clock that - is no longer telling correct time. For container based - virtualization solutions, this operation is particularly - technically challenging to implement and is an area of - active research. This operation tends to make more sense - when thinking of instances as pets, rather than cattle, - since with cattle it would be simpler to just terminate - the instance instead of suspending. Therefore this operation - is considered optional to support. -cli=nova suspend -driver.powervm=missing - -[operation.swap-volume] -title=Swap block volumes -status=optional -notes=The swap volume operation is a mechanism for changing a running - instance so that its attached volume(s) are backed by different - storage in the host. An alternative to this would be to simply - terminate the existing instance and spawn a new instance with the - new storage. In other words this operation is primarily targeted towards - the pet use case rather than cattle, however, it is required for volume - migration to work in the volume service. This is considered optional to - support. -cli=nova volume-update -driver.powervm=missing - -[operation.terminate] -title=Shutdown instance -status=mandatory -notes=The ability to terminate a virtual machine is required in - order for a cloud user to stop utilizing resources and thus - avoid indefinitely ongoing billing. Therefore this operation - is mandatory to support in drivers. -cli=nova delete -driver.powervm=complete - -[operation.trigger-crash-dump] -title=Trigger crash dump -status=optional -notes=The trigger crash dump operation is a mechanism for triggering - a crash dump in an instance. The feature is typically implemented by - injecting an NMI (Non-maskable Interrupt) into the instance. It provides - a means to dump the production memory image as a dump file which is useful - for users. Therefore this operation is considered optional to support. -cli=nova trigger-crash-dump -driver.powervm=missing - -[operation.unpause] -title=Resume instance CPUs (unpause) -status=optional -notes=See notes for the "Stop instance CPUs" operation -cli=nova unpause -driver.powervm=missing - -[guest.disk.autoconfig] -title=Auto configure disk -status=optional -notes=Partition and resize the FS to match the size specified by - flavors.root_gb. As this is a hypervisor-specific feature, - this operation is considered optional to support. -cli= -driver.powervm=missing - -[guest.disk.rate-limit] -title=Instance disk I/O limits -status=optional -notes=The ability to set rate limits on virtual disks allows for - greater performance isolation between instances running on the - same host storage. It is valid to delegate scheduling of I/O - operations to the hypervisor with its default settings, instead - of doing fine grained tuning. Therefore this is not considered - to be a mandatory configuration to support. -cli=nova limits -driver.powervm=missing - -[guest.setup.configdrive] -title=Config drive support -status=choice(guest.setup) -notes=The config drive provides an information channel into - the guest operating system, to enable configuration of the - administrator password, file injection, registration of - SSH keys, etc. Since cloud images typically ship with all - login methods locked, a mechanism to set the administrator - password or keys is required to get login access. Alternatives - include the metadata service and disk injection.
At least one - of the guest setup mechanisms is required to be supported by - drivers, in order to enable login access. -cli= -driver.powervm=complete - -[guest.setup.inject.file] -title=Inject files into disk image -status=optional -notes=This allows for the end user to provide data for multiple - files to be injected into the root filesystem before an instance - is booted. This requires that the compute node understand the - format of the filesystem and any partitioning scheme it might - use on the block device. This is a non-trivial problem considering - the vast number of filesystems in existence. The problem of injecting - files into a guest OS is better solved by obtaining the data via the - metadata service or config drive. Therefore this operation is considered - optional to support. -cli= -driver.powervm=missing - -[guest.setup.inject.networking] -title=Inject guest networking config -status=optional -notes=This allows for static networking configuration (IP - address, netmask, gateway and routes) to be injected directly - into the root filesystem before an instance is booted. This - requires that the compute node understand how networking is - configured in the guest OS which is a non-trivial problem - considering the vast number of operating system types. The - problem of configuring networking is better solved by DHCP - or by obtaining static config via - config drive. Therefore this operation is considered optional - to support. -cli= -driver.powervm=missing - -[console.rdp] -title=Remote desktop over RDP -status=choice(console) -notes=This allows the administrator to interact with the graphical - console of the guest OS via RDP. This provides a way to see boot - up messages and login to the instance when networking configuration - has failed, thus preventing a network based login. Some operating - systems may prefer to emit messages via the serial console for - easier consumption. Therefore support for this operation is not - mandatory, however, a driver is required to support at least one - of the listed console access operations. -cli=nova get-rdp-console -driver.powervm=missing - -[console.serial.log] -title=View serial console logs -status=choice(console) -notes=This allows the administrator to query the logs of data - emitted by the guest OS on its virtualized serial port. For - UNIX guests this typically includes all boot up messages and - so is useful for diagnosing problems when an instance fails - to successfully boot. Not all guest operating systems will be - able to emit boot information on a serial console, others may - only support graphical consoles. Therefore support for this - operation is not mandatory, however, a driver is required to - support at least one of the listed console access operations. -cli=nova console-log -driver.powervm=missing - -[console.serial.interactive] -title=Remote interactive serial console -status=choice(console) -notes=This allows the administrator to interact with the serial - console of the guest OS. This provides a way to see boot - up messages and login to the instance when networking configuration - has failed, thus preventing a network based login. Not all guest - operating systems will be able to emit boot information on a serial - console, others may only support graphical consoles. Therefore support - for this operation is not mandatory, however, a driver is required to - support at least one of the listed console access operations.
- This feature was introduced in the Juno release with blueprint - https://blueprints.launchpad.net/nova/+spec/serial-ports -cli=nova get-serial-console -driver.powervm=missing - -[console.spice] -title=Remote desktop over SPICE -status=choice(console) -notes=This allows the administrator to interact with the graphical - console of the guest OS via SPICE. This provides a way to see boot - up messages and login to the instance when networking configuration - has failed, thus preventing a network based login. Some operating - systems may prefer to emit messages via the serial console for - easier consumption. Therefore support for this operation is not - mandatory, however, a driver is required to support at least one - of the listed console access operations. -cli=nova get-spice-console -driver.powervm=missing - -[console.vnc] -title=Remote desktop over VNC -status=choice(console) -notes=This allows the administrator to interact with the graphical - console of the guest OS via VNC. This provides a way to see boot - up messages and login to the instance when networking configuration - has failed, thus preventing a network based login. Some operating - systems may prefer to emit messages via the serial console for - easier consumption. Therefore support for this operation is not - mandatory, however, a driver is required to support at least one - of the listed console access operations. -cli=nova get-vnc-console -driver.powervm=complete - -[storage.block] -title=Block storage support -status=optional -notes=Block storage provides instances with direct attached - virtual disks that can be used for persistent storage of data. - As an alternative to direct attached disks, an instance may - choose to use network based persistent storage. OpenStack provides - object storage via the Swift service, or a traditional filesystem - such as NFS may be used. Some types of instances may - not require persistent storage at all, being simple transaction - processing systems reading requests & sending results to and from - the network. Therefore support for this configuration is not - considered mandatory for drivers to support. -cli= -driver.powervm=complete - -[storage.block.backend.fibrechannel] -title=Block storage over fibre channel -status=optional -notes=To maximise performance of the block storage, it may be desirable - to directly access fibre channel LUNs from the underlying storage - technology on the compute hosts. Since this is just a performance - optimization of the I/O path it is not considered mandatory to support. -cli= -driver.powervm=complete - -[storage.block.backend.iscsi] -title=Block storage over iSCSI -status=condition(storage.block==complete) -notes=If the driver wishes to support block storage, it is common to - provide an iSCSI based backend to access the storage from cinder. - This isolates the compute layer for knowledge of the specific storage - technology used by Cinder, albeit at a potential performance cost due - to the longer I/O path involved. If the driver chooses to support - block storage, then this is considered mandatory to support, otherwise - it is considered optional. -cli= -driver.powervm=complete - -[storage.block.backend.iscsi.auth.chap] -title=CHAP authentication for iSCSI -status=optional -notes=If accessing the cinder iSCSI service over an untrusted LAN it - is desirable to be able to enable authentication for the iSCSI - protocol. CHAP is the commonly used authentication protocol for - iSCSI. This is not considered mandatory to support. (?) 
-cli= -driver.powervm=complete - -[storage.image] -title=Image storage support -status=mandatory -notes=This refers to the ability to boot an instance from an image - stored in the glance image repository. Without this feature it - would not be possible to bootstrap from a clean environment, since - there would be no way to get block volumes populated and reliance - on external PXE servers is out of scope. Therefore this is considered - a mandatory storage feature to support. -cli=nova boot --image -driver.powervm=complete - -[networking.firewallrules] -title=Network firewall rules -status=optional -notes=Unclear how this is different from security groups -cli= -driver.powervm=missing - -[networking.routing] -title=Network routing -status=optional -notes=Unclear what this refers to -cli= -driver.powervm=complete - -[networking.securitygroups] -title=Network security groups -status=optional -notes=The security groups feature provides a way to define rules - to isolate the network traffic of different instances running - on a compute host. This would prevent actions such as MAC and - IP address spoofing, or the ability to setup rogue DHCP servers. - In a private cloud environment this may be considered to be a - superfluous requirement. Therefore this is considered to be an - optional configuration to support. -cli= -driver.powervm=missing - -[networking.topology.flat] -title=Flat networking -status=choice(networking.topology) -notes=Provide network connectivity to guests using a - flat topology across all compute nodes. At least one - of the networking configurations is mandatory to - support in the drivers. -cli= -driver.powervm=complete - -[networking.topology.vlan] -title=VLAN networking -status=choice(networking.topology) -notes=Provide network connectivity to guests using VLANs to define the - topology when using nova-network. At least one of the networking - configurations is mandatory to support in the drivers. -cli= -driver.powervm=complete - -[operation.uefi-boot] -title=uefi boot -status=optional -notes=This allows users to boot a guest with uefi firmware. -cli= -driver.powervm=missing - -[operation.device-tags] -title=Device tags -status=optional -notes=This allows users to set tags on virtual devices when creating a - server instance. Device tags are used to identify virtual device - metadata, as exposed in the metadata API and on the config drive. - For example, a network interface tagged with "nic1" will appear in - the metadata along with its bus (ex: PCI), bus address - (ex: 0000:00:02.0), MAC address, and tag (nic1). If multiple networks - are defined, the order in which they appear in the guest operating - system will not necessarily reflect the order in which they are given - in the server boot request. Guests should therefore not depend on - device order to deduce any information about their network devices. - Instead, device role tags should be used. Device tags can be - applied to virtual network interfaces and block devices. -cli=nova boot -driver.powervm=missing - -[operation.quiesce] -title=quiesce -status=optional -notes=Quiesce the specified instance to prepare for snapshots. - For libvirt, guest filesystems will be frozen through qemu - agent. 
-cli= -driver.powervm=missing - -[operation.unquiesce] -title=unquiesce -status=optional -notes=See notes for the quiesce operation -cli= -driver.powervm=missing - -[operation.multiattach-volume] -title=Attach block volume to multiple instances -status=optional -notes=The multiattach volume operation is an extension to - the attach volume operation. It allows a - single volume to be attached to multiple instances. This operation is - not considered to be mandatory to support. - Note that for the libvirt driver, this is only supported - if qemu<2.10 or libvirt>=3.10. -cli=nova volume-attach -driver.powervm=missing diff --git a/doc/source/support-matrix.rst b/doc/source/support-matrix.rst deleted file mode 100644 index ed08bdb3..00000000 --- a/doc/source/support-matrix.rst +++ /dev/null @@ -1,41 +0,0 @@ - -Feature Support Matrix -====================== - -.. warning:: - Please note, while this document is still being maintained, it is slowly - being updated to re-group and classify features. - -When considering which capabilities should be marked as mandatory the -following general guiding principles were applied: - -* **Inclusivity** - people have shown ability to make effective - use of a wide range of virtualization technologies with broadly - varying featuresets. Aiming to keep the requirements as inclusive - as possible avoids second-guessing what a user may wish to use - the cloud compute service for. - -* **Bootstrapping** - a practical use case test is to consider that the - starting point for the compute deployment is an empty data center - with new machines and network connectivity. Then look at the - minimum features required of a compute service, in order - to get user instances running and processing work over the - network. - -* **Competition** - an early leader in the cloud compute service space - was Amazon EC2. A sanity check for whether a feature should be - mandatory is to consider whether it was available in the first - public release of EC2. This had quite a narrow featureset, but - nonetheless found very high usage in many use cases. So it - serves to illustrate that many features need not be considered - mandatory in order to get useful work done. - -* **Reality** - there are many virt drivers currently shipped with - Nova, each with their own supported feature set. Any feature which is - missing in at least one virt driver that is already in-tree must, - by inference, be considered optional until all in-tree drivers - support it. This does not rule out the possibility of a currently - optional feature becoming mandatory at a later date, based on other - principles above. - -..
support_matrix:: support-matrix.ini diff --git a/lower-constraints.txt b/lower-constraints.txt deleted file mode 100644 index fc79cbc8..00000000 --- a/lower-constraints.txt +++ /dev/null @@ -1,183 +0,0 @@ -alembic==0.9.8 -amqp==2.2.2 -appdirs==1.4.3 -asn1crypto==0.24.0 -attrs==17.4.0 -automaton==1.14.0 -Babel==2.3.4 -bashate==0.5.1 -bandit==1.1.0 -bcrypt==3.1.4 -cachetools==2.0.1 -castellan==0.16.0 -certifi==2018.1.18 -cffi==1.11.5 -chardet==3.0.4 -cliff==2.11.0 -cmd2==0.8.1 -colorama==0.3.9 -contextlib2==0.5.5 -coverage==4.0 -cryptography==2.1.4 -cursive==0.2.1 -ddt==1.0.1 -debtcollector==1.19.0 -decorator==3.4.0 -deprecation==2.0 -dogpile.cache==0.6.5 -enum34==1.0.4 -enum-compat==0.0.2 -eventlet==0.20.0 -extras==1.0.0 -fasteners==0.14.1 -fixtures==3.0.0 -flake8==2.5.5 -future==0.16.0 -futurist==1.8.0 -gabbi==1.35.0 -gitdb2==2.0.3 -GitPython==2.1.8 -greenlet==0.4.10 -hacking==0.12.0 -idna==2.6 -iso8601==0.1.11 -Jinja2==2.10 -jmespath==0.9.3 -jsonpatch==1.21 -jsonpath-rw==1.4.0 -jsonpath-rw-ext==1.1.3 -jsonpointer==2.0 -jsonschema==2.6.0 -keystoneauth1==3.9.0 -keystonemiddleware==4.20.0 -kombu==4.1.0 -linecache2==1.0.0 -lxml==3.4.1 -Mako==1.0.7 -MarkupSafe==1.0 -mccabe==0.2.1 -microversion-parse==0.2.1 -mock==2.0.0 -monotonic==1.4 -mox3==0.20.0 -msgpack==0.5.6 -msgpack-python==0.5.6 -munch==2.2.0 -netaddr==0.7.18 -netifaces==0.10.4 -networkx==1.11 -numpy==1.14.2 -openstacksdk==0.12.0 -os-brick==2.6.1 -os-client-config==1.29.0 -os-resource-classes==0.1.0 -os-service-types==1.2.0 -os-traits==0.12.0 -os-vif==1.14.0 -os-win==3.0.0 -os-xenapi==0.3.3 -osc-lib==1.10.0 -oslo.cache==1.26.0 -oslo.concurrency==3.26.0 -oslo.config==6.1.0 -oslo.context==2.19.2 -oslo.db==4.44.0 -oslo.i18n==3.15.3 -oslo.log==3.36.0 -oslo.messaging==7.0.0 -oslo.middleware==3.31.0 -oslo.policy==1.35.0 -oslo.privsep==1.32.0 -oslo.reports==1.18.0 -oslo.rootwrap==5.8.0 -oslo.serialization==2.21.1 -oslo.service==1.34.0 -oslo.upgradecheck==0.1.1 -oslo.utils==3.37.0 -oslo.versionedobjects==1.35.0 -oslo.vmware==2.17.0 -oslotest==3.2.0 -osprofiler==1.4.0 -ovs==2.10.0 -ovsdbapp==0.15.0 -packaging==17.1 -paramiko==2.0.0 -Paste==2.0.2 -PasteDeploy==1.5.0 -pbr==2.0.0 -pep8==1.5.7 -pika-pool==0.1.3 -pika==0.10.0 -pluggy==0.6.0 -ply==3.11 -prettytable==0.7.1 -psutil==3.2.2 -psycopg2==2.8.3 -py==1.5.2 -pyasn1==0.4.2 -pyasn1-modules==0.2.1 -pycadf==2.7.0 -pycparser==2.18 -pyflakes==0.8.1 -pycodestyle==2.0.0 -pyinotify==0.9.6 -pyroute2==0.5.4 -PyJWT==1.7.0 -PyMySQL==0.7.6 -PyNaCl==1.2.1 -pyOpenSSL==17.5.0 -pyparsing==2.2.0 -pyperclip==1.6.0 -pypowervm==1.1.23 -pytest==3.4.2 -python-barbicanclient==4.5.2 -python-cinderclient==3.3.0 -python-dateutil==2.5.3 -python-editor==1.0.3 -python-glanceclient==2.8.0 -python-ironicclient==2.7.0 -python-keystoneclient==3.15.0 -python-mimeparse==1.6.0 -python-neutronclient==6.7.0 -python-subunit==1.2.0 -python-swiftclient==3.2.0 -pytz==2018.3 -PyYAML==3.12 -repoze.lru==0.7 -requests==2.14.2 -requests-mock==1.2.0 -requestsexceptions==1.4.0 -retrying==1.3.3 -rfc3986==1.1.0 -Routes==2.3.1 -simplejson==3.13.2 -six==1.10.0 -smmap2==2.0.3 -sortedcontainers==2.1.0 -SQLAlchemy==1.0.10 -Sphinx==1.6.2 -sqlalchemy-migrate==0.11.0 -sqlparse==0.2.4 -statsd==3.2.2 -stestr==1.0.0 -stevedore==1.20.0 -setuptools==21.0.0 -suds-jurko==0.6 -taskflow==2.16.0 -Tempita==0.5.2 -tenacity==4.9.0 -testrepository==0.0.20 -testresources==2.0.0 -testscenarios==0.4 -testtools==2.2.0 -tooz==1.58.0 -traceback2==1.4.0 -unittest2==1.1.0 -urllib3==1.22 -vine==1.1.4 -voluptuous==0.11.1
-warlock==1.3.0 -WebOb==1.8.2 -websockify==0.8.0 -wrapt==1.10.11 -wsgi-intercept==1.7.0 diff --git a/nova/__init__.py b/nova/__init__.py deleted file mode 100644 index 671e539d..00000000 --- a/nova/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Required to play nicely with namespace composition (PEP420). -__import__('pkg_resources').declare_namespace(__name__) diff --git a/nova/virt/__init__.py b/nova/virt/__init__.py deleted file mode 100644 index 671e539d..00000000 --- a/nova/virt/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Required to play nicely with namespace composition (PEP420). -__import__('pkg_resources').declare_namespace(__name__) diff --git a/nova/virt/powervm_ext/__init__.py b/nova/virt/powervm_ext/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova/virt/powervm_ext/driver.py b/nova/virt/powervm_ext/driver.py deleted file mode 100644 index 1c9ec179..00000000 --- a/nova/virt/powervm_ext/driver.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Shim layer for nova_powervm.virt.powervm.driver.PowerVMDriver. - -Duplicate all public symbols. This is necessary for the constants as well as -the classes - because instances of the classes need to be able to resolve -references to the constants. 
-""" -import nova_powervm.virt.powervm.driver as real_drv - -LOG = real_drv.LOG -CONF = real_drv.CONF -DISK_ADPT_NS = real_drv.DISK_ADPT_NS -DISK_ADPT_MAPPINGS = real_drv.DISK_ADPT_MAPPINGS -NVRAM_NS = real_drv.NVRAM_NS -NVRAM_APIS = real_drv.NVRAM_APIS -KEEP_NVRAM_STATES = real_drv.KEEP_NVRAM_STATES -FETCH_NVRAM_STATES = real_drv.FETCH_NVRAM_STATES -PowerVMDriver = real_drv.PowerVMDriver diff --git a/nova_powervm/__init__.py b/nova_powervm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/conf/__init__.py b/nova_powervm/conf/__init__.py deleted file mode 100644 index d91793c4..00000000 --- a/nova_powervm/conf/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import nova.conf - -from nova_powervm.conf import powervm - -CONF = nova.conf.CONF - -powervm.register_opts(CONF) diff --git a/nova_powervm/conf/powervm.py b/nova_powervm/conf/powervm.py deleted file mode 100644 index 890bac3d..00000000 --- a/nova_powervm/conf/powervm.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -CONF = cfg.CONF - -powervm_group = cfg.OptGroup( - 'powervm', - title='PowerVM Options') - - -powervm_opts = [ - cfg.IntOpt('uncapped_proc_weight', - default=64, min=1, max=255, - help='The processor weight to assign to newly created VMs. ' - 'Value should be between 1 and 255. Represents how ' - 'aggressively LPARs grab CPU when unused cycles are ' - 'available.'), - cfg.StrOpt('vopt_media_volume_group', - default='rootvg', - help='The volume group on the system that should be used ' - 'to store the config drive metadata that will be attached ' - 'to VMs. If not specified and no media repository ' - 'exists, rootvg will be used. This option is ignored if ' - 'a media repository already exists.'), - cfg.IntOpt('vopt_media_rep_size', - default=1, min=1, - help='The size of the media repository (in GB) for the ' - 'metadata for config drive. 
Only used if the media ' - 'repository needs to be created.'), - cfg.StrOpt('image_meta_local_path', - default='/tmp/cfgdrv/', - help='The location where the config drive ISO files should be ' - 'built.'), - cfg.StrOpt('pvm_vswitch_for_novalink_io', - default='NovaLinkVEABridge', - help="Name of the PowerVM virtual switch to be used when " - "mapping Linux based network ports to PowerVM virtual " - "Ethernet devices."), - cfg.BoolOpt('remove_vopt_media_on_boot', - default=False, - help="If enabled, tells the PowerVM driver to trigger the " - "removal of the media from the virtual optical device " - "used for initialization of VMs on spawn after " - "'remove_vopt_media_time' minutes."), - cfg.IntOpt('remove_vopt_media_time', - default=60, min=0, - help="The amount of time in minutes after a VM has been " - "created for the virtual optical media to be removed."), - cfg.BoolOpt('use_rmc_mgmt_vif', - default=True, - help="If enabled, tells the PowerVM Driver to create an RMC " - "network interface on the deploy of a VM. This is an " - "adapter that can only talk to the NovaLink partition " - "and enables DLPAR actions."), - cfg.BoolOpt('use_rmc_ipv6_scheme', - default=True, - help="Only used if use_rmc_mgmt_vif is True and config drive " - "is being used. If set, the system will configure the " - "RMC network interface with an IPv6 link local address. " - "This is generally set to True, but users may wish to " - "turn this off if their operating system has " - "compatibility issues."), - cfg.IntOpt('vios_active_wait_timeout', - default=300, - help="Default time in seconds to wait for Virtual I/O Server " - "to be up and running.") -] - -ssp_opts = [ - cfg.StrOpt('cluster_name', - default='', - help='Cluster hosting the Shared Storage Pool to use for ' - 'storage operations. If none specified, the host is ' - 'queried; if a single Cluster is found, it is used. ' - 'Not used unless disk_driver option is set to ssp.') -] - -vol_adapter_opts = [ - cfg.StrOpt('fc_attach_strategy', - choices=['vscsi', 'npiv'], ignore_case=True, - default='vscsi', mutable=True, - help='The Fibre Channel Volume Strategy defines how FC Cinder ' - 'volumes should be attached to the Virtual Machine. The ' - 'options are: npiv or vscsi. If npiv is selected then ' - 'the ports_per_fabric and fabrics option should be ' - 'specified and at least one fabric_X_port_wwpns option ' - '(where X corresponds to the fabric name) must be ' - 'specified.'), - cfg.StrOpt('fc_npiv_adapter_api', - default='nova_powervm.virt.powervm.volume.npiv.' - 'NPIVVolumeAdapter', - help='Volume Adapter API to connect FC volumes using NPIV ' - 'connection mechanism.'), - cfg.StrOpt('fc_vscsi_adapter_api', - default='nova_powervm.virt.powervm.volume.vscsi.' - 'PVVscsiFCVolumeAdapter', - help='Volume Adapter API to connect FC volumes through Virtual ' - 'I/O Server using PowerVM vSCSI connection mechanism.'), - cfg.IntOpt('vscsi_vios_connections_required', - default=1, min=1, - help='Indicates a minimum number of Virtual I/O Servers that ' - 'are required to support a Cinder volume attach with the ' - 'vSCSI volume connector.'), - cfg.BoolOpt('volume_use_multipath', - default=False, - help="Use multipath connections when attaching iSCSI or FC " - "volumes."), - cfg.StrOpt('iscsi_iface', - default='default', - help="The iSCSI transport iface to use to connect to target in " - "case offload support is desired. Do not confuse the " - "iscsi_iface parameter to be provided here with the " - "actual transport name."), - cfg.StrOpt('rbd_user', - default='', - help="Refer to this user when connecting and authenticating " - "with the Ceph RBD server.") -]
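For context, an illustrative note (not code from this tree): dotted-path options such as fc_npiv_adapter_api and fc_vscsi_adapter_api above are conventionally resolved into classes at runtime with the oslo.utils loader::

    from oslo_utils import importutils

    # The configured dotted path names a volume adapter implementation.
    # import_class raises ImportError if that package is not installed.
    path = 'nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter'
    adapter_cls = importutils.import_class(path)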
- -# NPIV Options. Only applicable if the 'fc_attach_strategy' is set to 'npiv'. -# Otherwise this section can be ignored. -npiv_opts = [ - cfg.IntOpt('ports_per_fabric', - default=1, min=1, - help='The number of physical ports that should be connected ' - 'directly to the Virtual Machine, per fabric. ' - 'Example: 2 fabrics and ports_per_fabric set to 2 will ' - 'result in 4 NPIV ports being created, two per fabric. ' - 'If multiple Virtual I/O Servers are available, will ' - 'attempt to span ports across I/O Servers.'), - cfg.StrOpt('fabrics', default='A', - help='Unique identifier for each physical FC fabric that is ' - 'available. This is a comma separated list. If there ' - 'are two fabrics for multi-pathing, then this could be ' - 'set to A,B. ' - 'The fabric identifiers are used for the ' - '\'fabric_X_port_wwpns\' key.') -] - -remote_restart_opts = [ - cfg.StrOpt('nvram_store', - choices=['none', 'swift'], ignore_case=True, - default='none', - help='The NVRAM store to use to hold the PowerVM NVRAM for ' - 'virtual machines.'), -] - -swift_opts = [ - cfg.StrOpt('swift_container', default='powervm_nvram', - help='The Swift container to store the PowerVM NVRAM in. This ' - 'must be configured to the same value for all compute hosts.'), - cfg.StrOpt('swift_username', default='powervm', - help='The Swift user name to use for operations that use ' - 'the Swift store.'), - cfg.StrOpt('swift_user_domain_name', default='powervm', - help='The Swift domain the user is a member of.'), - cfg.StrOpt('swift_password', secret=True, - help='The password for the Swift user.'), - cfg.StrOpt('swift_project_name', default='powervm', - help='The Swift project.'), - cfg.StrOpt('swift_project_domain_name', default='powervm', - help='The Swift project domain.'), - cfg.StrOpt('swift_auth_version', default='3', help='The Keystone API ' - 'version.'), - cfg.StrOpt('swift_auth_url', help='The Keystone authorization url. ' - 'Example: "http://keystone-hostname:5000/v3"'), - cfg.StrOpt('swift_cacert', required=False, help='Path to CA certificate ' - 'file. Example: /etc/swiftclient/myca.pem'), - cfg.StrOpt('swift_endpoint_type', help='The endpoint/interface type for ' - 'the Swift client to select from the Keystone Service Catalog ' - 'for the connection URL. Swift defaults to "publicURL".') -] - -vnc_opts = [ - cfg.BoolOpt('vnc_use_x509_auth', default=False, - help='If enabled, uses X509 Authentication for the ' - 'VNC sessions started for each VM.'), - cfg.StrOpt('vnc_ca_certs', help='Path to CA certificate ' - 'to use for verifying VNC X509 Authentication.'), - cfg.StrOpt('vnc_server_cert', help='Path to Server certificate ' - 'to use for verifying VNC X509 Authentication.'), - cfg.StrOpt('vnc_server_key', help='Path to Server private key ' - 'to use for verifying VNC X509 Authentication.') -] - -STATIC_OPTIONS = (powervm_opts + ssp_opts + vol_adapter_opts + npiv_opts - + remote_restart_opts + swift_opts + vnc_opts) - - -# Dictionary where the key is the NPIV Fabric Name, and the value is a list of -# Physical WWPNs that match the key. -NPIV_FABRIC_WWPNS = {} -FABRIC_WWPN_HELP = ('A comma delimited list of all the physical FC port ' - 'WWPNs that support the specified fabric. Is tied to ' - 'the NPIV fabrics key.') -# This is only used to provide a sample for the list_opt() method -fabric_sample = [ - cfg.StrOpt('fabric_A_port_wwpns', default='', help=FABRIC_WWPN_HELP), - cfg.StrOpt('fabric_B_port_wwpns', default='', help=FABRIC_WWPN_HELP), -] - - -def _register_fabrics(conf, fabric_mapping): - """Registers the fabrics to WWPNs options and builds a mapping. - - This method registers the 'fabric_X_port_wwpns' (where X is determined by - the 'fabrics' option values) and then builds a dictionary that maps the - fabrics to the WWPNs. This mapping can then be used later without having - to reparse the options. - """ - # At this point, the fabrics should be specified. Iterate over those to - # determine the port_wwpns per fabric. - if conf.powervm.fabrics is not None: - port_wwpn_keys = [] - fabrics = conf.powervm.fabrics.split(',') - for fabric in fabrics: - opt = cfg.StrOpt('fabric_%s_port_wwpns' % fabric, - default='', help=FABRIC_WWPN_HELP) - port_wwpn_keys.append(opt) - - conf.register_opts(port_wwpn_keys, group='powervm') - - # Now that we've registered the fabrics, saturate the NPIV dictionary - for fabric in fabrics: - key = 'fabric_%s_port_wwpns' % fabric - wwpns = conf.powervm[key].split(',') - wwpns = [x.upper().strip(':') for x in wwpns] - fabric_mapping[fabric] = wwpns - - -def register_opts(conf): - conf.register_group(powervm_group) - conf.register_opts(STATIC_OPTIONS, group=powervm_group) - _register_fabrics(conf, NPIV_FABRIC_WWPNS) - - -# To generate a sample config run: -# $ oslo-config-generator --namespace nova_powervm > nova_powervm_sample.conf -def list_opts(): - # The nova conf tooling expects each module to return a dict of options. - # When powervm is pulled into nova proper the return value would be in - # this form: - # return {powervm_group.name: STATIC_OPTIONS + fabric_sample} - # - # The oslo-config-generator tooling expects a tuple: - return [(powervm_group.name, STATIC_OPTIONS + fabric_sample)]
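To make the dynamic registration performed by _register_fabrics concrete, here is a standalone sketch under assumed option values (fabrics = A,B); it mirrors the loop above but is not code from this tree::

    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    conf.register_group(cfg.OptGroup('powervm'))
    conf.register_opts([cfg.StrOpt('fabrics', default='A,B')],
                       group='powervm')
    conf([])  # parse an empty command line so defaults resolve

    # One fabric_<name>_port_wwpns StrOpt is registered per listed fabric,
    # exactly as _register_fabrics does for the real option set.
    for fabric in conf.powervm.fabrics.split(','):
        conf.register_opts(
            [cfg.StrOpt('fabric_%s_port_wwpns' % fabric, default='')],
            group='powervm')

    print(conf.powervm.fabric_A_port_wwpns)  # '' until the operator sets it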
Is tied to ' - 'the NPIV fabrics key.') -# This is only used to provide a sample for the list_opt() method -fabric_sample = [ - cfg.StrOpt('fabric_A_port_wwpns', default='', help=FABRIC_WWPN_HELP), - cfg.StrOpt('fabric_B_port_wwpns', default='', help=FABRIC_WWPN_HELP), -] - - -def _register_fabrics(conf, fabric_mapping): - """Registers the fabrics to WWPNs options and builds a mapping. - - This method registers the 'fabric_X_port_wwpns' (where X is determined by - the 'fabrics' option values) and then builds a dictionary that mapps the - fabrics to the WWPNs. This mapping can then be later used without having - to reparse the options. - """ - # At this point, the fabrics should be specified. Iterate over those to - # determine the port_wwpns per fabric. - if conf.powervm.fabrics is not None: - port_wwpn_keys = [] - fabrics = conf.powervm.fabrics.split(',') - for fabric in fabrics: - opt = cfg.StrOpt('fabric_%s_port_wwpns' % fabric, - default='', help=FABRIC_WWPN_HELP) - port_wwpn_keys.append(opt) - - conf.register_opts(port_wwpn_keys, group='powervm') - - # Now that we've registered the fabrics, saturate the NPIV dictionary - for fabric in fabrics: - key = 'fabric_%s_port_wwpns' % fabric - wwpns = conf.powervm[key].split(',') - wwpns = [x.upper().strip(':') for x in wwpns] - fabric_mapping[fabric] = wwpns - - -def register_opts(conf): - conf.register_group(powervm_group) - conf.register_opts(STATIC_OPTIONS, group=powervm_group) - _register_fabrics(conf, NPIV_FABRIC_WWPNS) - - -# To generate a sample config run: -# $ oslo-config-generator --namespace nova_powervm > nova_powervm_sample.conf -def list_opts(): - # The nova conf tooling expects each module to return a dict of options. - # When powervm is pulled into nova proper the return value would be in - # this form: - # return {powervm_group.name: STATIC_OPTIONS + fabric_sample} - # - # The oslo-config-generator tooling expects a tuple: - return [(powervm_group.name, STATIC_OPTIONS + fabric_sample)] diff --git a/nova_powervm/hacking/__init__.py b/nova_powervm/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/hacking/checks.py b/nova_powervm/hacking/checks.py deleted file mode 100644 index 24095bbc..00000000 --- a/nova_powervm/hacking/checks.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.hacking import checks - - -def factory(register): - checks.factory(register) diff --git a/nova_powervm/locale/de/nova-powervm.po b/nova_powervm/locale/de/nova-powervm.po deleted file mode 100644 index 82c059e8..00000000 --- a/nova_powervm/locale/de/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. 
-# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=2; plural=n != 1;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Erwartet wurde genau ein Host; gefunden: %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"Die Momentaufnahmeoperation wird in Verbindung mit der " -"CONF.powervm.disk_driver-Einstellung %s nicht unterstützt." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "VIF einfügen fehlgeschlagen, da Instanz %s nicht gefunden wurde." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "VIF einfügen wegen eines unerwarteten Fehlers fehlgeschlagen." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "Plattengröße kann nicht verringert werden." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "Lokale Festplatten können nicht migriert werden." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"Das VNC-basierte Terminal für Instanz %(instance_name)s konnte nicht geöffnet werden: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"Die Datenträgergruppe %(vol_grp)s, in der die virtuellen " -"optischen Medien gespeichert werden sollen, wurde nicht gefunden. Das Medienrepository konnte nicht erstellt werden." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"Der SCSI-Bus %(bus)x auf der Managementpartition wurde durchsucht, die Platte mit " -"UDID %(udid)s erschien nach %(polls)d Abfragen über %(timeout)d " -"Sekunden nicht." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"Erwartet wurde genau eine Platte auf der Managementpartition unter " -"%(path_pattern)s; gefunden wurden %(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"Die Einheit %(devpath)s ist immer noch auf der Managementpartition vorhanden, nachdem " -"versucht wurde, sie zu löschen. Es wurde %(polls)d Mal in %(timeout)d " -"Sekunden abgefragt." 
- -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"Fehler beim Zuordnen des Bootdatenträgers von Instanz %(instance_name)s zur Managementpartition " -"eines virtuellen E/A-Servers." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Die neu erstellte Zuordnung des Speicherelements %(stg_name)s vom" -" virtuellen E/A-Server %(vios_name)s zur Managementpartition konnte nicht gefunden werden." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "Die Datenträgergruppe '%(vg_name)s' für diesen Vorgang konnte nicht lokalisiert werden." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "Cluster '%(clust_name)s' für diesen Vorgang konnte nicht lokalisiert werden." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "Es konnte kein Cluster für diesen Vorgang lokalisiert werden." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Gefunden wurden unerwartet %(clust_count)d Cluster zu dem Namen " -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"Kein cluster_name angegeben. Verweigerung der Auswahl eines der %(clust_count)d " -" gefundenen Cluster." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Der Speicher (ID: %(volume_id)s) konnte nicht zur virtuellen Maschine " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"Fehler beim Erweitern des Datenträgers (ID: %(volume_id)s) auf der virtuellen Maschine " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Der Datenträger (ID: %(volume_id)s) konnte nicht von der virtuellen Maschine " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"Schritte vor der Livemigration für Datenträger (ID: %(volume_id)s) " -"von der virtuellen Maschine %(instance_name)s konnten nicht durchgeführt werden." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "PowerVM-API fehlgeschlagen für Instanz=%(inst_name)s.%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. 
The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Es ist kein virtueller E/A-Server (VIOS) verfügbar. Der Treiber hat" -" %(wait_time)d Sekunden darauf gewartet, dass ein VIOS aktiv wird. Der Compute-Agent " -"kann nicht gestartet werden, wenn keine virtuellen E/A-Server verfügbar sind. Überprüfen Sie " -"die RMC-Konnektivität zwischen den PowerVM-NovaLink- und den virtuellen E/A-" -"Servern und starten Sie dann den Nova Compute-Agenten erneut." - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "Es sind keine aktiven virtuellen E/A-Server verfügbar." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "Die virtuelle Maschine kann auf einem neuen Host nicht neu erstellt werden. Fehler: %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"Die Option %(then_opt)s ist erforderlich, wenn %(if_opt)s angegeben wurde als " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "Livemigration der Instanz '%(name)s' fehlgeschlagen. Grund: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"Migration der Instanz %(name)s konnte nicht durchgeführt werden, da der Datenträger %(volume)s nicht " -"an den Zielhost %(host)s angehängt werden kann." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"Migration der Instanz %(name)s konnte nicht durchgeführt werden, da der Host %(host)s nur %(allowed)s " -" gleichzeitige Migrationen zulässt und %(running)s Migrationen derzeit ausgeführt werden." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"Migration der Instanz '%(name)s' konnte nicht durchgeführt werden, da die Speicherregionsgröße der " -"Quelle (%(source_mrs)d MB) nicht mit der Speicherregionsgröße des " -"Ziels (%(target_mrs)d MB) übereinstimmt." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"Migration der Instanz %(name)s konnte nicht durchgeführt werden, da ihr Prozessorkompatibilitätsmodus %(mode)s" -" in der Liste der durch den Zielhost unterstützten Modi \"%(modes)s\" nicht enthalten ist." 
- -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"Livemigration der Instanz '%(name)s' fehlgeschlagen. Grund: Migrationsstatus " -"lautet: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"Livemigration der Instanz '%(name)s' fehlgeschlagen, da sie nicht bereit ist. " -"Grund: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "Der Parameter vif_type muss für diese vif_driver-Implementierung vorhanden sein" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"Es kann kein geeigneter PowerVM-VIF-Driver für den VIF-Typ %(vif_type)s " -"auf der Instanz %(instance)s gefunden werden" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"Gefunden wurden keine zulässigen Ethernet-Anschlüsse auf dem physischen Netz " -"'%(physnet)s' für Instanz %(inst)s für den SRIOV-basierten VIF mit der MAC-Adresse " -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Mehrere gemeinsam genutzte Verarbeitungspools mit dem Namen %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "Gemeinsam genutzter Verarbeitungspool %(pool)s nicht gefunden" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"Versionsattribut %(attr)s muss True oder False sein. Der aktuelle Wert " -"%(val)s ist nicht zulässig." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "Die konfigurierte Platte unterstützt keine Migration oder Größenänderung." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "Das Ändern der Größe von dateigestützten Instanzen wird derzeit nicht unterstützt." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"Der Host ist kein Element desselben SSP-Clusters. Quellenhost-" -"Cluster: %(source_clust_name)s. Quellenhost-SSP: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Der nicht flüchtige Arbeitsspeicher konnte für Instanz %(instance)s nicht gespeichert werden. Grund: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Der nicht flüchtige Arbeitsspeicher konnte für Instanz %(instance)s nicht abgerufen werden. 
Grund: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Der nicht flüchtige Arbeitsspeicher konnte für Instanz %(instance)s nicht gelöscht werden. Grund: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "Die Konfigurationsoption '%(option)s' muss festgelegt werden." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "NVRAM konnte nach %d Versuchen nicht gespeichert werden" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "Objekt ist in Swift nicht vorhanden." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Ungültiger Verbindungstyp von %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Es konnte kein virtueller E/A-Server gefunden werden, der die NPIV-Port-Zuordnung für den Server hostet. " -"" - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Fehler beim Erkennen einer gültigen HDisk auf einem virtuellen E/A-Server für Datenträger " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Es konnte keine HDisk in der erforderlichen Anzahl von virtuellen E/A-Servern erkannt werden. " -"Der Datenträger %(volume_id)s erfordert %(vios_req)d virtuelle E/A-Server," -" aber der Datenträger wurde nur auf %(vios_act)d virtuellen E/A-Servern gefunden." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/es/nova-powervm.po b/nova_powervm/locale/es/nova-powervm.po deleted file mode 100644 index 25f29b6b..00000000 --- a/nova_powervm/locale/es/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=2; plural=n != 1;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Se esperaba exactamente un solo host; se han encontrado %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." 
-msgstr "" -"La operación de instantánea no recibe soporte junto con el valor " -"CONF.powervm.disk_driver de %s." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "La conexión de vif ha fallado porque no se ha encontrado la instancia %s." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "La conexión de vif ha fallado debido a un error inesperado." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "No se puede reducir el tamaño de disco." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "No se puede migrar los discos locales." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"No se ha podido abrir el terminal basado en VNC para la instancia %(instance_name)s: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"No se puede ubicar el grupo de volúmenes %(vol_grp)s en el que almacenar " -"el soporte óptico virtual. No se puede crear el repositorio de soportes." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"Tras haber explorado el bus SCSI %(bus)x en la partición de gestión, el disco con " -"UDID %(udid)s no ha aparecido después de los sondeos %(polls)d en %(timeout)d " -"segundos." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"Se esperaba encontrar un único disco en la partición de gestión en " -"%(path_pattern)s; se han encontrado %(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"El dispositivo %(devpath)s todavía está presente en la partición de gestión después " -"de intentar suprimirlo. Se ha sondeado %(polls)d veces durante %(timeout)d " -"segundos." - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"No se ha podido correlacionar el disco de arranque de la instancia %(instance_name)s con la partición " -"de gestión desde ningún servidor de E/S virtual." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"No se ha encontrado la correlación recién creada del elemento de almacenamiento %(stg_name)s del" -" servidor de E/S virtual %(vios_name)s con la partición de gestión." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." 
-msgstr "No se puede ubicar el grupo de volúmenes '%(vg_name)s' para esta operación." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "No se puede ubicar el clúster '%(clust_name)s' para esta operación." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "No se puede ubicar ningún clúster para esta operación." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Inesperadamente, se han encontrado clústeres %(clust_count)d coincidentes con el nombre " -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"No se ha especificado cluster_name. Se rechaza seleccionar uno de los clústeres %(clust_count)d " -" encontrados." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"No se puede asociar el almacenamiento (id: %(volume_id)s) con la máquina virtual " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"No se puede ampliar el volumen (id: %(volume_id)s) en la máquina virtual " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"No se puede desconectar el volumen (id: %(volume_id)s) de la máquina virtual " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"No se pueden realizar los pasos previos a la migración en directo en el volumen (id: %(volume_id)s) " -"desde la máquina virtual %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "La interfaz de programación de aplicaciones de PowerVM no se ha podido completar para la instancia=%(inst_name)s.%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"No hay servidores de E/S virtuales disponibles. El controlador ha intentado esperar a que un " -"VIOS pasara estar activo durante %(wait_time)d segundos. El agente de cálculo " -"no se puede iniciar si no hay ningún servidor de E/S virtual disponible. Compruebe " -"la conectividad RMC entre NovaLink de PowerVM y los servidores de E/S virtuales " -"y luego reinicie el agente de cálculo Nova. " - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." 
-msgstr "No hay servidores de E/S virtuales activos disponibles." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "No se puede recrear la máquina virtual en el host nuevo. El error es %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"La opción %(then_opt)s es necesaria si %(if_opt)s se especifica como " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "La migración en vivo de la instancia '%(name)s' ha fallado por la razón: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"No se puede migrar %(name)s porque no se puede conectar el volumen %(volume)s " -"en el host de destino %(host)s." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"No se puede migrar %(name)s porque el host %(host)s solo permite %(allowed)s" -" migraciones simultáneas y hay actualmente %(running)s migraciones en ejecución." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"No se puede migrar la instancia '%(name)s' porque el tamaño de región de memoria del " -"origen (%(source_mrs)d MB) no coincide con el tamaño de región de memoria del " -"destino (%(target_mrs)d MB)." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"No se puede migrar %(name)s porque su modalidad de compatibilidad del procesador %(mode)s" -" no está en la lista de modalidades \"%(modes)s\" soportadas por el host de destino." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"La migración en vivo de la instancia '%(name)s' ha fallado porque el estado de migración " -"es: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"La migración en vivo de la instancia '%(name)s' ha fallado porque no está lista. " -"Razón: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "El parámetro vif_type debe estar presente para esta implementación de vif_driver." 
- -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"No se ha podido encontrar el controlador de VIF de PowerVM apropiado para el tipo de VIF %(vif_type)s " -"en la instancia %(instance)s." - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"No se pueden encontrar puertos Ethernet aceptables en la red física " -"'%(physnet)s' para la instancia %(inst)s para el VIF basado en SRIOV con la dirección MAC " -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Varias agrupaciones de proceso compartidas con el nombre %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "No se puede encontrar la agrupación de proceso compartida %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"El atributo de flavor %(attr)s debe ser True o False. El valor actual " -"%(val)s no está permitido." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "El controlador de disco configurado no admite la migración ni el redimensionamiento." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "El redimensionamiento de instancias respaldadas por archivos no está soportado actualmente." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"El host no es miembro del mismo clúster de SSP. El clúster de host de " -"origen: %(source_clust_name)s. El SSP del host de origen: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"La NVRAM no se ha podido almacenar para la instancia %(instance)s. Razón: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"La NVRAM no se ha podido captar para la instancia %(instance)s. Razón: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"La NVRAM no se ha podido suprimir para la instancia %(instance)s. Razón: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "La opción de configuración '%(option)s' debe establecerse." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "No se puede almacenar NVRAM después de %d intentos" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "El objeto no existe en Swift."
- -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Tipo de conexión no válido de %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"No se ha podido encontrar ningún servidor de E/S virtual que aloje la correlación de puerto de NPIV para el " -"servidor." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"No se ha podido descubrir hdisk válido en ningún servidor de E/S virtual para el volumen " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Se ha encontrado un error en el descubrimiento del hdisk en el número necesario de servidores de E/S " -"virtuales. El volumen %(volume_id)s necesita %(vios_req)d servidores de E/S virtuales, " -" pero el disco solo se ha encontrado en %(vios_act)d servidores de E/S virtuales." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/fr/nova-powervm.po b/nova_powervm/locale/fr/nova-powervm.po deleted file mode 100644 index dbbcfd2c..00000000 --- a/nova_powervm/locale/fr/nova-powervm.po +++ /dev/null @@ -1,427 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR <EMAIL@ADDRESS>, 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" -"Language: en\n" -"Language-Team: en <LL@li.org>\n" -"Plural-Forms: nplurals=2; plural=n>1;\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Précisément un hôte attendu ; trouvé %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"Opération d'instantané non prise en charge en association avec " -"un paramètre CONF.powervm.disk_driver de %s." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "Echec de connexion vif car l'instance %s est introuvable." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "Echec de connexion vif en raison d'une erreur inattendue." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "Impossible de réduire la taille du disque." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "Impossible de migrer des disques locaux."
- -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"Echec d'ouverture du terminal basé VNC pour l'instance %(instance_name)s : " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"Impossible de localiser le groupe de volumes %(vol_grp)s dans lequel " -"est stocké le support optique virtuel. Impossible de créer " -"le référentiel de supports." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"Après analyse du bus SCSI %(bus)x sur la partition de gestion, le disque " -"avec l'UDID %(udid)s n'est pas apparu après %(polls)d interrogations en " -"%(timeout)d secondes." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"Précisément un disque attendu sur la partition de gestion à l'adresse " -"%(path_pattern)s ; trouvé %(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"L'unité %(devpath)s est encore présente sur la partition de gestion après " -"la tentative de suppression. %(polls)d interrogations en %(timeout)d " -"secondes." - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"Echec du mappage du disque d'amorçage de l'instance %(instance_name)s " -"sur la partition de gestion depuis tout serveur Virtual I/O Server." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Echec de détection du mappage nouvellement créé de l'élément de stockage" -" %(stg_name)s du serveur VIOS %(vios_name)s vers la partition de gestion." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "Impossible de localiser le groupe de volumes '%(vg_name)s' pour cette opération." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "Impossible de localiser la grappe '%(clust_name)s' pour cette opération." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "Impossible de localiser une grappe pour cette opération." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Détection inattendue de %(clust_count)d grappes correspondant au nom " -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. 
Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"Aucun cluster_name spécifié. Refus de sélectionner une des %(clust_count)d" -" grappes détectées." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Impossible de connecter le stockage (ID : %(volume_id)s) à la machine " -"virtuelle %(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"Impossible d'étendre le volume (ID : %(volume_id)s) de la machine " -"virtuelle %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Impossible de déconnecter le volume (ID : %(volume_id)s) de la machine " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"Impossible d'effectuer la procédure de pré-migration sur le volume " -"(ID : %(volume_id)s) depuis la machine virtuelle %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "Echec de l'API PowerVM pour l'instance=%(inst_name)s.%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Aucun serveur VIOS disponible. Le pilote a tenté d'attendre qu'un VIOS" -" soit disponible pendant %(wait_time)d s. L'agent de calcul ne peut pas " -"démarrer si aucun serveur VIOS n'est disponible. Vérifiez la connectivité " -"RMC entre les serveurs PowerVM NovaLink et Virtual I/O Server, puis " -"redémarrez l'aget de calcul Nova. " - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "Aucun serveur Virtual I/O Server actif disponible." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "Impossible de régénérer la machine virtuelle sur le nouvel hôte. Erreur : %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"L'option %(then_opt)s est obligatoire si %(if_opt)s est spécifié pour " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "Echec de la migration active de l'instance '%(name)s' ; motif : %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." 
-msgstr "" -"Impossible de migrer %(name)s car le volume %(volume)s ne peut pas être " -"connecté à l'hôte de destination %(host)s." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"Impossible de migrer %(name)s car l'hôte %(host)s autorise smt %(allowed)s" -" %(allowed)s migrations simultanées et %(running)s sont déjà en cours." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"Impossible de migrer l'instance '%(name)s' car la taille de région de " -"mémoire de la source (%(source_mrs)d Mo) ne correspond pas à celle de " -"la cible (%(target_mrs)d Mo)." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"Impossible de migrer %(name)s car son mode de compatibilité processeur" -" %(mode)s n'est pas dans la liste de modes \"%(modes)s\" pris en charge " -"par l'hôte cible." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"Echec de la migration active de l'instance '%(name)s' car l'état de " -"la migration est %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"Echec de la migration active de l'instance '%(name)s' car non prête. " -"Motif : %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "Le paramètre vif_type doit être présent pour cette implémentation de vif_driver." - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"Pilote VIF PowerVM approprié introuvable pour le type VIF %(vif_type)s " -"sur l'instance %(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"Impossible de trouver des ports Ethernet acceptables sur le réseau " -"physique '%(physnet)s' pour l'instance %(inst)s pour SRIOV basé VIF " -"avec l'adresse MAC %(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Plusieurs pools de traitement partagé avec le nom %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "Impossible de trouver le pool de traitement partagé %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"L'attribut de style %(attr)s doit être Vrai ou Faux. 
La valeur en cours " -"%(val)s n'est pas admise." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "Le pilote de disque configuré ne prend pas en charge la migration ou le redimensionnement." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "Le redimensionnement des instances à base de fichiers n'est pas pris en charge actuellement." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"L'hôte n'est pas membre de la même grappe SSP. Grappe d'hôtes " -"source : %(source_clust_name)s. SSP hôte source : %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossible de stocker la NVRAM pour l'instance %(instance)s. Motif : " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossible d'extraire la NVRAM pour l'instance %(instance)s. Motif : " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossible de supprimer la NVRAM pour l'instance %(instance)s. Motif : " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "L'option de configuration '%(option)s' doit être définie." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "Impossible de stocker la mémoire rémanente après %d tentatives" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "L'objet n'existe pas dans Swift." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Type de connexion non valide : %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Impossible de trouver un serveur VIOS hébergeant la mappe de port NPIV " -"pour le serveur." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Echec de reconnaissance de hdisk valide sur un serveur Virtual I/O Server " -"pour le volume %(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Echec de reconnaissance du hdisk sur le nombre requis de serveurs " -"Virtual I/O Server. Volume %(volume_id)s requérant %(vios_req)d serveurs" -" VIOS mais disque détecté seulement sur %(vios_act)d serveurs VIOS."
- - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/it/nova-powervm.po b/nova_powervm/locale/it/nova-powervm.po deleted file mode 100644 index bb310888..00000000 --- a/nova_powervm/locale/it/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR <EMAIL@ADDRESS>, 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" -"Language: en\n" -"Language-Team: en <LL@li.org>\n" -"Plural-Forms: nplurals=2; plural=n != 1;\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Previsto un solo host; trovati %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"L'operazione di istantanea non è supportata in congiunzione con " -"un'impostazione CONF.powervm.disk_driver di %s." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "Collegamento vif non riuscito perché l'istanza %s non è stata trovata." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "Collegamento vif non riuscito a causa di un errore imprevisto." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "Impossibile ridurre la dimensione del disco." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "Impossibile migrare i dischi locali." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"L'apertura del terminale basato su VNC per l'istanza %(instance_name)s non è riuscita: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"Impossibile individuare il gruppo di volumi %(vol_grp)s per memorizzarvi i supporti ottici " -"virtuali. Impossibile creare il repository di supporti." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"Avendo effettuato la scansione del bus SCSI %(bus)x sulla partizione di gestione, non è stato possibile rilevare " -"il disco con UDID %(udid)s dopo l'esecuzione di %(polls)d operazioni di polling nell'arco di %(timeout)d secondi." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d."
-msgstr "" -"Era previsto trovare un solo disco sulla partizione di gestione in " -"%(path_pattern)s; trovati %(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"Il dispositivo %(devpath)s è ancora presente nella partizione dopo " -"il tentativo di eliminarlo. Operazione di polling eseguita %(polls)d volte nell'arco di %(timeout)d" -"secondi." - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"Impossibile associare il disco di avvio dell'istanza %(instance_name)s " -"alla partizione di gestione da qualsiasi Virtual I/O Server." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Impossibile trovare l'associazione appena creata dell'elemento memoria %(stg_name)s" -" dal Virtual I/O Server %(vios_name)s alla partizione di gestione." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "Impossibile individuare il gruppo di volumi '%(vg_name)s' per questa operazione." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "Impossibile individuare il cluster '%(clust_name)s' per questa operazione." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "Impossibile individuare un cluster per questa operazione." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Sono stati trovati inaspettatamente %(clust_count)d cluster che corrispondono al nome" -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"Nessun cluster_name specificato. Rifiutata la selezione di uno dei %(clust_count)d" -" cluster trovati." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Impossibile collegare la memoria (id: %(volume_id)s) alla macchina virtuale " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"Impossibile estendere il volume (id: %(volume_id)s) sulla macchina virtuale " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Impossibile scollegare il volume (id: %(volume_id)s) dalla macchina virtuale " -"%(instance_name)s. 
%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"Impossibile eseguire i passi preliminari della migrazione live sul volume (id: %(volume_id)s) " -"dalla macchina virtuale %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "Impossibile completare l'API PowerVM per l'istanza=%(inst_name)s.%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Non è disponibile alcun Virtual I/O Server. Il driver ha provato ad attendere che un" -" VIOS diventasse disponibile per %(wait_time)d secondi. L'agent di calcolo " -"non è in grado di avviarsi, se non sono disponibili VIOS (Virtual I/O Server). Controllare " -"la connettività RMC tra PowerVM NovaLink e i Virtual I/O " -"Server, quindi, riavviare l'agent di calcolo Nova." - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "Non sono disponibili Virtual I/O Server attivi." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "Impossibile ricreare la macchina virtuale sul nuovo host. L'errore è %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"L'opzione %(then_opt)s è richiesta se %(if_opt)s è specificato come " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "La migrazione live dell'istanza '%(name)s' non è riuscita per il motivo: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"Impossibile migrare %(name)s, perché il volume %(volume)s non può essere collegato " -"all'host di destinazione %(host)s." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"Impossibile migrare %(name)s perché l'host %(host)s consente solo %(allowed)s" -" migrazioni simultanee e attualmente sono in esecuzione %(running)s migrazioni." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." 
-msgstr "" -"Impossibile migrare l'istanza '%(name)s' perché la dimensione dell'area di memoria " -"dell'origine (%(source_mrs)d MB) non corrisponde alla dimensione dell'area di memoria della " -"destinazione (%(target_mrs)d MB)." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"Impossibile migrare %(name)s, perché la sua modalità di compatibilità del processore %(mode)s " -" non è inclusa nell'elenco di modalità \"%(modes)s\" supportate dall'host di destinazione." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"La migrazione live dell'istanza '%(name)s' non è riuscita perché lo stato della migrazione " -"è: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"La migrazione live dell'istanza '%(name)s' non è riuscita perché non è pronta. " -"Motivo: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "il parametro vif_type deve essere presente per questa implementazione di vif_driver" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"Impossibile trovare il driver PowerVM VIF appropriato per il tipo VIF %(vif_type)s " -"sull'istanza %(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"Impossibile trovare porte Ethernet accettabili sulla rete fisica " -"'%(physnet)s' per l'istanza %(inst)s, per il VIF basato su SRIOV con indirizzo MAC " -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Più pool di elaborazione condivisi con nome %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "Impossibile trovare il pool di elaborazione condiviso %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"L'attributo versione %(attr)s deve essere True o False. Il valore corrente " -"%(val)s non è consentito." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "Il driver disco configurato non supporta la migrazione o il ridimensionamento." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "Il ridimensionamento delle istanze con backup su file non è attualmente supportato." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." 
-msgstr "" -"L'host non è un membro dello stesso cluster SSP. Il cluster dell'host " -"di origine: %(source_clust_name)s. SSP host di origine: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossibile memorizzare NVRAM per l'istanza %(instance)s. Motivo:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossibile recuperare NVRAM per l'istanza %(instance)s. Motivo:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Impossibile eliminare NVRAM per l'istanza %(instance)s. Motivo:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "È necessario impostare l'opzione di configurazione '%(option)s'." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "Impossibile memorizzare NVRAM dopo %d tentativi" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "L'oggetto non esiste in Swift." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Tipo di connessione non valido di %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Impossibile trovare un Virtual I/O Server che ospiti l'associazione porta NPIV per il " -"server." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Impossibile rilevare un disco valido su qualsiasi Virtual I/O Server per il volume " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Impossibile rilevare l'hdisk sul numero richiesto di Virtual I/O " -"Server. Il volume %(volume_id)s richiedeva %(vios_req)d Virtual I/O Server, " -" ma il disco è stato trovato solo su %(vios_act)d Virtual I/O Server." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/ja/nova-powervm.po b/nova_powervm/locale/ja/nova-powervm.po deleted file mode 100644 index 7cd73079..00000000 --- a/nova_powervm/locale/ja/nova-powervm.po +++ /dev/null @@ -1,423 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. 
-# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=1; plural=0;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "予期されたホストは 1 つのみです。検出されたのは %d 個です" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"このスナップショット操作は %s の CONF.powervm.disk_driver 設定と一緒では" -"サポートされません。" - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "インスタンス %s が見つからなかったため、Plug vif は失敗しました。" - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "予期しないエラーが発生したため、Plug vif は失敗しました。" - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "ディスク・サイズを削減できません。" - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "ローカル・ディスクをマイグレーションできません。" - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"インスタンス %(instance_name)s の VNC ベースの端末を開くことができませんでした: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"仮想光メディアの保管場所となるボリューム・グループ %(vol_grp)s が " -"が見つかりません。 メディア・リポジトリーを作成できません。" - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"管理区画で SCSI バス %(bus)x がスキャンされました。%(timeout)d 秒にわたる " -"%(polls)d 回のポーリング後、UDID %(udid)s のディスクは検出されませんでした。" - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"%(path_pattern)s の管理区画で 1 つのディスクのみが見つかると予期されて" -"いましたが、%(count)d 個が見つかりました。" - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"デバイス %(devpath)s は、削除の試行後にも依然として管理区画上に存在します。" -" %(timeout)d 秒にわたって %(polls)d 回ポーリングが行われました。" - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"インスタンス %(instance_name)s のブート・ディスクを、どの " -"Virtual I/O Server からも管理区画にマップできませんでした。" - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." 
-msgstr "" -"Virtual I/O Server %(vios_name)s から管理区画へのストレージ・エレメント " -"%(stg_name)s の新規作成されたマッピングが見つかりませんでした。" - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "この操作用のボリューム・グループ「%(vg_name)s」が見つかりません。" - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "この操作用のクラスター「%(clust_name)s」が見つかりません。" - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "この操作用のクラスターが見つかりません。" - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"名前「%(clust_name)s」に合致するクラスターが予期せず " -"%(clust_count)d 個見つかりました。" - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"cluster_name が指定されていません。 見つかった %(clust_count)d 個の" -"クラスターのうち 1 つを選択することを拒否しています。" - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"ストレージ (id: %(volume_id)s) を仮想マシン %(instance_name)s に" -"接続できません。%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"仮想マシン %(instance_name)s 上でボリューム (id: %(volume_id)s) を" -"拡張できません。" - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"ボリューム (id: %(volume_id)s) を仮想マシン %(instance_name)s から" -"切り離すことができません。%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"ボリューム (id: %(volume_id)s) で仮想マシン %(instance_name)s から" -"ライブ・マイグレーション前手順を実行できません。" - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "PowerVM API はインスタンス %(inst_name)s について完了しませんでした。%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"使用可能な Virtual I/O Server がありません。 ドライバーは、VIOS がアクティブに" -"なるまで %(wait_time)d 秒間待機しようとしました。 使用可能な Virtual I/O " -"Server がない場合、計算エージェントは開始できません。 PowerVM NovaLink と " -"Virtual I/O Server の間の RMC 接続を調べて、Nova 計算エージェントを再始動して" -"ください。" - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "使用可能なアクティブ Virtual I/O Server がありません。" - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. 
Error is %(error)s" -msgstr "新規ホスト上で仮想マシンを再構築できません。 エラーは %(error)s です" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"%(if_opt)s が「%(if_value)s」と指定されている場合、%(then_opt)s オプションが必要です。" - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "インスタンス「%(name)s」のライブ・マイグレーションが次の理由で失敗しました: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"宛先ホスト %(host)s 上でボリューム %(volume)s を接続できないため、" -"%(name)s をマイグレーションできません。" - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"ホスト %(host)s で許可されている並行マイグレーションは " -"%(allowed)s 個であり、現在実行されているマイグレーションは " -"%(running)s 個であるため、%(name)s をマイグレーションできません。" - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"ソースのメモリー領域サイズ (%(source_mrs)d MB) がターゲットのメモリー" -"領域サイズ (%(target_mrs)d MB) と一致しないため、インスタンス" -"「%(name)s」をマイグレーションできません。" - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"ターゲット・ホストでサポートされるモードのリスト「%(modes)s」に" -"プロセッサー互換モード %(mode)s がないため、%(name)s を" -"マイグレーションできません。" - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"マイグレーション状態が次の状態であったため、インスタンス「%(name)s」の" -"ライブ・マイグレーションが失敗しました: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"準備ができていないため、インスタンス「%(name)s」のライブ・マイグレーションが失敗しました。 " -"理由: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "この vif_driver 実装には vif_type パラメーターを指定する必要があります。" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"インスタンス %(instance)s 上で VIF タイプ %(vif_type)s に対して適切な " -"PowerVM VIF ドライバーが見つかりません" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"SRIOV ベースの VIF (MAC アドレス %(vif_mac)s) のインスタンス %(inst)s について" -"物理ネットワーク「%(physnet)s」上に受け入れ可能なイーサネット・ポートが見つかりません。" - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." 
-msgstr "%(pool)s という名前の共用処理プールが複数あります。" - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "共用処理プール %(pool)s が見つかりません" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"フレーバー属性 %(attr)s は True または False でなければなりません。 現行値 " -"%(val)s は許可されていません。" - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "この構成済みディスク・ドライバーはマイグレーションもサイズ変更もサポートしていません。" - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "ファイル支援のインスタンスのサイズ変更は、現在サポートされていません。" - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"このホストは同じ SSP クラスターのメンバーではありません。 ソース・ホスト・" -"クラスター: %(source_clust_name)s。 ソース・ホスト SSP: %(source_ssp_name)s。" - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"インスタンス %(instance)s について NVRAM を格納できませんでした。 理由:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"インスタンス %(instance)s について NVRAM を取り出すことができませんでした。 理由:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"インスタンス %(instance)s について NVRAM を削除できませんでした。 理由:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "構成オプション「%(option)s」を設定する必要があります。" - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "%d 回試みましたが NVRAM を保管できません。" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "オブジェクトが Swift に存在しません。" - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "%s の接続タイプが無効です" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Virtual I/O Server (このサーバー自体の NPIV ポート・マップをホストするもの) が" -"見つかりません。" - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Virtual I/O Server 上でボリュームに対して有効な hdisk をディスカバーできませんでした " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." 
-msgstr "" -"必要な数の Virtual I/O Server 上で hdisk を検出できませんでした。ボリューム " -"%(volume_id)s には %(vios_req)d 個の Virtual I/O Server が必要でしたが、" -"ディスクは %(vios_act)d 個の Virtual I/O Server 上でのみ検出されました。" - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/ko/nova-powervm.po b/nova_powervm/locale/ko/nova-powervm.po deleted file mode 100644 index e4bd7b74..00000000 --- a/nova_powervm/locale/ko/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=1; plural=0;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "정확히 하나의 호스트를 예상했지만 %d개를 찾았습니다." - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"스냅샷 조작은 CONF.powervm.disk_driver 설정이 " -"%s인 경우에는 지원되지 않습니다. " - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "%s 인스턴스를 찾을 수 없으므로 vif 플러그에 실패했습니다." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "예기치 않은 오류 때문에 vif 플러그에 실패했습니다." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "디스크 크기를 줄일 수 없습니다." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "로컬 디스크를 마이그레이션할 수 없습니다." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"인스턴스 %(instance_name)s에 대한 VNC 기반 터미널을 열지 못함: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"가상 광학 매체가 저장될 볼륨 그룹 %(vol_grp)s을(를) " -"찾을 수 없습니다. 매체 저장소를 작성할 수 없습니다." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"관리 파티션에서 SCSI 버스 %(bus)x을(를) 스캔한 경우, " -"UDID %(udid)s의 디스크가 %(timeout)d초 동안 %(polls)d번 폴링한 이후 발견되지 " -"않습니다." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"%(path_pattern)s에서 관리 파티션의 디스크를 정확히 하나를 " -"찾을 것으로 예상했지만, %(count)d개를 찾았습니다. " - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." 
-msgstr "" -"삭제를 시도한 후에 장치 %(devpath)s이(가) 아직 관리 파티션에 " -"존재합니다. %(timeout)d초 동안 %(polls)d번 " -"폴링했습니다." - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"인스턴스 %(instance_name)s의 부트 디스크를 " -"Virtual I/O Server의 관리 파티션에 맵핑할 수 없습니다. " - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Virtual I/O Server %(vios_name)s에서 관리 파티션으로 " -" 스토리지 요소 %(stg_name)s의 새로 작성된 맵핑을 찾을 수 없습니다. " - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "이 조작의 볼륨 그룹 '%(vg_name)s'을(를) 찾을 수 없습니다. " - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "이 조작의 클러스터 '%(clust_name)s'을(를) 찾을 수 없습니다. " - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "이 조작의 클러스터를 찾을 수 없습니다." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"다음 이름과 일치하는 %(clust_count)d개의 클러스터를 예상치 않게 찾았습니다." -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"cluster_name이 지정되지 않습니다. 발견된 %(clust_count)d개의 " -" 클러스터 중 하나를 선택할 것을 거부 중입니다. " - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"스토리지(id: %(volume_id)s)를 가상 머신에 연결할 수 없습니다. " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"가상 머신에서 볼륨(id: %(volume_id)s)을 확장할 수 없습니다. " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"가상 머신에서 볼륨(id: %(volume_id)s)의 연결을 끊을 수 없습니다. " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"볼륨(id: %(volume_id)s)의 이전 실시간 마이그레이션 단계를 " -"%(instance_name)s에서 수행할 수 없습니다. " - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "PowerVM API: instance=%(inst_name)s에 대해 완료에 실패했습니다. 이유: %(reason)s " - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Virtual I/O Server를 사용할 수 없습니다. 
드라이버가 VIOS의 활성화 시점까지 " -" %(wait_time)d초 동안 대기하려고 시도했습니다. Virtual I/O Server를 " -"사용할 수 없으면 계산 에이전트를 시작할 수 없습니다. PowerVM NovaLink 및 " -"Virtual I/O Server 간의 RMC 연결을 확인한 후" -"Nova 계산 에이전트를 다시 시작하십시오." - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "활성 Virtual I/O Server가 사용 가능하지 않습니다. " - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "새 호스트에서 가상 머신을 다시 빌드할 수 없습니다. 오류: %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"%(if_opt)s이(가) 지정된 경우 %(then_opt)s 옵션이 필요합니다." -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "인스턴스 '%(name)s'의 실시간 마이그레이션에 실패했습니다. 이유: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"볼륨 %(volume)s을(를) 대상 호스트 %(host)s에 연결할 수 없으므로 %(name)s을(를) " -"마이그레이션할 수 없습니다." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"호스트 %(host)s이(가) %(allowed)s 동시 마이그레이션만 허용하고 " -" %(running)s 마이그레이션이 현재 실행 중이므로 %(name)s을(를) 마이그레이션할 수 없습니다." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"소스의 메모리 영역 크기(%(source_mrs)d MB)가 대상의 " -"메모리 영역 크기(%(target_mrs)d MB)와 일치하지 않으므로 " -"'%(name)s' 인스턴스를 마이그레이션할 수 없습니다." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"해당 프로세서 호환성 모드 %(mode)s이(가) 대상 호스트에서 지원하는 모드 \"%(modes)s\"의 목록에 없으므로 " -" %(name)s을(를) 마이그레이션할 수 없습니다." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"마이그레이션 상태가 %(state)s이므로 인스턴스 '%(name)s'의 실시간 마이그레이션에 " -"실패했습니다." - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"인스턴스 '%(name)s'의 실시간 마이그레이션이 준비되지 않았으므로 실패했습니다. 
" -"이유: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "이 vif_driver 구현을 위해 vif_type 매개변수가 존재해야 함" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"VIF 유형 %(vif_type)s에 해당하는 PowerVM VIF 드라이버를 " -"%(instance)s 인스턴스에서 찾을 수 없음" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"MAC 주소가 사용된 SRIOV 기반 VIF의 인스턴스 %(inst)s에 대해 " -"물리적 네트워크 '%(physnet)s'에서 허용되는 이더넷 포트를 찾을 수 없음" -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "이름이 %(pool)s인 다중 공유 처리 풀" - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "공유 처리 풀 %(pool)s을(를) 찾을 수 없음" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"플레이버 속성 %(attr)s은(는) true 또는 false여야 합니다. 현재 값 " -"%(val)s은(는) 허용되지 않습니다." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "구성된 디스크 드라이버에서 마이그레이션 또는 크기 조정을 지원하지 않습니다." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "파일 지원 인스턴스 크기 조정이 현재 지원되지 않습니다." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"호스트가 동일한 SSP 클러스터의 멤버가 아닙니다. 소스 호스트 " -"클러스터: %(source_clust_name)s. 소스 호스트 SSP: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"인스턴스 %(instance)s에 대해 NVRAM을 저장할 수 없습니다. 이유: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"인스턴스 %(instance)s에 대해 NVRAM을 페치할 수 없습니다. 이유: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"인스턴스 %(instance)s에 대해 NVRAM을 삭제할 수 없습니다. 이유: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "구성 옵션 '%(option)s'을(를) 설정해야 합니다." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "%d번의 시도 후에는 NVRAM을 저장할 수 없음" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "Swift에 오브젝트가 없습니다." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "%s의 올바르지 않은 연결 유형" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." 
-msgstr "" -"서버에 대한 NPIV 포트 맵을 호스트하는 Virtual I/O Server를 찾을 수 " -"없습니다." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"볼륨 %(volume_id)s에 대한 Virtual I/O Server에서 올바른 hdisk를 검색하는 데 " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"필수 개수의 Virtual I/O Server에서 hdisk를 검색하지 못했습니다. " -"없었습니다. 볼륨 %(volume_id)s에서 %(vios_req)d Virtual I/O Server가 필요하지만, " -" 디스크는 %(vios_act)d개의 Virtual I/O Server에서만 검색되었습니다." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/nova-powervm.pot b/nova_powervm/locale/nova-powervm.pot deleted file mode 100644 index 982965c3..00000000 --- a/nova_powervm/locale/nova-powervm.pot +++ /dev/null @@ -1,348 +0,0 @@ -# Translations template for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" -"Language-Team: LANGUAGE \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "" - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. 
Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." 
-msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "" - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "" - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "" - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "" - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." 
-msgstr "" - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" - diff --git a/nova_powervm/locale/pt-BR/nova-powervm.po b/nova_powervm/locale/pt-BR/nova-powervm.po deleted file mode 100644 index 62471aa3..00000000 --- a/nova_powervm/locale/pt-BR/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=2; plural=n>1;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Esperado exatamente um host; localizados %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"A operação de captura instantânea não é suportada em conjunto com uma " -"configuração CONF.powervm.disk_driver de %s." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "Plugue vif falhou porque a instância %s não foi localizada." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "Plugue vif falhou devido a erro inesperado." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "Impossível reduzir o tamanho do disco." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "Não é possível migrar discos locais." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"O VNC baseado em terminal para a instância %(instance_name)s falhou ao abrir: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"Não é possível localizar o grupo de volumes %(vol_grp)s no qual armazenar a mídia " -"virtual ótica. Impossível criar o repositório de mídia." 
- -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"Tendo barramento SCSI digitalizado %(bus)x na partição de gerenciamento, disco com " -"UDID%(udid)s falhou em aparecer após as pesquisas %(polls)d em %(timeout)d " -"segundos." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"Esperava localizar exatamente um disco na partição de gerenciamento no " -"%(path_pattern)s; localizado%(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"O dispositivo %(devpath)s ainda está presente na partição de gerenciamento após " -"tentar excluí-lo. Pesquisado %(polls)d vezes em %(timeout)d " -"segundos." - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"Falha em mapear o disco de inicialização da instância %(instance_name)s para a partição de " -"gerenciamento de qualquer Virtual I/O Server." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Falha ao localizar o mapeamento recém-criado do elemento de armazenamento %(stg_name)s do" -" Virtual I/O Server %(vios_name)s para a partição de gerenciamento." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "Não é possível localizar o grupo de volumes '%(vg_name)s' para esta operação." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "Não é possível localizar o Cluster '%(clust_name)s' para esta operação." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "Não é possível localizar um cluster para esta operação." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Clusters com nomes correspondentes %(clust_count)d localizados inesperadamente " -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"Nenhum cluster_name especificado. Recusando-se selecionar um dos %(clust_count)d" -" localizados." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Não é possível conectar o armazenamento (ID: %(volume_id)s) à máquina virtual " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." 
-msgstr "" -"Não é possível estender o volume (id:%(volume_id)s) na máquina virtual " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Não é possível remover o volume (ID: %(volume_id)s) da máquina virtual " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"Não é possível executar as etapas de pré-migração em tempo real no volume (id:%(volume_id)s) " -"a partir da máquina virtual %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "A API do PowerVM falhou em concluir instance=%(inst_name)s.%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Nenhum Virtual I/O Server está disponível. O driver tentou aguardar um" -" VIOS (Virtual I/O Server) se tornar ativo por %(wait_time)d segundos. O agente de cálculo " -"não é capaz de iniciar se nenhum Virtual I/O Server está disponível. Verifique " -"a conectividade do RMC entre o PowerVM NovaLink e o Virtual I/O " -"Server e, em seguida, reinicie o Nova Compute Agent." - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "Não há nenhum Virtual I/O Server ativo disponível." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "Não é possível reconstruir a máquina virtual no novo host. O erro é %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"A opção %(then_opt)s será necessária se %(if_opt)s for especificado como " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "Migração em tempo real da instância %(name)s' falhou devido a: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"Impossível migrar %(name)s porque o volume %(volume)s não pode ser anexado " -"no host de destino %(host)s." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"Impossível migrar %(name)s porque o host %(host)s somente permite %(allowed)s" -" migrações simultâneas e %(running)s migrações está em execução no momento." 
- -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"Impossível migrar instância '%(name)s' porque o tamanho da região de memória da " -"origem (%(source_mrs)d MB) não corresponde ao tamanho da região de memória do " -"destino (%(target_mrs)d MB)." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"Impossível migrar %(name)s porque seu modo de capacidade de processador %(mode)s" -" não está na lista de modos \"%(modes)s\" suportados pelo host de destino." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"Migração em tempo real da instância '%(name)s' falhou porque o estado da migração " -"é: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"A migração em tempo real da instância '%(name)s' falhou porque ela não está pronta. " -"Motivo: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "o parâmetro vif_type deve estar presente para esta implementação de vif_driver" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"Não é possível localizar o driver da VIF do PowerVM apropriado para o tipo de VIF %(vif_type)s " -"na instância %(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"Não é possível localizar portas Ethernet aceitáveis na rede física " -"'%(physnet)s' para a instância %(inst)s para o VIF baseado em SRIOV com endereço MAC " -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Vários conjuntos de processo compartilhados com o nome %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "Impossível localizar o conjunto de processamento compartilhado %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"O atributo flavor %(attr)s deve ser True ou False. O valor atual " -"%(val)s não é permitido." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "O driver do disco configurado não suporta migração ou redimensionamento." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "O redimensionamento das instâncias suportadas por arquivo não é suportado atualmente." 
- -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"O host não é um membro do mesmo cluster do SSP. A máquina do host de origem " -"cluster: %(source_clust_name)s. O SSP do host de origem: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"O NVRAM não pôde ser armazenado para a instância %(instance)s. Razão: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"O NVRAM não pôde ser buscado para a instância %(instance)s. Razão: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"O NVRAM não pôde ser excluído para a instância %(instance)s. Razão: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "A opção de configuração '%(option)s' deve ser configurada." - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "Não é possível armazenar a NVRAM (memória de acesso aleatório não volátil) após %d tentativas" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "O objeto não existe no Swift." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Tipo de conexão inválida de %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Não é possível localizar um Virtual I/O Server que hospede o mapa de porta NPIV para o " -"servidor rabbitmq." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Falha em descobrir hdisk válido em qualquer Virtual I/O Server para o volume " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Falha ao descobrir o hdisk no número necessário de Virtual I/O " -"Server. O volume %(volume_id)s requeria %(vios_req)d Virtual I/O Servers," -" mas o disco somente foi localizado em %(vios_act)d Virtual I/O Servers." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/ru/nova-powervm.po b/nova_powervm/locale/ru/nova-powervm.po deleted file mode 100644 index b2ac7cf9..00000000 --- a/nova_powervm/locale/ru/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. 
-# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "Ожидался только один хост; обнаружено: %d" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"Операция моментальной копии не поддерживается, если для параметра " -"CONF.powervm.disk_driver указано значение %s." - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "Подключение vif не выполнено, поскольку экземпляр %s не найден." - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "Подключение vif не выполнено вследствие непредвиденной ошибки." - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "Невозможно уменьшить размер диска." - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "Невозможно выполнить миграцию локальных дисков." - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"Не удалось открыть терминал VNC для экземпляра %(instance_name)s: " -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"Не удалось найти группу томов %(vol_grp)s для размещения виртуального " -"оптического носителя. Не удалось создать хранилище носителей." - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"После сканирования шины SCSI %(bus)x в разделе управления " -"не удалось отобразить диск с UDID %(udid)s после %(polls)d опросов за %(timeout)d " -"секундах." - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"Ожидался ровно один диск в разделе управления " -"%(path_pattern)s; обнаружено %(count)d." - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"Устройство %(devpath)s по-прежнему присутствует в разделе управления после " -"попытки удалить его. Опрошено %(polls)d раз за %(timeout)d " -"сек. 
" - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"Не удалось подключить загрузочный диск экземпляра %(instance_name)s к разделу " -"управления ни через один сервер VIOS." - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"Не найдена только что созданная связь элемента системы хранения %(stg_name)s" -" из сервера VIOS %(vios_name)s с разделом управления." - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "Не найдена группа томов '%(vg_name)s', необходимая для этой операции." - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "Не найден кластер '%(clust_name)s', необходимый для этой операции." - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "Не найден ни один кластер для выполнения этой операции." - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"Неожиданно обнаружено %(clust_count)d кластеров с именем " -"'%(clust_name)s'." - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"Не задано значение cluster_name. Невозможно выбрать ни один из %(clust_count)d" -" найденных кластеров." - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Не удалось подключить устройство хранения (ИД: %(volume_id)s) к виртуальной машине " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"Не удалось расширить том (ИД: %(volume_id)s) в виртуальной машине " -"%(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"Не удалось отключить том (ИД: %(volume_id)s) от виртуальной машины " -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"Не удалось выполнить предварительные шаги оперативной миграции для тома (ИД: %(volume_id)s) " -"в виртуальной машине %(instance_name)s." - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "Сбой API PowerVM для экземпляра %(inst_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. 
The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"Не доступен ни один сервер VIOS. Драйвер ждал, пока какой-либо" -" VIOS станет активным, в течение %(wait_time)d с. Вычислительный агент " -"нельзя запустить, если нет активных серверов VIOS. Проверьте " -"соединение между PowerVM NovaLink и VIOS, " -"затем перезапустите вычислительный агент Nova." - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "Нет ни одного активного сервера VIOS." - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "Невозможно заново скомпоновать виртуальную машину на новом хосте. Ошибка: %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"Должен быть указан параметр %(then_opt)s, если в параметре %(if_opt)s указано " -"'%(if_value)s'." - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "Сбой оперативной миграции экземпляра '%(name)s', причина: %(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"Невозможно выполнить миграцию %(name)s, поскольку том %(volume)s нельзя подключить " -"к целевому хосту %(host)s." - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"Невозможно выполнить миграцию %(name)s, так как хост %(host)s допускает не более %(allowed)s" -" параллельных операций миграции, а в данный момент выполняется %(running)s миграций." - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"Невозможно выполнить миграцию экземпляра '%(name)s', поскольку размер исходной области памяти " -"(%(source_mrs)d МБ) не совпадает с размером целевой области памяти " -"(%(target_mrs)d МБ)." - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"Невозможно выполнить миграцию %(name)s, поскольку режим совместимости процессора %(mode)s" -" отсутствует в списке поддерживаемых режимов \"%(modes)s\" целевого хоста." - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"Сбой оперативной миграции экземпляра '%(name)s', поскольку миграция находится в следующем " -"состоянии: %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. 
" -"Reason: %(reason)s" -msgstr "" -"Сбой оперативной миграции экземпляра '%(name)s', поскольку подготовка не выполнена. " -"Причина: %(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "Параметр vif_type должен присутствовать для этой реализации vif_driver" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"Не найден соответствующий драйвер VIF PowerVM для типа VIF %(vif_type)s " -"в экземпляре %(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"В физической сети '%(physnet)s' не найдены подходящие порты Ethernet " -"для экземпляра %(inst)s для VIF на основе SRIOV с MAC-адресом " -"%(vif_mac)s." - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "Несколько общих пулов процессоров с именем %(pool)s." - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "Не удалось найти общий пул процессоров %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"Атрибут Flavor %(attr)s должен иметь значение True или False. Текущее значение " -"%(val)s недопустимо." - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "Настроенный драйвер диска не поддерживает миграцию или изменение размера." - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "Изменение экземпляров на основе файлов пока не поддерживается." - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"Хост не является элементом того же кластера SSP. Кластер исходного хоста: " -"%(source_clust_name)s. SSP исходного хоста: %(source_ssp_name)s." - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Не удалось сохранить NVRAM для экземпляра %(instance)s. Причина: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Не удалось получить NVRAM для экземпляра %(instance)s. Причина: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"Невозможно удалить NVRAM для экземпляра %(instance)s. Причина: " -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "Должен быть задан параметр конфигурации '%(option)s'." 
- -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "Не удалось сохранить NVRAM за %d попыток" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "Объект не существует в Swift." - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "Недопустимый тип соединения %s" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"Не найден VIOS с картой портов NPIV для " -"сервера." - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"Не удалось найти допустимый жесткий диск на серверах виртуального ввода-вывода для тома " -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"Не найден жесткий диск на требуемом числе VIOS. " -"VIOS. Для тома %(volume_id)s требуется %(vios_req)d VIOS, " -" однако диск найден только на %(vios_act)d VIOS." - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/zh-Hans/nova-powervm.po b/nova_powervm/locale/zh-Hans/nova-powervm.po deleted file mode 100644 index cd2b6ea0..00000000 --- a/nova_powervm/locale/zh-Hans/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=1; plural=0;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "期望刚好找到一个主机;但是找到 %d 个主机" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"当 CONF.powervm.disk_driver 设置为" -"%s 时,不支持快照操作。" - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "插入 VIF 失败,因为找不到实例 %s。" - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "插入 VIF 失败,因为发生了意外错误。" - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "无法减小磁盘大小。" - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." 
-msgstr "无法迁移本地磁盘。" - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"对于 %(instance_name)s 实例,未能打开基于 VNC 的终端:" -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." -msgstr "" -"找不到用于存储虚拟光学介质的卷组 %(vol_grp)s。" -"无法创建介质存储库。" - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"已扫描管理分区上的 SCSI 总线 %(bus)x," -"在 %(timeout)d 秒内进行 %(polls)d 次轮询后,UDID 为 %(udid)s 的磁盘未能" -"显示。" - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"期望以下位置的管理分区中找到正好一个磁盘:" -"%(path_pattern)s;但发现 %(count)d 个磁盘。" - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"尝试删除设备 %(devpath)s 之后," -"该设备仍然存在管理分区上。已轮询 %(polls)d 次,耗时 %(timeout)d" -"秒。" - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"无法将实例 %(instance_name)s 的引导磁盘映射至任何" -"Virtual I/O Server 中的管理分区。" - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"找不到存储元素 %(stg_name)s 的新建映射" -" (从 Virtual I/O Server %(vios_name)s 映射到管理分区)。" - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "找不到对应此操作的卷组“%(vg_name)s”。" - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "找不到对应此操作的集群“%(clust_name)s”。" - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "找不到对应此操作的任何集群。" - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"意外找到 %(clust_count)d 个与名称" -"“%(clust_name)s”匹配的集群。" - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"未指定 cluster_name。拒绝选择所发现的 %(clust_count)d 个" -" 集群中的一个。" - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"无法将存储器(标识:%(volume_id)s)连接至虚拟机" -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." 
-msgstr "" -"无法扩展虚拟机 %(instance_name)s 上的" -"卷(标识:%(volume_id)s)。" - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"无法将卷(标识:%(volume_id)s)从虚拟机" -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." -msgstr "" -"无法从虚拟机 %(instance_name)s 对卷(标识:%(volume_id)s)" -"执行预先实时迁移步骤。" - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "未能对实例 %(inst_name)s 完成 PowerVM API。原因:%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"没有可用的 Virtual I/O Server。驱动程序已尝试等待" -" %(wait_time)d 秒以使 VIOS 变为活动状态。没有可用的" -"Virtual I/O Server 时,计算代理程序无法启动。请检查" -"PowerVM NovaLink 与 Virtual I/O Server 之间的 RMC 连接," -"然后重新启动 Nova 计算代理程序。" - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "没有活动可用 Virtual I/O Server。" - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "无法在新主机上重建虚拟机。错误为 %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"%(then_opt)s 选项为必需(如果 %(if_opt)s 指定为" -"“%(if_value)s”。" - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "实时迁移实例“%(name)s”失败,原因:%(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"无法迁移 %(name)s,因为在目标主机 %(host)s 上" -"无法连接卷 %(volume)s。" - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"无法迁移 %(name)s,因为主机 %(host)s 只允许 %(allowed)s 个" -" 个并行迁移,但是有 %(running)s 个迁移当前正在运行。" - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"无法迁移实例“%(name)s”," -"因为源的内存区域大小 (%(source_mrs)d MB)" -"与目标的内存区域大小 (%(target_mrs)d MB) 不匹配。" - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." 
-msgstr "" -"无法迁移 %(name)s,因为它的处理器兼容性方式 %(mode)s" -" 不在目标主机所支持的方式列表“%(modes)s”中。" - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"实时迁移实例“%(name)s”失败," -"因为迁移状态为 %(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. " -"Reason: %(reason)s" -msgstr "" -"实时迁移实例“%(name)s”失败,因为它未就绪。" -"原因:%(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "对于此 vif_driver 实现,必须存在 vif_type 参数" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"在下列实例上,找不到 VIF 类型 %(vif_type)s 的相应 PowerVM VIF 驱动程序:" -"%(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"对于具有 MAC 地址 %(vif_mac)s 的基于 SRIOV 的 VIF 的" -"实例 %(inst)s,在物理网络“%(physnet)s”上找不到可接受的" -"以太网端口。" - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "存在多个名称为 %(pool)s 的共享处理池。" - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "找不到共享处理池 %(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"flavor 属性 %(attr)s 必须为 True 或 False。" -"不允许使用当前值 %(val)s。" - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "所配置的磁盘驱动程序不支持迁移或调整大小。" - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "当前不支持调整文件备份实例的大小。" - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"该主机不是同一 SSP 集群的成员。源主机" -"集群:%(source_clust_name)s。源主机 SSP:%(source_ssp_name)s。" - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"无法存储实例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"无法访存实例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"无法删除实例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "必须设置配置选项“%(option)s”。" - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "尝试 %d 次之后仍然无法存储 NVRAM" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." 
-msgstr "Swift 中没有对象。" - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "%s 的连接类型无效" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." -msgstr "" -"找不到用来管理服务器的 NPIV 端口映射的" -"Virtual I/O Server。" - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"未能在任何 Virtual I/O Server 上发现卷的有效 hdisk" -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"未能在所需数量的 Virtual I/O Server 上发现" -"hdisk。卷 %(volume_id)s 需要 %(vios_req)d 个 Virtual I/O Server," -" 但仅在 %(vios_act)d 个 Virtual I/O Server 上找到磁盘。" - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/locale/zh-Hant/nova-powervm.po b/nova_powervm/locale/zh-Hant/nova-powervm.po deleted file mode 100644 index aa4a123b..00000000 --- a/nova_powervm/locale/zh-Hant/nova-powervm.po +++ /dev/null @@ -1,425 +0,0 @@ -# English translations for nova_powervm. -# Copyright (C) 2018 ORGANIZATION -# This file is distributed under the same license as the nova_powervm -# project. -# FIRST AUTHOR , 2018. -# -msgid "" -msgstr "" -"Project-Id-Version: nova_powervm 6.0.0\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2018-03-19 18:06-0400\n" -"PO-Revision-Date: 2018-03-19 18:07-0400\n" -"Last-Translator: FULL NAME \n" -"Language: en\n" -"Language-Team: en \n" -"Plural-Forms: nplurals=1; plural=0;" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.5.3\n" - -#: nova_powervm/virt/powervm/driver.py:216 -#, python-format -msgid "Expected exactly one host; found %d" -msgstr "預期只有一個主機;但找到 %d 個" - -#: nova_powervm/virt/powervm/driver.py:821 -#, python-format -msgid "" -"The snapshot operation is not supported in conjunction with a " -"CONF.powervm.disk_driver setting of %s." -msgstr "" -"當 CONF.powervm.disk_driver 設定為" -"%s 時,不支援 Snapshot 作業。" - -#: nova_powervm/virt/powervm/driver.py:1023 -#, python-format -msgid "Plug vif failed because instance %s was not found." -msgstr "插入 VIF 失敗,因為找不到實例 %s。" - -#: nova_powervm/virt/powervm/driver.py:1028 -msgid "Plug vif failed because of an unexpected error." -msgstr "插入 VIF 失敗,因為發生了非預期的錯誤。" - -#: nova_powervm/virt/powervm/driver.py:1118 -msgid "Cannot reduce disk size." -msgstr "無法減少磁碟大小。" - -#: nova_powervm/virt/powervm/driver.py:1132 -#: nova_powervm/virt/powervm/driver.py:1240 -msgid "Cannot migrate local disks." -msgstr "無法移轉本端磁碟。" - -#: nova_powervm/virt/powervm/driver.py:1757 -#, python-format -msgid "" -"VNC based terminal for instance %(instance_name)s failed to open: " -"%(exc_msg)s" -msgstr "" -"針對 %(instance_name)s 實例,未能開啟 VNC 型終端機:" -"%(exc_msg)s" - -#: nova_powervm/virt/powervm/exception.py:38 -#, python-format -msgid "" -"Unable to locate the volume group %(vol_grp)s to store the virtual " -"optical media within. Unable to create the media repository." 
-msgstr "" -"找不到在其中儲存虛擬光學媒體的磁區群組 %(vol_grp)s。" -"無法建立媒體儲存庫。" - -#: nova_powervm/virt/powervm/exception.py:45 -#, python-format -msgid "" -"Having scanned SCSI bus %(bus)x on the management partition, disk with " -"UDID %(udid)s failed to appear after %(polls)d polls over %(timeout)d " -"seconds." -msgstr "" -"在管理分割區上掃描 SCSI 匯流排 %(bus)x 時," -"UDID 為 %(udid)s 的磁碟未在 %(timeout)d 秒內的 %(polls)d 次輪詢之後" -"出現。" - -#: nova_powervm/virt/powervm/exception.py:52 -#, python-format -msgid "" -"Expected to find exactly one disk on the management partition at " -"%(path_pattern)s; found %(count)d." -msgstr "" -"預期在 %(path_pattern)s 處的管理分割區上只找到" -"一個磁碟;但卻找到 %(count)d 個。" - -#: nova_powervm/virt/powervm/exception.py:58 -#, python-format -msgid "" -"Device %(devpath)s is still present on the management partition after " -"attempting to delete it. Polled %(polls)d times over %(timeout)d " -"seconds." -msgstr "" -"在嘗試刪除裝置 %(devpath)s 之後,該裝置仍" -"呈現在管理分割區上。已輪詢 %(polls)d 次,歷時 %(timeout)d" -"秒。" - -#: nova_powervm/virt/powervm/exception.py:64 -#, python-format -msgid "" -"Failed to map boot disk of instance %(instance_name)s to the management " -"partition from any Virtual I/O Server." -msgstr "" -"無法透過任何 Virtual I/O Server 將實例 %(instance_name)s 的開機磁碟" -"對映至管理分割區。" - -#: nova_powervm/virt/powervm/exception.py:70 -#, python-format -msgid "" -"Failed to find newly-created mapping of storage element %(stg_name)s from" -" Virtual I/O Server %(vios_name)s to the management partition." -msgstr "" -"找不到儲存體元素 %(stg_name)s 的新建對映" -" (從 Virtual I/O Server %(vios_name)s 對映至管理分割區)。" - -#: nova_powervm/virt/powervm/exception.py:76 -#, python-format -msgid "Unable to locate the volume group '%(vg_name)s' for this operation." -msgstr "找不到用於這項作業的磁區群組 '%(vg_name)s'。" - -#: nova_powervm/virt/powervm/exception.py:81 -#, python-format -msgid "Unable to locate the Cluster '%(clust_name)s' for this operation." -msgstr "找不到用於這項作業的叢集 '%(clust_name)s'。" - -#: nova_powervm/virt/powervm/exception.py:86 -msgid "Unable to locate any Cluster for this operation." -msgstr "找不到用於這項作業的任何叢集。" - -#: nova_powervm/virt/powervm/exception.py:90 -#, python-format -msgid "" -"Unexpectedly found %(clust_count)d Clusters matching name " -"'%(clust_name)s'." -msgstr "" -"非預期地找到 %(clust_count)d 個符合名稱" -"'%(clust_name)s' 的叢集。" - -#: nova_powervm/virt/powervm/exception.py:95 -#, python-format -msgid "" -"No cluster_name specified. Refusing to select one of the %(clust_count)d" -" Clusters found." -msgstr "" -"未指定 cluster_name。將拒絕選取所找到的 %(clust_count)d " -" 個叢集中的一個。" - -#: nova_powervm/virt/powervm/exception.py:100 -#, python-format -msgid "" -"Unable to attach storage (id: %(volume_id)s) to virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"無法將儲存體(ID:%(volume_id)s)連接至虛擬機器" -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:105 -#, python-format -msgid "" -"Unable to extend volume (id: %(volume_id)s) on virtual machine " -"%(instance_name)s." -msgstr "" -"無法延伸虛擬機器 %(instance_name)s 上的" -"磁區(ID:%(volume_id)s)。" - -#: nova_powervm/virt/powervm/exception.py:110 -#, python-format -msgid "" -"Unable to detach volume (id: %(volume_id)s) from virtual machine " -"%(instance_name)s. %(reason)s" -msgstr "" -"無法將磁區(ID:%(volume_id)s)從下列虛擬機器分離:" -"%(instance_name)s. %(reason)s" - -#: nova_powervm/virt/powervm/exception.py:115 -#, python-format -msgid "" -"Unable to perform pre live migration steps on volume (id: %(volume_id)s) " -"from virtual machine %(instance_name)s." 
-msgstr "" -"從以下虛擬機器中,無法對磁區(ID:%(volume_id)s)執行前置即時移轉步驟:" -"%(instance_name)s。" - -#: nova_powervm/virt/powervm/exception.py:120 -#, python-format -msgid "PowerVM API failed to complete for instance=%(inst_name)s.%(reason)s" -msgstr "未能對實例 %(inst_name)s 完成 PowerVM API。%(reason)s" - -#: nova_powervm/virt/powervm/exception.py:125 -#, python-format -msgid "" -"No Virtual I/O Servers are available. The driver attempted to wait for a" -" VIOS to become active for %(wait_time)d seconds. The compute agent is " -"not able to start if no Virtual I/O Servers are available. Please check " -"the RMC connectivity between the PowerVM NovaLink and the Virtual I/O " -"Servers and then restart the Nova Compute Agent." -msgstr "" -"沒有 Virtual I/O Server 可用。驅動程式已嘗試等待" -" VIOS 變為作用中狀態達 %(wait_time)d 秒。沒有可用的" -"Virtual I/O Server 時,計算代理程式無法啟動。請檢查" -"PowerVM NovaLink 與 Virtual I/O Server 之間的 RMC 連線功能," -"然後重新啟動 Nova 計算代理程式。" - -#: nova_powervm/virt/powervm/exception.py:134 -msgid "There are no active Virtual I/O Servers available." -msgstr "沒有作用中的 Virtual I/O Server 可用。" - -#: nova_powervm/virt/powervm/exception.py:138 -#, python-format -msgid "Unable to rebuild virtual machine on new host. Error is %(error)s" -msgstr "無法在新主機上重建虛擬機器。錯誤為 %(error)s" - -#: nova_powervm/virt/powervm/exception.py:143 -#, python-format -msgid "" -"The %(then_opt)s option is required if %(if_opt)s is specified as " -"'%(if_value)s'." -msgstr "" -"%(then_opt)s 選項是需要的(如果 %(if_opt)s 指定為" -"'%(if_value)s'。" - -#: nova_powervm/virt/powervm/live_migration.py:44 -#, python-format -msgid "Live migration of instance '%(name)s' failed for reason: %(reason)s" -msgstr "實例 '%(name)s' 的即時移轉失敗,原因:%(reason)s" - -#: nova_powervm/virt/powervm/live_migration.py:49 -#, python-format -msgid "" -"Cannot migrate %(name)s because the volume %(volume)s cannot be attached " -"on the destination host %(host)s." -msgstr "" -"無法移轉 %(name)s,因為磁區 %(volume)s 無法連接到" -"目的地主機 %(host)s。" - -#: nova_powervm/virt/powervm/live_migration.py:59 -#, python-format -msgid "" -"Cannot migrate %(name)s because the host %(host)s only allows %(allowed)s" -" concurrent migrations and %(running)s migrations are currently running." -msgstr "" -"無法移轉 %(name)s,因為主機 %(host)s 只容許 %(allowed)s" -" 個並行移轉,但卻有 %(running)s 個移轉目前在執行中。" - -#: nova_powervm/virt/powervm/live_migration.py:109 -#, python-format -msgid "" -"Cannot migrate instance '%(name)s' because the memory region size of the " -"source (%(source_mrs)d MB) does not match the memory region size of the " -"target (%(target_mrs)d MB)." -msgstr "" -"無法移轉實例 '%(name)s',因為來源的記憶體範圍大小" -"(%(source_mrs)d MB) 與目標的記憶體範圍大小" -"(%(target_mrs)d MB) 不符。" - -#: nova_powervm/virt/powervm/live_migration.py:279 -#, python-format -msgid "" -"Cannot migrate %(name)s because its processor compatibility mode %(mode)s" -" is not in the list of modes \"%(modes)s\" supported by the target host." -msgstr "" -"無法移轉 %(name)s,因為它的處理器相容模式 %(mode)s" -" 不在目標主機所支援的模式清單 \"%(modes)s\" 中。" - -#: nova_powervm/virt/powervm/live_migration.py:294 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because the migration state " -"is: %(state)s" -msgstr "" -"實例 '%(name)s' 的即時移轉失敗,因為移轉狀態為:" -"%(state)s" - -#: nova_powervm/virt/powervm/live_migration.py:455 -#, python-format -msgid "" -"Live migration of instance '%(name)s' failed because it is not ready. 
" -"Reason: %(reason)s" -msgstr "" -"實例 '%(name)s' 的即時移轉失敗,因為該實例尚未備妥。" -"原因:%(reason)s" - -#: nova_powervm/virt/powervm/vif.py:85 -msgid "vif_type parameter must be present for this vif_driver implementation" -msgstr "此 vif_driver 實作的 vif_type 參數必須存在" - -#: nova_powervm/virt/powervm/vif.py:95 -#, python-format -msgid "" -"Unable to find appropriate PowerVM VIF Driver for VIF type %(vif_type)s " -"on instance %(instance)s" -msgstr "" -"在下列實例上,找不到 VIF 類型 %(vif_type)s 的適當 PowerVM VIF 驅動程式:" -"%(instance)s" - -#: nova_powervm/virt/powervm/vif.py:540 -#, python-format -msgid "" -"Unable to find acceptable Ethernet ports on physical network " -"'%(physnet)s' for instance %(inst)s for SRIOV based VIF with MAC address " -"%(vif_mac)s." -msgstr "" -"對於 MAC 位址為 %(vif_mac)s 的 SRIOV 型 VIF 的" -"實例 %(inst)s,在實體網路 '%(physnet)s' 上找不到可接受的" -"乙太網路埠。" - -#: nova_powervm/virt/powervm/vm.py:449 -#, python-format -msgid "Multiple Shared Processing Pools with name %(pool)s." -msgstr "多個「共用處理程序儲存區」具有名稱 %(pool)s。" - -#: nova_powervm/virt/powervm/vm.py:453 -#, python-format -msgid "Unable to find Shared Processing Pool %(pool)s" -msgstr "找不到「共用處理程序儲存區」%(pool)s" - -#: nova_powervm/virt/powervm/vm.py:475 -#, python-format -msgid "" -"Flavor attribute %(attr)s must be either True or False. Current value " -"%(val)s is not allowed." -msgstr "" -"flavor 屬性 %(attr)s 必須為 True 或 False。不容許現行值" -"%(val)s。" - -#: nova_powervm/virt/powervm/disk/driver.py:129 -msgid "The configured disk driver does not support migration or resize." -msgstr "所配置的磁碟驅動程式不支援移轉或調整大小。" - -#: nova_powervm/virt/powervm/disk/localdisk.py:300 -msgid "Resizing file-backed instances is not currently supported." -msgstr "目前不支援重新調整檔案所支持實例的大小。" - -#: nova_powervm/virt/powervm/disk/ssp.py:119 -#, python-format -msgid "" -"The host is not a member of the same SSP cluster. The source host " -"cluster: %(source_clust_name)s. The source host SSP: %(source_ssp_name)s." -msgstr "" -"主機不是同一 SSP 叢集的成員。來源主機" -"叢集:%(source_clust_name)s。來源主機 SSP:%(source_ssp_name)s。" - -#: nova_powervm/virt/powervm/nvram/api.py:25 -#, python-format -msgid "" -"The NVRAM could not be stored for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"無法儲存實例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:30 -#, python-format -msgid "" -"The NVRAM could not be fetched for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"無法提取實例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:35 -#, python-format -msgid "" -"The NVRAM could not be deleted for instance %(instance)s. Reason: " -"%(reason)s" -msgstr "" -"無法刪除實例 %(instance)s 的 NVRAM。原因:" -"%(reason)s" - -#: nova_powervm/virt/powervm/nvram/api.py:40 -#, python-format -msgid "The configuration option '%(option)s' must be set." -msgstr "必須設定配置選項 '%(option)s'。" - -#: nova_powervm/virt/powervm/nvram/swift.py:195 -#, python-format -msgid "Unable to store NVRAM after %d attempts" -msgstr "嘗試 %d 次之後仍然無法儲存 NVRAM" - -#: nova_powervm/virt/powervm/nvram/swift.py:272 -msgid "Object does not exist in Swift." -msgstr "物件不存在於 Swift 中。" - -#: nova_powervm/virt/powervm/volume/__init__.py:65 -#, python-format -msgid "Invalid connection type of %s" -msgstr "連線類型 %s 無效" - -#: nova_powervm/virt/powervm/volume/npiv.py:522 -msgid "" -"Unable to find a Virtual I/O Server that hosts the NPIV port map for the " -"server." 
-msgstr "" -"找不到用來管理伺服器之 NPIV 埠對映的" -"Virtual I/O Server。" - -#: nova_powervm/virt/powervm/volume/volume.py:117 -#, python-format -msgid "" -"Failed to discover valid hdisk on any Virtual I/O Server for volume " -"%(volume_id)s." -msgstr "" -"針對下列磁區,無法在任何 Virtual I/O Server 上探索有效硬碟:" -"%(volume_id)s." - -#: nova_powervm/virt/powervm/volume/volume.py:121 -#, python-format -msgid "" -"Failed to discover the hdisk on the required number of Virtual I/O " -"Servers. Volume %(volume_id)s required %(vios_req)d Virtual I/O Servers," -" but the disk was only found on %(vios_act)d Virtual I/O Servers." -msgstr "" -"無法在所需數量的 Virtual I/O Server 上探索到" -"硬碟。磁區 %(volume_id)s 需要 %(vios_req)d 個 Virtual I/O Server," -" 但卻只在 %(vios_act)d 個 Virtual I/O Server 上找到磁碟。" - - -# ENGL1SH_VERS10N 62006_10 DO NOT REMOVE OR CHANGE THIS LINE -# T9N_SRC_ID 28 -# T9N_SH1P_STR1NG VC141AAP001 1 diff --git a/nova_powervm/tests/__init__.py b/nova_powervm/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/conf/__init__.py b/nova_powervm/tests/conf/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/conf/test_conf.py b/nova_powervm/tests/conf/test_conf.py deleted file mode 100644 index 64373f23..00000000 --- a/nova_powervm/tests/conf/test_conf.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import test -import oslo_config - -from nova_powervm import conf as cfg - -CONF = cfg.CONF - - -class TestConf(test.NoDBTestCase): - def setUp(self): - super(TestConf, self).setUp() - - def test_conf(self): - """Tests that powervm config values are configured.""" - # Try an option from each grouping of static options - - # Base set of options - self.assertEqual(0.1, CONF.powervm.proc_units_factor) - # Local disk - self.assertEqual('', CONF.powervm.volume_group_name) - # SSP disk - self.assertEqual('', CONF.powervm.cluster_name) - # Volume attach - self.assertEqual('vscsi', CONF.powervm.fc_attach_strategy) - # NPIV - self.assertEqual(1, CONF.powervm.ports_per_fabric) - - -class TestConfBounds(test.NoDBTestCase): - def setUp(self): - super(TestConfBounds, self).setUp() - - def _bounds_test(self, should_pass, opts, **kwargs): - """Test the bounds of an option.""" - # Use the Oslo fixture to create a temporary conf object - with oslo_config.fixture.Config(oslo_config.cfg.ConfigOpts()) as fx: - # Load the raw values - fx.load_raw_values(group='powervm', **kwargs) - # Register the options - fx.register_opts(opts, group='powervm') - # For each kwarg option passed, validate it. 
- for kw in kwargs: - if not should_pass: - # Reference the option to cause a bounds exception - self.assertRaises(oslo_config.cfg.ConfigFileValueError, - lambda: fx.conf.powervm[kw]) - else: - # It's expected to succeed - fx.conf.powervm[kw] - - def test_bounds(self): - # Uncapped proc weight - self._bounds_test(False, cfg.powervm.powervm_opts, - uncapped_proc_weight=0) - self._bounds_test(False, cfg.powervm.powervm_opts, - uncapped_proc_weight=256) - self._bounds_test(True, cfg.powervm.powervm_opts, - uncapped_proc_weight=200) - # vopt media repo size - self._bounds_test(False, cfg.powervm.powervm_opts, - vopt_media_rep_size=0) - self._bounds_test(True, cfg.powervm.powervm_opts, - vopt_media_rep_size=10) - # vscsi connections - self._bounds_test(False, cfg.powervm.vol_adapter_opts, - vscsi_vios_connections_required=0) - self._bounds_test(True, cfg.powervm.vol_adapter_opts, - vscsi_vios_connections_required=2) - # ports per fabric - self._bounds_test(False, cfg.powervm.npiv_opts, - ports_per_fabric=0) - self._bounds_test(True, cfg.powervm.npiv_opts, - ports_per_fabric=2) - - -class TestConfChoices(test.NoDBTestCase): - def setUp(self): - super(TestConfChoices, self).setUp() - - def _choice_test(self, invalid_choice, valid_choices, opts, option, - ignore_case=True): - """Test the choices of an option.""" - - def _setup(fx, value): - # Load the raw values - fx.load_raw_values(group='powervm', **{option: value}) - # Register the options - fx.register_opts(opts, group='powervm') - - def _build_list(): - for val in valid_choices: - yield val - yield val.lower() - yield val.upper() - - if ignore_case: - # We expect to be able to ignore upper/lower case, so build a list - # of possibilities and ensure we do ignore them. - valid_choices = [x for x in _build_list()] - - if invalid_choice: - # Use the Oslo fixture to create a temporary conf object - with oslo_config.fixture.Config(oslo_config.cfg.ConfigOpts() - ) as fx: - _setup(fx, invalid_choice) - # Reference the option to cause an exception - self.assertRaises(oslo_config.cfg.ConfigFileValueError, - lambda: fx.conf.powervm[option]) - - for choice in valid_choices: - # Use the Oslo fixture to create a temporary conf object - with oslo_config.fixture.Config(oslo_config.cfg.ConfigOpts() - ) as fx: - _setup(fx, choice) - - # It's expected to succeed - fx.conf.powervm[option] - - def test_choices(self): - # FC attachment - self._choice_test('bad_value', ['vscsi', 'npiv'], - cfg.powervm.vol_adapter_opts, 'fc_attach_strategy') - - -class TestConfDynamic(test.NoDBTestCase): - def setUp(self): - super(TestConfDynamic, self).setUp() - self.conf_fx = self.useFixture( - oslo_config.fixture.Config(oslo_config.cfg.ConfigOpts())) - # Set the raw values in the config - self.conf_fx.load_raw_values(group='powervm', fabrics='A,B', - fabric_A_port_wwpns='WWPN1', - fabric_B_port_wwpns='WWPN2') - # Now register the NPIV options with the values - self.conf_fx.register_opts(cfg.powervm.npiv_opts, group='powervm') - self.conf = self.conf_fx.conf - - def test_npiv(self): - """Tests that NPIV dynamic options are registered correctly.""" - # Register the dynamic FC values - fabric_mapping = {} - cfg.powervm._register_fabrics(self.conf, fabric_mapping) - self.assertEqual('A,B', self.conf.powervm.fabrics) - self.assertEqual('WWPN1', self.conf.powervm.fabric_A_port_wwpns) - self.assertEqual('WWPN2', self.conf.powervm.fabric_B_port_wwpns) - # Ensure the NPIV data was setup correctly - self.assertEqual({'B': ['WWPN2'], 'A': ['WWPN1']}, fabric_mapping) diff --git 
a/nova_powervm/tests/virt/__init__.py b/nova_powervm/tests/virt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/virt/powervm/__init__.py b/nova_powervm/tests/virt/powervm/__init__.py deleted file mode 100644 index 34a612a5..00000000 --- a/nova_powervm/tests/virt/powervm/__init__.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014, 2016 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import vm_states -from nova.objects import flavor -from nova.objects import image_meta -from nova.objects import instance -import os -import sys - - -TEST_FLAVOR = flavor.Flavor(memory_mb=2048, - swap=0, - vcpu_weight=None, - root_gb=10, - id=2, - name=u'm1.small', - ephemeral_gb=0, - rxtx_factor=1.0, - flavorid=u'1', - vcpus=1) - -TEST_INSTANCE = { - 'id': 1, - 'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5', - 'display_name': 'Fake Instance', - 'root_gb': 10, - 'ephemeral_gb': 0, - 'instance_type_id': '5', - 'system_metadata': {'image_os_distro': 'rhel'}, - 'host': 'host1', - 'flavor': TEST_FLAVOR, - 'task_state': None, - 'vm_state': vm_states.ACTIVE, - 'power_state': power_state.SHUTDOWN, -} - -TEST_INST_SPAWNING = dict(TEST_INSTANCE, task_state=task_states.SPAWNING, - uuid='b3c04455-a435-499d-ac81-371d2a2d334f') - -TEST_INST1 = instance.Instance(**TEST_INSTANCE) -TEST_INST2 = instance.Instance(**TEST_INST_SPAWNING) - -TEST_MIGRATION = { - 'id': 1, - 'source_compute': 'host1', - 'dest_compute': 'host2', - 'migration_type': 'resize', - 'old_instance_type_id': 1, - 'new_instance_type_id': 2, -} -TEST_MIGRATION_SAME_HOST = dict(TEST_MIGRATION, dest_compute='host1') - -IMAGE1 = { - 'id': '3e865d14-8c1e-4615-b73f-f78eaecabfbd', - 'name': 'image1', - 'size': 300, - 'container_format': 'bare', - 'disk_format': 'raw', - 'checksum': 'b518a8ba2b152b5607aceb5703fac072', -} -TEST_IMAGE1 = image_meta.ImageMeta.from_dict(IMAGE1) -EMPTY_IMAGE = image_meta.ImageMeta.from_dict({}) - -# NOTE(mikal): All of this is because if dnspython is present in your -# environment then eventlet monkeypatches socket.getaddrinfo() with an -# implementation which doesn't work for IPv6. What we're checking here is -# that the magic environment variable was set when the import happened. 
-if ('eventlet' in sys.modules): - if (os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): - raise ImportError('eventlet imported before nova/cmd/__init__ ' - '(env var set to %s)' - % os.environ.get('EVENTLET_NO_GREENDNS')) - -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' -import eventlet - -eventlet.monkey_patch(os=False) diff --git a/nova_powervm/tests/virt/powervm/disk/__init__.py b/nova_powervm/tests/virt/powervm/disk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/virt/powervm/disk/fake_adapter.py b/nova_powervm/tests/virt/powervm/disk/fake_adapter.py deleted file mode 100644 index 4e83315c..00000000 --- a/nova_powervm/tests/virt/powervm/disk/fake_adapter.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova_powervm.virt.powervm.disk import driver as disk_dvr - - -class FakeDiskAdapter(disk_dvr.DiskAdapter): - """A fake subclass of DiskAdapter. - - This is done so that the abstract methods/properties can be stubbed and the - class can be instantiated for testing. - """ - def vios_uuids(self): - pass - - def _disk_match_func(self, disk_type, instance): - pass - - def disconnect_disk_from_mgmt(self, vios_uuid, disk_name): - pass - - def capacity(self): - pass - - def capacity_used(self): - pass - - def disconnect_disk(self, instance): - pass - - def delete_disks(self, storage_elems): - pass - - def create_disk_from_image(self, context, instance, image_meta): - pass - - def connect_disk(self, instance, disk_info, stg_ftsk): - pass - - def extend_disk(self, instance, disk_info, size): - pass - - def check_instance_shared_storage_local(self, context, instance): - pass - - def check_instance_shared_storage_remote(self, context, data): - pass diff --git a/nova_powervm/tests/virt/powervm/disk/test_driver.py b/nova_powervm/tests/virt/powervm/disk/test_driver.py deleted file mode 100644 index ced8f12a..00000000 --- a/nova_powervm/tests/virt/powervm/disk/test_driver.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures -import mock -from nova import test -from pypowervm import const as pvm_const - -from nova_powervm.tests.virt.powervm.disk import fake_adapter -from nova_powervm.tests.virt.powervm import fixtures as fx - - -class TestDiskAdapter(test.NoDBTestCase): - """Unit Tests for the generic storage driver.""" - - def setUp(self): - super(TestDiskAdapter, self).setUp() - self.useFixture(fx.ImageAPI()) - - # Return the mgmt uuid - self.mgmt_uuid = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock - self.mgmt_uuid.return_value = 'mp_uuid' - - # The values (adapter and host uuid) are not used in the base. - # Default them to None. We use the fake adapter here because we can't - # instantiate DiskAdapter which is an abstract base class. - self.st_adpt = fake_adapter.FakeDiskAdapter(None, None) - - def test_get_info(self): - # Ensure the base method returns empty dict - self.assertEqual({}, self.st_adpt.get_info()) - - def test_validate(self): - # Ensure the base method returns error message - self.assertIsNotNone(self.st_adpt.validate(None)) - - @mock.patch("pypowervm.util.sanitize_file_name_for_api") - def test_get_disk_name(self, mock_san): - inst = mock.Mock() - inst.configure_mock(name='a_name_that_is_longer_than_eight', - uuid='01234567-abcd-abcd-abcd-123412341234') - - # Long - self.assertEqual(mock_san.return_value, - self.st_adpt._get_disk_name('type', inst)) - mock_san.assert_called_with(inst.name, prefix='type_', - max_len=pvm_const.MaxLen.FILENAME_DEFAULT) - - mock_san.reset_mock() - - # Short - self.assertEqual(mock_san.return_value, - self.st_adpt._get_disk_name('type', inst, short=True)) - mock_san.assert_called_with('a_name_t_0123', prefix='t_', - max_len=pvm_const.MaxLen.VDISK_NAME) - - @mock.patch("pypowervm.util.sanitize_file_name_for_api") - def test_get_name_by_uuid(self, mock_san): - uuid = '01234567-abcd-abcd-abcd-123412341234' - - # Long - self.assertEqual(mock_san.return_value, - self.st_adpt.get_name_by_uuid('type', uuid)) - mock_san.assert_called_with(uuid, prefix='type_', - max_len=pvm_const.MaxLen.FILENAME_DEFAULT) - - mock_san.reset_mock() - - # Short - self.assertEqual(mock_san.return_value, - self.st_adpt.get_name_by_uuid('type', uuid, - short=True)) - mock_san.assert_called_with(uuid, prefix='t_', - max_len=pvm_const.MaxLen.VDISK_NAME) diff --git a/nova_powervm/tests/virt/powervm/disk/test_imagecache.py b/nova_powervm/tests/virt/powervm/disk/test_imagecache.py deleted file mode 100644 index 414862f9..00000000 --- a/nova_powervm/tests/virt/powervm/disk/test_imagecache.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import fixtures -import mock - -from nova import test -from pypowervm.wrappers import storage as pvm_stor -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.virt.powervm.disk import imagecache - - -class TestImageCache(test.NoDBTestCase): - """Unit Tests for the LocalDisk storage driver.""" - - def setUp(self): - super(TestImageCache, self).setUp() - self.mock_vg = mock.MagicMock(virtual_disks=[]) - # Initialize the ImageManager - self.adpt = mock.MagicMock() - self.vg_uuid = 'vg_uuid' - self.vios_uuid = 'vios_uuid' - self.img_cache = imagecache.ImageManager(self.vios_uuid, self.vg_uuid, - self.adpt) - - # Setup virtual_disks to be used later - self.inst1 = pvm_stor.VDisk.bld(None, 'b_inst1', 10) - self.inst2 = pvm_stor.VDisk.bld(None, 'b_inst2', 10) - self.image = pvm_stor.VDisk.bld(None, 'i_bf8446e4_4f52', 10) - - def test_get_base(self): - self.mock_vg_get = self.useFixture(fixtures.MockPatch( - 'pypowervm.wrappers.storage.VG.get')).mock - self.mock_vg_get.return_value = self.mock_vg - vg_wrap = self.img_cache._get_base() - self.assertEqual(vg_wrap, self.mock_vg) - self.mock_vg_get.assert_called_once_with( - self.adpt, uuid=self.vg_uuid, - parent_type=pvm_vios.VIOS.schema_type, parent_uuid=self.vios_uuid) - - def test_scan_base_image(self): - # No cached images - self.mock_vg.virtual_disks = [self.inst1, self.inst2] - base_images = self.img_cache._scan_base_image(self.mock_vg) - self.assertEqual([], base_images) - # One 'cached' image - self.mock_vg.virtual_disks.append(self.image) - base_images = self.img_cache._scan_base_image(self.mock_vg) - self.assertEqual([self.image], base_images) - - @mock.patch('pypowervm.tasks.storage.rm_vg_storage') - @mock.patch('nova.virt.imagecache.ImageCacheManager.' - '_list_running_instances') - @mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.' - '_scan_base_image') - def test_age_and_verify(self, mock_scan, mock_list, mock_rm): - mock_context = mock.MagicMock() - all_inst = mock.MagicMock() - mock_scan.return_value = [self.image] - # Two instances backed by image 'bf8446e4_4f52' - # Mock dict returned from _list_running_instances - used_images = {'': [self.inst1, self.inst2], - 'bf8446e4_4f52': [self.inst1, self.inst2]} - mock_list.return_value = {'used_images': used_images} - - self.mock_vg.virtual_disks = [self.inst1, self.inst2, self.image] - self.img_cache._age_and_verify_cached_images(mock_context, all_inst, - self.mock_vg) - mock_rm.assert_not_called() - mock_scan.assert_called_once_with(self.mock_vg) - mock_rm.reset_mock() - - # No instances - mock_list.return_value = {'used_images': {}} - self.img_cache._age_and_verify_cached_images(mock_context, all_inst, - self.mock_vg) - mock_rm.assert_called_once_with(self.mock_vg, vdisks=[self.image]) - - @mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.' - '_get_base') - @mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.' 
- '_age_and_verify_cached_images') - def test_update(self, mock_age, mock_base): - mock_base.return_value = self.mock_vg - mock_context = mock.MagicMock() - mock_all_inst = mock.MagicMock() - self.img_cache.update(mock_context, mock_all_inst) - mock_base.assert_called_once_with() - mock_age.assert_called_once_with(mock_context, mock_all_inst, - self.mock_vg) diff --git a/nova_powervm/tests/virt/powervm/disk/test_localdisk.py b/nova_powervm/tests/virt/powervm/disk/test_localdisk.py deleted file mode 100644 index 066fe80d..00000000 --- a/nova_powervm/tests/virt/powervm/disk/test_localdisk.py +++ /dev/null @@ -1,447 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mock - -from nova import exception as nova_exc -from nova import test -from oslo_utils.fixture import uuidsentinel as uuids -from pypowervm import const as pvm_const -from pypowervm.tasks import storage as tsk_stor -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import storage as pvm_stor -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.tests.virt import powervm -from nova_powervm.tests.virt.powervm import fixtures as fx -from nova_powervm.virt.powervm.disk import driver as disk_dvr -from nova_powervm.virt.powervm.disk import localdisk as ld -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm import vm - - -class TestLocalDisk(test.NoDBTestCase): - """Unit Tests for the LocalDisk storage driver.""" - - def setUp(self): - super(TestLocalDisk, self).setUp() - - self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt - - # The mock VIOS needs to have scsi_mappings as a list. Internals are - # set by individual test cases as needed. - smaps = [mock.Mock()] - self.vio_to_vg = mock.Mock(spec=pvm_vios.VIOS, scsi_mappings=smaps, - uuid='vios-uuid') - # Set up mock for internal VIOS.get()s - self.mock_vios_get = self.useFixture(fixtures.MockPatch( - 'pypowervm.wrappers.virtual_io_server.VIOS', - autospec=True)).mock.get - # For our tests, we want find_maps to return the mocked list of scsi - # mappings in our mocked VIOS. 
- self.mock_find_maps = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.scsi_mapper.find_maps', autospec=True)).mock - self.mock_find_maps.return_value = smaps - - # Set up for the mocks for get_ls - self.mock_find_vg = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.storage.find_vg', autospec=True)).mock - self.vg_uuid = uuids.vg_uuid - self.vg = mock.Mock(spec=pvm_stor.VG, uuid=self.vg_uuid) - self.mock_find_vg.return_value = (self.vio_to_vg, self.vg) - - # Return the mgmt uuid - self.mgmt_uuid = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock - self.mgmt_uuid.return_value = 'mp_uuid' - - self.flags(volume_group_name='fakevg', group='powervm') - - @staticmethod - def get_ls(adpt): - return ld.LocalStorage(adpt, 'host_uuid') - - def test_init(self): - local = self.get_ls(self.apt) - self.mock_find_vg.assert_called_once_with(self.apt, 'fakevg') - self.assertEqual('vios-uuid', local._vios_uuid) - self.assertEqual(self.vg_uuid, local.vg_uuid) - self.assertEqual(self.apt, local.adapter) - self.assertEqual('host_uuid', local.host_uuid) - - @mock.patch('pypowervm.tasks.storage.crt_copy_vdisk', autospec=True) - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.' - '_get_or_upload_image') - def test_create_disk_from_image(self, mock_get_image, mock_copy): - mock_copy.return_value = 'vdisk' - inst = mock.Mock() - inst.configure_mock(name='Inst Name', - uuid='d5065c2c-ac43-3fa6-af32-ea84a3960291', - flavor=mock.Mock(root_gb=20)) - mock_image = mock.MagicMock() - mock_image.name = 'cached_image' - mock_get_image.return_value = mock_image - - vdisk = self.get_ls(self.apt).create_disk_from_image( - None, inst, powervm.TEST_IMAGE1) - self.assertEqual('vdisk', vdisk) - - mock_get_image.reset_mock() - exception = Exception - mock_get_image.side_effect = exception - with mock.patch('time.sleep', autospec=True) as mock_sleep: - self.assertRaises(exception, - self.get_ls(self.apt).create_disk_from_image, - None, inst, powervm.TEST_IMAGE1) - self.assertEqual(mock_get_image.call_count, 4) - self.assertEqual(3, mock_sleep.call_count) - - @mock.patch('pypowervm.tasks.storage.upload_new_vdisk', autospec=True) - @mock.patch('nova.image.api.API.download') - @mock.patch('nova_powervm.virt.powervm.disk.driver.IterableToFileAdapter') - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.' - '_get_vg_wrap') - def test_get_or_upload_image(self, mock_get_vg, mock_it2f, mock_img_api, - mock_upload_vdisk): - mock_wrapper = mock.Mock() - mock_wrapper.configure_mock(name='vg_name', virtual_disks=[]) - mock_get_vg.return_value = mock_wrapper - local = self.get_ls(self.apt) - - self.assertEqual( - mock_upload_vdisk.return_value[0].udid, - local._get_or_upload_image('ctx', powervm.TEST_IMAGE1)) - - # Make sure the upload was invoked properly - mock_upload_vdisk.assert_called_once_with( - self.apt, 'vios-uuid', self.vg_uuid, mock_it2f.return_value, - 'i_3e865d14_8c1e', powervm.TEST_IMAGE1.size, - d_size=powervm.TEST_IMAGE1.size, - upload_type=tsk_stor.UploadType.IO_STREAM, - file_format=powervm.TEST_IMAGE1.disk_format) - mock_it2f.assert_called_once_with(mock_img_api.return_value) - mock_img_api.assert_called_once_with('ctx', powervm.TEST_IMAGE1.id) - - mock_img_api.reset_mock() - mock_upload_vdisk.reset_mock() - - # Now ensure upload_new_vdisk isn't called if the vdisk already exists. 
- mock_image = mock.MagicMock() - mock_image.configure_mock(name='i_3e865d14_8c1e', udid='udid') - mock_instance = mock.MagicMock() - mock_instance.configure_mock(name='b_Inst_Nam_d506') - mock_wrapper.virtual_disks = [mock_instance, mock_image] - mock_get_vg.return_value = mock_wrapper - self.assertEqual( - mock_image.udid, - local._get_or_upload_image('ctx', powervm.TEST_IMAGE1)) - mock_img_api.assert_not_called() - self.assertEqual(0, mock_upload_vdisk.call_count) - - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.' - '_get_vg_wrap') - def test_capacity(self, mock_vg): - """Tests the capacity methods.""" - local = self.get_ls(self.apt) - mock_vg.return_value = mock.Mock( - capacity='5120', available_size='2048') - self.assertEqual(5120.0, local.capacity) - self.assertEqual(3072.0, local.capacity_used) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_active_vioses', autospec=True) - def test_disconnect_disk(self, mock_active_vioses, mock_rm_maps): - # vio_to_vg is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTaskFx and FeedTask. - feed = [self.vio_to_vg] - mock_active_vioses.return_value = feed - - # The mock return values - mock_rm_maps.return_value = True - - # Create the feed task - local = self.get_ls(self.apt) - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - - # As initialized above, remove_maps returns True to trigger update. - local.disconnect_disk(inst, stg_ftsk=None, - disk_type=[disk_dvr.DiskType.BOOT]) - self.assertEqual(1, mock_rm_maps.call_count) - self.assertEqual(1, self.vio_to_vg.update.call_count) - mock_rm_maps.assert_called_once_with(feed[0], fx.FAKE_INST_UUID_PVM, - match_func=mock.ANY) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_active_vioses', autospec=True) - def test_disconnect_disk_no_update(self, mock_active_vioses, mock_rm_maps): - # vio_to_vg is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTaskFx and FeedTask. - feed = [self.vio_to_vg] - mock_active_vioses.return_value = feed - - # The mock return values - mock_rm_maps.return_value = False - - # Create the feed task - local = self.get_ls(self.apt) - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - # As initialized above, remove_maps returns True to trigger update. - local.disconnect_disk(inst, stg_ftsk=None, - disk_type=[disk_dvr.DiskType.BOOT]) - self.assertEqual(1, mock_rm_maps.call_count) - self.vio_to_vg.update.assert_not_called() - mock_rm_maps.assert_called_once_with(feed[0], fx.FAKE_INST_UUID_PVM, - match_func=mock.ANY) - - @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True) - def test_disconnect_disk_disktype(self, mock_match_func): - """Ensures that the match function passes in the right prefix.""" - # Set up the mock data. - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - mock_match_func.return_value = 'test' - - # Invoke - local = self.get_ls(self.apt) - local.disconnect_disk(inst, stg_ftsk=mock.MagicMock(), - disk_type=[disk_dvr.DiskType.BOOT]) - - # Make sure the find maps is invoked once. 
- self.mock_find_maps.assert_called_once_with( - mock.ANY, client_lpar_id=fx.FAKE_INST_UUID_PVM, match_func='test') - - # Make sure the matching function is generated with the right disk type - mock_match_func.assert_called_once_with( - pvm_stor.VDisk, prefixes=[disk_dvr.DiskType.BOOT]) - - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_active_vioses', autospec=True) - def test_connect_disk(self, mock_active_vioses, mock_add_map, - mock_build_map): - # vio_to_vg is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTask. - feed = [self.vio_to_vg] - mock_active_vioses.return_value = feed - - # The mock return values - mock_add_map.return_value = True - mock_build_map.return_value = 'fake_map' - - # Need the driver to return the actual UUID of the VIOS in the feed, - # to match the FeedTask. - local = self.get_ls(self.apt) - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - lpar_uuid = vm.get_pvm_uuid(inst) - mock_disk = mock.Mock() - # As initialized above, remove_maps returns True to trigger update. - local.connect_disk(inst, mock_disk, stg_ftsk=None) - self.assertEqual(1, mock_add_map.call_count) - mock_build_map.assert_called_once_with( - 'host_uuid', self.vio_to_vg, lpar_uuid, mock_disk) - mock_add_map.assert_called_once_with(feed[0], 'fake_map') - self.assertEqual(1, self.vio_to_vg.update.call_count) - - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_active_vioses', autospec=True) - def test_connect_disk_no_update(self, mock_active_vioses, mock_add_map, - mock_build_map): - # vio_to_vg is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTask. - feed = [self.vio_to_vg] - mock_active_vioses.return_value = feed - - # The mock return values - mock_add_map.return_value = False - mock_build_map.return_value = 'fake_map' - - # Need the driver to return the actual UUID of the VIOS in the feed, - # to match the FeedTask. - local = self.get_ls(self.apt) - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - - # As initialized above, remove_maps returns True to trigger update. - local.connect_disk(inst, mock.Mock(), stg_ftsk=None) - self.assertEqual(1, mock_add_map.call_count) - mock_add_map.assert_called_once_with(feed[0], 'fake_map') - self.vio_to_vg.update.assert_not_called() - - @mock.patch('pypowervm.wrappers.storage.VG.update', new=mock.Mock()) - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.' 
- '_get_vg_wrap') - def test_delete_disks(self, mock_vg): - # Mocks - self.apt.side_effect = [mock.Mock()] - - mock_remove = mock.MagicMock() - mock_remove.name = 'disk' - - mock_wrapper = mock.MagicMock() - mock_wrapper.virtual_disks = [mock_remove] - mock_vg.return_value = mock_wrapper - - # Invoke the call - local = self.get_ls(self.apt) - local.delete_disks([mock_remove]) - - # Validate the call - self.assertEqual(1, mock_wrapper.update.call_count) - self.assertEqual(0, len(mock_wrapper.virtual_disks)) - - @mock.patch('pypowervm.wrappers.storage.VG', autospec=True) - def test_extend_disk_not_found(self, mock_vg): - local = self.get_ls(self.apt) - inst = mock.Mock() - inst.name = 'Name Of Instance' - inst.uuid = 'd5065c2c-ac43-3fa6-af32-ea84a3960291' - - vdisk = mock.Mock(name='vdisk') - vdisk.name = 'NO_MATCH' - - resp = mock.Mock(name='response') - resp.virtual_disks = [vdisk] - mock_vg.get.return_value = resp - - self.assertRaises(nova_exc.DiskNotFound, local.extend_disk, - inst, dict(type='boot'), 10) - - vdisk.name = 'b_Name_Of__d506' - local.extend_disk(inst, dict(type='boot'), 1000) - # Validate the call - self.assertEqual(1, resp.update.call_count) - self.assertEqual(vdisk.capacity, 1000) - - @mock.patch('pypowervm.wrappers.storage.VG', autospec=True) - def test_extend_disk_file_format(self, mock_vg): - local = self.get_ls(self.apt) - inst = mock.Mock() - inst.name = 'Name Of Instance' - inst.uuid = 'd5065c2c-ac43-3fa6-af32-ea84a3960291' - - vdisk = mock.Mock(name='vdisk') - vdisk.configure_mock(name='/path/to/b_Name_Of__d506', - backstore_type=pvm_stor.BackStoreType.USER_QCOW, - file_format=pvm_stor.FileFormatType.QCOW2) - resp = mock.Mock(name='response') - resp.virtual_disks = [vdisk] - mock_vg.get.return_value = resp - self.assertRaises(nova_exc.ResizeError, local.extend_disk, - inst, dict(type='boot'), 10) - vdisk.file_format = pvm_stor.FileFormatType.RAW - self.assertRaises(nova_exc.ResizeError, local.extend_disk, - inst, dict(type='boot'), 10) - - def _bld_mocks_for_instance_disk(self): - inst = mock.Mock() - inst.name = 'Name Of Instance' - inst.uuid = uuids.inst_uuid - lpar_wrap = mock.Mock() - lpar_wrap.id = 2 - vios1 = self.vio_to_vg - back_stor_name = 'b_Name_Of__' + inst.uuid[:4] - vios1.scsi_mappings[0].backing_storage.name = back_stor_name - return inst, lpar_wrap, vios1 - - def test_get_bootdisk_path(self): - local = self.get_ls(self.apt) - inst = mock.Mock() - inst.name = 'Name Of Instance' - inst.uuid = 'f921620A-EE30-440E-8C2D-9F7BA123F298' - vios1 = self.vio_to_vg - vios1.scsi_mappings[0].server_adapter.backing_dev_name = 'boot_7f81628' - vios1.scsi_mappings[0].backing_storage.name = 'b_Name_Of__f921' - self.mock_vios_get.return_value = vios1 - dev_name = local.get_bootdisk_path(inst, vios1.uuid) - self.assertEqual('boot_7f81628', dev_name) - - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock()) - def test_get_bootdisk_iter(self, mock_lpar_wrap): - local = self.get_ls(self.apt) - inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk() - mock_lpar_wrap.return_value = lpar_wrap - - # Good path - self.mock_vios_get.return_value = vios1 - for vdisk, vios in local._get_bootdisk_iter(inst): - self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk) - self.assertEqual(vios1.uuid, vios.uuid) - self.mock_vios_get.assert_called_once_with( - self.apt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP]) - - # Not found because no storage of that name - 
self.mock_vios_get.reset_mock() - self.mock_find_maps.return_value = [] - for vdisk, vios in local._get_bootdisk_iter(inst): - self.fail('Should not have found any storage elements.') - self.mock_vios_get.assert_called_once_with( - self.apt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP]) - - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper') - @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True) - def test_connect_instance_disk_to_mgmt_partition(self, mock_add, mock_lw): - local = self.get_ls(self.apt) - inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk() - mock_lw.return_value = lpar_wrap - - # Good path - self.mock_vios_get.return_value = vios1 - vdisk, vios = local.connect_instance_disk_to_mgmt(inst) - self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk) - self.assertIs(vios1, vios) - self.assertEqual(1, mock_add.call_count) - mock_add.assert_called_with('host_uuid', vios, 'mp_uuid', vdisk) - - # add_vscsi_mapping raises. Show-stopper since only one VIOS. - mock_add.reset_mock() - mock_add.side_effect = Exception("mapping failed") - self.assertRaises(npvmex.InstanceDiskMappingFailed, - local.connect_instance_disk_to_mgmt, inst) - self.assertEqual(1, mock_add.call_count) - - # Not found - mock_add.reset_mock() - self.mock_find_maps.return_value = [] - self.assertRaises(npvmex.InstanceDiskMappingFailed, - local.connect_instance_disk_to_mgmt, inst) - self.assertFalse(mock_add.called) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_vdisk_mapping', - autospec=True) - def test_disconnect_disk_from_mgmt_partition(self, mock_rm_vdisk_map): - local = self.get_ls(self.apt) - local.disconnect_disk_from_mgmt('vios-uuid', 'disk_name') - mock_rm_vdisk_map.assert_called_with( - local.adapter, 'vios-uuid', 'mp_uuid', disk_names=['disk_name']) - - def test_capabilities_non_mgmt_vios(self): - local = self.get_ls(self.apt) - self.assertFalse(local.capabilities.get('shared_storage')) - self.assertTrue(local.capabilities.get('has_imagecache')) - # With the default setup, the management partition isn't the VIOS. - self.assertFalse(local.capabilities.get('snapshot')) - - def test_capabilities_mgmt_vios(self): - # Make the management partition the VIOS. - self.vio_to_vg.uuid = self.mgmt_uuid.return_value - local = self.get_ls(self.apt) - self.assertFalse(local.capabilities.get('shared_storage')) - self.assertTrue(local.capabilities.get('has_imagecache')) - self.assertTrue(local.capabilities.get('snapshot')) diff --git a/nova_powervm/tests/virt/powervm/disk/test_ssp.py b/nova_powervm/tests/virt/powervm/disk/test_ssp.py deleted file mode 100644 index 05e105c1..00000000 --- a/nova_powervm/tests/virt/powervm/disk/test_ssp.py +++ /dev/null @@ -1,625 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import fixtures -import mock - -from nova.objects import image_meta -from nova import test -from pypowervm import const -from pypowervm.tasks import storage as tsk_stg -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import cluster as pvm_clust -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.tests.virt.powervm import fixtures as fx -from nova_powervm.virt.powervm.disk import driver as disk_dvr -from nova_powervm.virt.powervm.disk import ssp as ssp_dvr -from nova_powervm.virt.powervm import exception as npvmex - - -class SSPFixture(fixtures.Fixture): - """Patch out PyPowerVM SSP and Cluster EntryWrapper methods.""" - - def __init__(self): - pass - - def mockpatch(self, methstr): - return self.useFixture(fixtures.MockPatch(methstr)).mock - - def setUp(self): - super(SSPFixture, self).setUp() - self.mock_clust_get = self.mockpatch( - 'pypowervm.wrappers.cluster.Cluster.get') - self.mock_clust_search = self.mockpatch( - 'pypowervm.wrappers.cluster.Cluster.search') - self.mock_ssp_gbhref = self.mockpatch( - 'pypowervm.wrappers.storage.SSP.get_by_href') - self.mock_ssp_update = self.mockpatch( - 'pypowervm.wrappers.storage.SSP.update') - self.mock_get_tier = self.mockpatch( - 'pypowervm.tasks.storage.default_tier_for_ssp') - - -class TestSSPDiskAdapter(test.NoDBTestCase): - """Unit Tests for the LocalDisk storage driver.""" - - def setUp(self): - super(TestSSPDiskAdapter, self).setUp() - - class Instance(object): - uuid = fx.FAKE_INST_UUID - name = 'instance-name' - - self.instance = Instance() - - self.apt = mock.Mock() - self.host_uuid = 'host_uuid' - - self.sspfx = self.useFixture(SSPFixture()) - - self.ssp_wrap = mock.Mock(spec=pvm_stg.SSP) - self.ssp_wrap.refresh.return_value = self.ssp_wrap - self.node1 = mock.Mock() - self.node2 = mock.Mock() - self.clust_wrap = mock.Mock(spec=pvm_clust.Cluster, - nodes=[self.node1, self.node2]) - self.clust_wrap.refresh.return_value = self.clust_wrap - self.vio_wrap = mock.Mock(spec=pvm_vios.VIOS, uuid='uuid') - - # For _fetch_cluster() with no name - self.mock_clust_get = self.sspfx.mock_clust_get - self.mock_clust_get.return_value = [self.clust_wrap] - # For _fetch_cluster() with configured name - self.mock_clust_search = self.sspfx.mock_clust_search - # EntryWrapper.search always returns a list of wrappers. - self.mock_clust_search.return_value = [self.clust_wrap] - - # For _fetch_ssp() fresh - self.mock_ssp_gbhref = self.sspfx.mock_ssp_gbhref - self.mock_ssp_gbhref.return_value = self.ssp_wrap - - # For _tier - self.mock_get_tier = self.sspfx.mock_get_tier - - # By default, assume the config supplied a Cluster name - self.flags(cluster_name='clust1', group='powervm') - - # Return the mgmt uuid - self.mgmt_uuid = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock - self.mgmt_uuid.return_value = 'mp_uuid' - - def _get_ssp_stor(self): - return ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid) - - def test_tier_cache(self): - # default_tier_for_ssp not yet invoked - self.mock_get_tier.assert_not_called() - ssp = self._get_ssp_stor() - # default_tier_for_ssp invoked by constructor - self.mock_get_tier.assert_called_once_with(ssp._ssp_wrap) - self.assertEqual(self.mock_get_tier.return_value, ssp._tier) - # default_tier_for_ssp not called again. 
- self.assertEqual(1, self.mock_get_tier.call_count) - - def test_capabilities(self): - ssp_stor = self._get_ssp_stor() - self.assertTrue(ssp_stor.capabilities.get('shared_storage')) - self.assertFalse(ssp_stor.capabilities.get('has_imagecache')) - self.assertTrue(ssp_stor.capabilities.get('snapshot')) - - def test_get_info(self): - ssp_stor = self._get_ssp_stor() - expected = {'cluster_name': self.clust_wrap.name, - 'ssp_name': self.ssp_wrap.name, - 'ssp_uuid': self.ssp_wrap.uuid} - # Ensure the driver reports its cluster and SSP info - self.assertEqual(expected, ssp_stor.get_info()) - - def test_validate(self): - ssp_stor = self._get_ssp_stor() - fake_data = {} - # Ensure returns error message when no data - self.assertIsNotNone(ssp_stor.validate(fake_data)) - - # Get our own data and it should always match! - fake_data = ssp_stor.get_info() - # Ensure returns no error on good data - self.assertIsNone(ssp_stor.validate(fake_data)) - - def test_init_green_with_config(self): - """Bootstrap SSPStorage, testing call to _fetch_cluster. - - Driver init should search for cluster by name. - """ - # Invoke __init__ => _fetch_cluster() - self._get_ssp_stor() - # _fetch_cluster() WITH configured name does a search, but not a get. - # Refresh shouldn't be invoked. - self.mock_clust_search.assert_called_once_with(self.apt, name='clust1') - self.mock_clust_get.assert_not_called() - self.clust_wrap.refresh.assert_not_called() - - def test_init_green_no_config(self): - """No cluster name specified in config; one cluster on host - ok.""" - self.flags(cluster_name='', group='powervm') - self._get_ssp_stor() - # _fetch_cluster() WITHOUT configured name does a feed GET, not a search. - # Refresh shouldn't be invoked. - self.mock_clust_search.assert_not_called() - self.mock_clust_get.assert_called_once_with(self.apt) - self.clust_wrap.refresh.assert_not_called() - - def test_init_ClusterNotFoundByName(self): - """Empty feed comes back from search - no cluster by that name.""" - self.mock_clust_search.return_value = [] - self.assertRaises(npvmex.ClusterNotFoundByName, self._get_ssp_stor) - - def test_init_TooManyClustersFound(self): - """Search-by-name returns more than one result.""" - self.mock_clust_search.return_value = ['newclust1', 'newclust2'] - self.assertRaises(npvmex.TooManyClustersFound, self._get_ssp_stor) - - def test_init_NoConfigNoClusterFound(self): - """No cluster name specified in config, no clusters on host.""" - self.flags(cluster_name='', group='powervm') - self.mock_clust_get.return_value = [] - self.assertRaises(npvmex.NoConfigNoClusterFound, self._get_ssp_stor) - - def test_init_NoConfigTooManyClusters(self): - """No cluster name specified in config, more than one cluster on host.""" - self.flags(cluster_name='', group='powervm') - self.mock_clust_get.return_value = ['newclust1', 'newclust2'] - self.assertRaises(npvmex.NoConfigTooManyClusters, self._get_ssp_stor) - - def test_refresh_cluster(self): - """_refresh_cluster with cached wrapper.""" - # Save original cluster wrapper for later comparison - orig_clust_wrap = self.clust_wrap - # Prime _clust_wrap - ssp_stor = self._get_ssp_stor() - # Verify baseline call counts - self.mock_clust_search.assert_called_once_with(self.apt, name='clust1') - self.clust_wrap.refresh.assert_not_called() - clust_wrap = ssp_stor._refresh_cluster() - # This should call refresh - self.mock_clust_search.assert_called_once_with(self.apt, name='clust1') - self.mock_clust_get.assert_not_called() - self.clust_wrap.refresh.assert_called_once_with() -
self.assertEqual(clust_wrap.name, orig_clust_wrap.name) - - def test_fetch_ssp(self): - # For later comparison - orig_ssp_wrap = self.ssp_wrap - # Verify baseline call counts - self.mock_ssp_gbhref.assert_not_called() - self.ssp_wrap.refresh.assert_not_called() - # This should prime self._ssp_wrap: calls get_by_href but not refresh. - ssp_stor = self._get_ssp_stor() - self.mock_ssp_gbhref.assert_called_once_with(self.apt, - self.clust_wrap.ssp_uri) - self.ssp_wrap.refresh.assert_not_called() - # Accessing the @property will trigger refresh - ssp_wrap = ssp_stor._ssp - self.mock_ssp_gbhref.assert_called_once_with(self.apt, - self.clust_wrap.ssp_uri) - self.ssp_wrap.refresh.assert_called_once_with() - self.assertEqual(ssp_wrap.name, orig_ssp_wrap.name) - - @mock.patch('pypowervm.util.get_req_path_uuid') - def test_vios_uuids(self, mock_rpu): - mock_rpu.return_value = self.host_uuid - ssp_stor = self._get_ssp_stor() - vios_uuids = ssp_stor.vios_uuids - self.assertEqual({self.node1.vios_uuid, self.node2.vios_uuid}, - set(vios_uuids)) - mock_rpu.assert_has_calls( - [mock.call(node.vios_uri, preserve_case=True, root=True) - for node in [self.node1, self.node2]]) - s = set() - for i in range(1000): - u = ssp_stor._any_vios_uuid() - # Make sure we got a good value - self.assertIn(u, vios_uuids) - s.add(u) - # Make sure we hit all the values over 1000 iterations. This isn't - # guaranteed to work, but the odds of failure should be infinitesimal. - self.assertEqual(set(vios_uuids), s) - - mock_rpu.reset_mock() - - # Test VIOSes on other nodes, which won't have uuid or uri - node1 = mock.Mock(vios_uuid=None, vios_uri='uri1') - node2 = mock.Mock(vios_uuid='2', vios_uri=None) - # This mock is good and should be returned - node3 = mock.Mock(vios_uuid='3', vios_uri='uri3') - self.clust_wrap.nodes = [node1, node2, node3] - self.assertEqual(['3'], ssp_stor.vios_uuids) - # get_req_path_uuid was only called on the good one - mock_rpu.assert_called_once_with('uri3', preserve_case=True, root=True) - - def test_capacity(self): - ssp_stor = self._get_ssp_stor() - self.mock_get_tier.return_value.refresh.return_value.capacity = 10 - self.assertAlmostEqual(10.0, ssp_stor.capacity) - - def test_capacity_used(self): - ssp_stor = self._get_ssp_stor() - self.ssp_wrap.capacity = 4.56 - self.ssp_wrap.free_space = 1.23 - self.assertAlmostEqual((4.56 - 1.23), ssp_stor.capacity_used) - - @mock.patch('pypowervm.tasks.cluster_ssp.get_or_upload_image_lu') - @mock.patch('nova_powervm.virt.powervm.disk.driver.DiskAdapter.' - '_get_image_name') - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - '_any_vios_uuid') - @mock.patch('nova_powervm.virt.powervm.disk.driver.DiskAdapter.'
- '_get_disk_name') - @mock.patch('pypowervm.tasks.storage.crt_lu') - @mock.patch('nova.image.api.API.download') - @mock.patch('nova_powervm.virt.powervm.disk.driver.IterableToFileAdapter') - def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu, - mock_gdn, mock_vuuid, mock_gin, mock_goru): - instance = mock.Mock() - img_meta = mock.Mock() - - ssp = self._get_ssp_stor() - mock_crt_lu.return_value = ssp._ssp_wrap, 'lu' - - mock_gin.return_value = 'img_name' - mock_vuuid.return_value = 'vios_uuid' - - # Default image_type - self.assertEqual('lu', ssp.create_disk_from_image( - 'ctx', instance, img_meta)) - mock_goru.assert_called_once_with( - self.mock_get_tier.return_value, mock_gin.return_value, - mock_vuuid.return_value, mock_it2f.return_value, img_meta.size, - upload_type=tsk_stg.UploadType.IO_STREAM) - mock_dl.assert_called_once_with('ctx', img_meta.id) - mock_it2f.assert_called_once_with(mock_dl.return_value) - mock_gdn.assert_called_once_with(disk_dvr.DiskType.BOOT, instance) - mock_crt_lu.assert_called_once_with( - self.mock_get_tier.return_value, mock_gdn.return_value, - instance.flavor.root_gb, typ=pvm_stg.LUType.DISK, - clone=mock_goru.return_value) - - # Reset - mock_goru.reset_mock() - mock_gdn.reset_mock() - mock_crt_lu.reset_mock() - mock_dl.reset_mock() - mock_it2f.reset_mock() - - # Specified image_type - self.assertEqual('lu', ssp.create_disk_from_image( - 'ctx', instance, img_meta, image_type='imgtyp')) - mock_goru.assert_called_once_with( - self.mock_get_tier.return_value, mock_gin.return_value, - mock_vuuid.return_value, mock_it2f.return_value, img_meta.size, - upload_type=tsk_stg.UploadType.IO_STREAM) - mock_dl.assert_called_once_with('ctx', img_meta.id) - mock_it2f.assert_called_once_with(mock_dl.return_value) - mock_gdn.assert_called_once_with('imgtyp', instance) - mock_crt_lu.assert_called_once_with( - self.mock_get_tier.return_value, mock_gdn.return_value, - instance.flavor.root_gb, typ=pvm_stg.LUType.DISK, - clone=mock_goru.return_value) - - def test_get_image_name(self): - """Generate image name from ImageMeta.""" - ssp = self._get_ssp_stor() - - def verify_image_name(name, checksum, expected): - img_meta = image_meta.ImageMeta(name=name, checksum=checksum) - self.assertEqual(expected, ssp._get_image_name(img_meta)) - self.assertTrue(len(expected) <= const.MaxLen.FILENAME_DEFAULT) - - verify_image_name('foo', 'bar', 'image_foo_bar') - # Ensure a really long name gets truncated properly. Note also '-' - # chars are sanitized. - verify_image_name( - 'Template_zw82enbix_PowerVM-CI-18y2385y9123785192364', - 'b518a8ba2b152b5607aceb5703fac072', - 'image_Template_zw82enbix_PowerVM_CI_18y2385y91' - '_b518a8ba2b152b5607aceb5703fac072') - - @mock.patch('pypowervm.wrappers.storage.LUEnt.search') - @mock.patch('nova_powervm.virt.powervm.disk.driver.DiskAdapter.' - '_get_disk_name') - def test_get_disk_ref(self, mock_dsk_nm, mock_srch): - ssp = self._get_ssp_stor() - self.assertEqual(mock_srch.return_value, ssp.get_disk_ref( - self.instance, disk_dvr.DiskType.BOOT)) - mock_dsk_nm.assert_called_with(disk_dvr.DiskType.BOOT, self.instance) - mock_srch.assert_called_with( - ssp.adapter, parent=self.mock_get_tier.return_value, - name=mock_dsk_nm.return_value, lu_type=pvm_stg.LUType.DISK, - one_result=True) - - # Assert handles not finding it. - mock_srch.return_value = None - self.assertIsNone( - ssp.get_disk_ref(self.instance, disk_dvr.DiskType.BOOT)) - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' 
- 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.scsi_mapper.add_map') - @mock.patch('pypowervm.tasks.partition.get_active_vioses') - def test_connect_disk(self, mock_active_vioses, mock_add_map, - mock_build_map, mock_vio_uuids): - # vio is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTaskFx and FeedTask. - feed = [self.vio_wrap] - mock_active_vioses.return_value = feed - ft_fx = pvm_fx.FeedTaskFx(feed) - self.useFixture(ft_fx) - - # The mock return values - mock_add_map.return_value = True - mock_build_map.return_value = 'fake_map' - - # Need the driver to return the actual UUID of the VIOS in the feed, - # to match the FeedTask. - ssp = self._get_ssp_stor() - mock_vio_uuids.return_value = [self.vio_wrap.uuid] - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - - # As initialized above, add_map returns True to trigger the update. - ssp.connect_disk(inst, mock.Mock(), stg_ftsk=None) - mock_add_map.assert_called_once_with(self.vio_wrap, 'fake_map') - self.vio_wrap.update.assert_called_once_with(timeout=mock.ANY) - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.scsi_mapper.add_map') - @mock.patch('pypowervm.tasks.partition.get_active_vioses') - def test_connect_disk_no_update(self, mock_active_vioses, mock_add_map, - mock_build_map, mock_vio_uuids): - # vio is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTaskFx and FeedTask. - feed = [self.vio_wrap] - mock_active_vioses.return_value = feed - ft_fx = pvm_fx.FeedTaskFx(feed) - self.useFixture(ft_fx) - - # The mock return values - mock_add_map.return_value = None - mock_build_map.return_value = 'fake_map' - - # Need the driver to return the actual UUID of the VIOS in the feed, - # to match the FeedTask. - ssp = self._get_ssp_stor() - mock_vio_uuids.return_value = [self.vio_wrap.uuid] - inst = mock.Mock(uuid=fx.FAKE_INST_UUID) - - # As initialized above, add_map returns None to skip the update. - ssp.connect_disk(inst, mock.Mock(), stg_ftsk=None) - mock_add_map.assert_called_once_with(self.vio_wrap, 'fake_map') - self.vio_wrap.update.assert_not_called() - - @mock.patch('pypowervm.tasks.storage.rm_tier_storage') - def test_delete_disks(self, mock_rm_tstor): - sspdrv = self._get_ssp_stor() - sspdrv.delete_disks(['disk1', 'disk2']) - mock_rm_tstor.assert_called_once_with(['disk1', 'disk2'], - tier=sspdrv._tier) - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.partition.get_active_vioses') - def test_disconnect_disk(self, mock_active_vioses, mock_build_map, - mock_remove_maps, mock_find_maps, mock_vio_uuids): - # vio is a single-entry response. Wrap it and put it in a list - # to act as the feed for FeedTaskFx and FeedTask. - feed = [self.vio_wrap] - ft_fx = pvm_fx.FeedTaskFx(feed) - mock_active_vioses.return_value = feed - self.useFixture(ft_fx) - - # The mock return values - mock_build_map.return_value = 'fake_map' - - # Need the driver to return the actual UUID of the VIOS in the feed, - # to match the FeedTask.
- ssp = self._get_ssp_stor() - mock_vio_uuids.return_value = [self.vio_wrap.uuid] - - # Make the LUs to remove - def mklu(udid): - lu = pvm_stg.LU.bld(None, 'lu_%s' % udid, 1) - lu._udid('27%s' % udid) - return lu - - lu1 = mklu('abc') - lu2 = mklu('def') - - def remove_resp(vios_w, client_lpar_id, match_func=None, - include_orphans=False): - return [mock.Mock(backing_storage=lu1), - mock.Mock(backing_storage=lu2)] - - mock_remove_maps.side_effect = remove_resp - mock_find_maps.side_effect = remove_resp - - # As initialized above, remove_maps returns mappings to trigger the update. - lu_list = ssp.disconnect_disk(self.instance, stg_ftsk=None) - self.assertEqual({lu1, lu2}, set(lu_list)) - mock_remove_maps.assert_called_once_with( - self.vio_wrap, fx.FAKE_INST_UUID_PVM, match_func=mock.ANY) - self.vio_wrap.update.assert_called_once_with(timeout=mock.ANY) - - def test_shared_stg_calls(self): - - # Check the good paths - ssp_stor = self._get_ssp_stor() - data = ssp_stor.check_instance_shared_storage_local('context', 'inst') - self.assertTrue( - ssp_stor.check_instance_shared_storage_remote('context', data)) - ssp_stor.check_instance_shared_storage_cleanup('context', data) - - # Check bad paths... - # No data - self.assertFalse( - ssp_stor.check_instance_shared_storage_remote('context', None)) - # Unexpected data format - self.assertFalse( - ssp_stor.check_instance_shared_storage_remote('context', 'bad')) - # Good data, but not the same SSP uuid - not_same = {'ssp_uuid': 'uuid value not the same'} - self.assertFalse( - ssp_stor.check_instance_shared_storage_remote('context', not_same)) - - def _bld_mocks_for_instance_disk(self): - inst = mock.Mock() - inst.name = 'my-instance-name' - lpar_wrap = mock.Mock() - lpar_wrap.id = 4 - lu_wrap = mock.Mock(spec=pvm_stg.LU) - lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid') - smap = mock.Mock(backing_storage=lu_wrap, - server_adapter=mock.Mock(lpar_id=4)) - # Build mock VIOS Wrappers as the returns from VIOS.wrap. - # vios1 and vios2 will both have the mapping for client ID 4 and LU - # named boot_my_instance_name. - smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap] - vios1 = mock.Mock(spec=pvm_vios.VIOS) - vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps) - vios2 = mock.Mock(spec=pvm_vios.VIOS) - vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps) - # vios3 will not have the mapping - vios3 = mock.Mock(spec=pvm_vios.VIOS) - vios3.configure_mock(name='vios3', uuid='uuid3', - scsi_mappings=[mock.Mock(), mock.Mock()]) - return inst, lpar_wrap, vios1, vios2, vios3 - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') - def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids): - inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk() - mock_lw.return_value = lpar_wrap - mock_vio_uuids.return_value = [1, 2] - ssp_stor = self._get_ssp_stor() - - # Test with two VIOSes, both of which contain the mapping. Force the - # method to get the lpar_wrap.
mock_vio_get.side_effect = [vio1, vio2] - idi = ssp_stor._get_bootdisk_iter(inst) - lu, vios = next(idi) - self.assertEqual('lu_udid', lu.udid) - self.assertEqual('vios1', vios.name) - mock_vio_get.assert_called_once_with(self.apt, uuid=1, - xag=[const.XAG.VIO_SMAP]) - lu, vios = next(idi) - self.assertEqual('lu_udid', lu.udid) - self.assertEqual('vios2', vios.name) - mock_vio_get.assert_called_with(self.apt, uuid=2, - xag=[const.XAG.VIO_SMAP]) - self.assertRaises(StopIteration, next, idi) - self.assertEqual(2, mock_vio_get.call_count) - mock_lw.assert_called_once_with(self.apt, inst) - - # Same, but prove that breaking out of the loop early avoids the second - # get call. - mock_vio_get.reset_mock() - mock_lw.reset_mock() - mock_vio_get.side_effect = [vio1, vio2] - for lu, vios in ssp_stor._get_bootdisk_iter(inst): - self.assertEqual('lu_udid', lu.udid) - self.assertEqual('vios1', vios.name) - break - mock_vio_get.assert_called_once_with(self.apt, uuid=1, - xag=[const.XAG.VIO_SMAP]) - - # Now the first VIOS doesn't have the mapping, but the second does - mock_vio_get.reset_mock() - mock_vio_get.side_effect = [vio3, vio2] - idi = ssp_stor._get_bootdisk_iter(inst) - lu, vios = next(idi) - self.assertEqual('lu_udid', lu.udid) - self.assertEqual('vios2', vios.name) - mock_vio_get.assert_has_calls( - [mock.call(self.apt, uuid=uuid, xag=[const.XAG.VIO_SMAP]) - for uuid in (1, 2)]) - self.assertRaises(StopIteration, next, idi) - self.assertEqual(2, mock_vio_get.call_count) - - # No hits - mock_vio_get.reset_mock() - mock_vio_get.side_effect = [vio3, vio3] - self.assertEqual([], list(ssp_stor._get_bootdisk_iter(inst))) - self.assertEqual(2, mock_vio_get.call_count) - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') - @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping') - def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get, - mock_lw, mock_vio_uuids): - inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk() - mock_lw.return_value = lpar_wrap - mock_vio_uuids.return_value = [1, 2] - ssp_stor = self._get_ssp_stor() - - # Test with two VIOSes, both of which contain the mapping - mock_vio_get.side_effect = [vio1, vio2] - lu, vios = ssp_stor.connect_instance_disk_to_mgmt(inst) - self.assertEqual('lu_udid', lu.udid) - # Should hit on the first VIOS - self.assertIs(vio1, vios) - mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu) - - # Now the first VIOS doesn't have the mapping, but the second does - mock_add.reset_mock() - mock_vio_get.side_effect = [vio3, vio2] - lu, vios = ssp_stor.connect_instance_disk_to_mgmt(inst) - self.assertEqual('lu_udid', lu.udid) - # Should hit on the second VIOS - self.assertIs(vio2, vios) - self.assertEqual(1, mock_add.call_count) - mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu) - - # No hits - mock_add.reset_mock() - mock_vio_get.side_effect = [vio3, vio3] - self.assertRaises(npvmex.InstanceDiskMappingFailed, - ssp_stor.connect_instance_disk_to_mgmt, inst) - mock_add.assert_not_called() - - # First add_vscsi_mapping call raises. Should fall back to the - # second VIOS. - mock_add.reset_mock() - mock_vio_get.side_effect = [vio1, vio2] - mock_add.side_effect = [Exception("mapping failed"), None] - lu, vios = ssp_stor.connect_instance_disk_to_mgmt(inst) - # Should hit on the second VIOS - self.assertIs(vio2, vios) - self.assertEqual(2, mock_add.call_count) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping') - def test_disconnect_disk_from_mgmt(self,
mock_rm_lu_map): - ssp_stor = self._get_ssp_stor() - ssp_stor.disconnect_disk_from_mgmt('vios_uuid', 'disk_name') - mock_rm_lu_map.assert_called_with(ssp_stor.adapter, 'vios_uuid', - 'mp_uuid', disk_names=['disk_name']) diff --git a/nova_powervm/tests/virt/powervm/fixtures.py b/nova_powervm/tests/virt/powervm/fixtures.py deleted file mode 100644 index 0e690c98..00000000 --- a/nova_powervm/tests/virt/powervm/fixtures.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import absolute_import - -import fixtures -import mock - -from nova.virt.powervm_ext import driver - -from nova.virt import fake -from pypowervm.tests import test_fixtures as pvm_fx - -FAKE_INST_UUID = 'b6513403-fd7f-4ad0-ab27-f73bacbd3929' -FAKE_INST_UUID_PVM = '36513403-FD7F-4AD0-AB27-F73BACBD3929' - - -class ImageAPI(fixtures.Fixture): - """Mock out the Glance API.""" - - def setUp(self): - super(ImageAPI, self).setUp() - self.img_api_fx = self.useFixture(fixtures.MockPatch('nova.image.API')) - - -class DiskAdapter(fixtures.Fixture): - """Mock out the DiskAdapter.""" - - def setUp(self): - super(DiskAdapter, self).setUp() - self.std_disk_adpt_fx = self.useFixture( - fixtures.MockPatch('nova_powervm.virt.powervm.disk.localdisk.' - 'LocalStorage')) - self.std_disk_adpt = self.std_disk_adpt_fx.mock - - -class HostCPUMetricCache(fixtures.Fixture): - """Mock out the HostCPUMetricCache.""" - - def setUp(self): - super(HostCPUMetricCache, self).setUp() - self.host_cpu_stats = self.useFixture( - fixtures.MockPatch('pypowervm.tasks.monitor.host_cpu.' - 'HostCPUMetricCache')) - - -class ComprehensiveScrub(fixtures.Fixture): - """Mock out the ComprehensiveScrub.""" - - def setUp(self): - super(ComprehensiveScrub, self).setUp() - self.mock_comp_scrub = self.useFixture( - fixtures.MockPatch('pypowervm.tasks.storage.ComprehensiveScrub')) - - -class VolumeAdapter(fixtures.Fixture): - """Mock out the VolumeAdapter.""" - - def __init__(self, patch_class): - self.patch_class = patch_class - - def setUp(self): - super(VolumeAdapter, self).setUp() - self.std_vol_adpt_fx = self.useFixture( - fixtures.MockPatch(self.patch_class, __name__='MockVolumeAdapter')) - self.std_vol_adpt = self.std_vol_adpt_fx.mock - # We want to mock out the connection_info individually so it gives - # back a new mock on every call. That's because the vol id is - # used for task names and we can't have duplicates. Here we have - # just one mock for simplicity of the vol driver but we need - # multiple names. 
- self.std_vol_adpt.return_value.connection_info.__getitem__\ - .side_effect = mock.MagicMock - self.drv = self.std_vol_adpt.return_value - - -class PowerVMComputeDriver(fixtures.Fixture): - """Construct a fake compute driver.""" - - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage') - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._get_adapter') - @mock.patch('pypowervm.tasks.partition.get_this_partition') - @mock.patch('pypowervm.tasks.cna.find_orphaned_trunks') - def _init_host(self, *args): - - self.mock_sys = self.useFixture(fixtures.MockPatch( - 'pypowervm.wrappers.managed_system.System.get')).mock - self.mock_sys.return_value = [mock.Mock( - uuid='host_uuid', - system_name='Server-8247-21L-SN9999999', - proc_compat_modes=('default', 'POWER7', 'POWER8'), - migration_data={'active_migrations_supported': 16, - 'active_migrations_in_progress': 0})] - - # Mock active vios - self.get_active_vios = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.partition.get_active_vioses')).mock - self.get_active_vios.return_value = ['mock_vios'] - - self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.partition.validate_vios_ready')) - - self.drv.session = self.drv.adapter.session - self.drv.init_host('FakeHost') - - def setUp(self): - super(PowerVMComputeDriver, self).setUp() - - # Set up the mock CPU stats (init_host uses it) - self.useFixture(HostCPUMetricCache()) - - self.scrubber = ComprehensiveScrub() - self.useFixture(self.scrubber) - - self.drv = driver.PowerVMDriver(fake.FakeVirtAPI()) - self.drv.adapter = self.useFixture(pvm_fx.AdapterFx()).adpt - self._init_host() - self.drv.image_api = mock.Mock() - - disk_adpt_fx = self.useFixture(DiskAdapter()) - self.drv.disk_dvr = disk_adpt_fx.std_disk_adpt - - def cleanUp(self): - self.scrubber.mock_comp_scrub.mock.assert_called_once() - super(PowerVMComputeDriver, self).cleanUp() - - -class TaskFlow(fixtures.Fixture): - """Construct a fake TaskFlow. - - This fixture makes it easy to check if tasks were added to a task flow - without having to mock each task. - """ - - def __init__(self, linear_flow='taskflow.patterns.linear_flow', - engines='taskflow.engines'): - """Create the fixture. - - :param linear_flow: The import path to patch for the linear flow. - :param engines: The import path to patch for the engines. - """ - super(TaskFlow, self).__init__() - self.linear_flow_import = linear_flow - self.engines_import = engines - - def setUp(self): - super(TaskFlow, self).setUp() - self.tasks_added = [] - self.lf_fix = self.useFixture( - fixtures.MockPatch(self.linear_flow_import)) - self.lf_fix.mock.Flow.return_value.add.side_effect = self._record_tasks - - self.engine_fx = self.useFixture( - fixtures.MockPatch(self.engines_import)) - - def _record_tasks(self, *args, **kwargs): - self.tasks_added.append(args[0]) - - def assert_tasks_added(self, testcase, expected_tasks): - # Ensure the lists are the same size. - testcase.assertEqual(len(expected_tasks), len(self.tasks_added), - 'Expected tasks not added: %s, %s' % - (expected_tasks, - [t.name for t in self.tasks_added])) - - def compare_tasks(expected, observed): - if expected.endswith('*'): - cmplen = len(expected[:-1]) - testcase.assertEqual(expected[:cmplen], observed.name[:cmplen]) - else: - testcase.assertEqual(expected, observed.name) - - # Compare the list of expected against added. 
- # map() is lazy in Python 3, so iterate explicitly to actually - # run the comparisons. - for expected, observed in zip(expected_tasks, self.tasks_added): - compare_tasks(expected, observed) - - -class DriverTaskFlow(TaskFlow): - """Specific TaskFlow fixture for the main compute driver.""" - def __init__(self): - super(DriverTaskFlow, self).__init__( - linear_flow='nova_powervm.virt.powervm.driver.tf_lf', - engines='nova_powervm.virt.powervm.tasks.base.tf_eng') diff --git a/nova_powervm/tests/virt/powervm/nvram/__init__.py b/nova_powervm/tests/virt/powervm/nvram/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/virt/powervm/nvram/fake_api.py b/nova_powervm/tests/virt/powervm/nvram/fake_api.py deleted file mode 100644 index 066d1ddc..00000000 --- a/nova_powervm/tests/virt/powervm/nvram/fake_api.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova_powervm.virt.powervm.nvram import api - - -class NoopNvramStore(api.NvramStore): - - def store(self, instance, data, force=True): - """Store the NVRAM into the storage service. - - :param instance: The nova instance object OR instance UUID. - :param data: the NVRAM data base64 encoded string - :param force: boolean whether an update should always be saved, - otherwise, check to see if it's changed. - """ - pass - - def fetch(self, instance): - """Fetch the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - :returns: the NVRAM data base64 encoded string - """ - return None - - def delete(self, instance): - """Delete the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - """ - pass - - -class ExpNvramStore(NoopNvramStore): - - def fetch(self, instance): - """Fetch the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - :returns: the NVRAM data base64 encoded string - """ - # Raise exception. This is to ensure fetch causes a failure - # when an exception is raised - raise Exception('Error') - - def delete(self, instance): - """Delete the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - """ - # Raise exception. This is to ensure delete does not fail - # despite an exception being raised - raise Exception('Error') diff --git a/nova_powervm/tests/virt/powervm/nvram/test_manager.py b/nova_powervm/tests/virt/powervm/nvram/test_manager.py deleted file mode 100644 index 8ae9fe0b..00000000 --- a/nova_powervm/tests/virt/powervm/nvram/test_manager.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2016, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mock -from nova import test -from pypowervm import exceptions as pvm_exc -import time - -from nova_powervm.tests.virt import powervm -from nova_powervm.tests.virt.powervm.nvram import fake_api -from nova_powervm.virt.powervm.nvram import api -from nova_powervm.virt.powervm.nvram import manager -from nova_powervm.virt.powervm import vm - - -class TestNvramManager(test.NoDBTestCase): - def setUp(self): - super(TestNvramManager, self).setUp() - self.fake_store = fake_api.NoopNvramStore() - self.fake_exp_store = fake_api.ExpNvramStore() - self.mock_store = self.useFixture( - fixtures.MockPatchObject(self.fake_store, 'store')).mock - self.mock_fetch = self.useFixture( - fixtures.MockPatchObject(self.fake_store, 'fetch')).mock - self.mock_remove = self.useFixture( - fixtures.MockPatchObject(self.fake_store, 'delete')).mock - self.mock_exp_remove = self.useFixture( - fixtures.MockPatchObject(self.fake_exp_store, 'delete')).mock - - @mock.patch('nova_powervm.virt.powervm.nvram.manager.LOG.exception', - autospec=True) - @mock.patch.object(vm, 'get_instance_wrapper', autospec=True) - def test_store_with_exception(self, mock_get_inst, mock_log): - mock_get_inst.side_effect = pvm_exc.HttpError(mock.Mock()) - mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock()) - mgr.store(powervm.TEST_INST1.uuid) - self.assertEqual(1, mock_log.call_count) - - @mock.patch('nova_powervm.virt.powervm.nvram.manager.LOG.warning', - autospec=True) - @mock.patch.object(vm, 'get_instance_wrapper', autospec=True) - def test_store_with_not_found_exc(self, mock_get_inst, mock_log): - mock_get_inst.side_effect = pvm_exc.HttpNotFound(mock.Mock()) - mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock()) - mgr.store(powervm.TEST_INST1.uuid) - self.assertEqual(0, mock_log.call_count) - - @mock.patch.object(vm, 'get_instance_wrapper', autospec=True) - def test_manager(self, mock_get_inst): - - mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock()) - mgr.store(powervm.TEST_INST1.uuid) - mgr.store(powervm.TEST_INST2) - - mgr.fetch(powervm.TEST_INST2) - mgr.fetch(powervm.TEST_INST2.uuid) - mgr.remove(powervm.TEST_INST2) - - # Simulate quick repeated stores of the same LPAR by poking the queue.
- mgr._queue.put(powervm.TEST_INST1) - mgr._queue.put(powervm.TEST_INST1) - mgr._queue.put(powervm.TEST_INST2) - time.sleep(0) - - mgr.shutdown() - self.mock_store.assert_has_calls( - [mock.call(powervm.TEST_INST1.uuid, mock.ANY), - mock.call(powervm.TEST_INST2.uuid, mock.ANY)]) - self.mock_fetch.assert_has_calls( - [mock.call(powervm.TEST_INST2.uuid)] * 2) - self.mock_remove.assert_called_once_with(powervm.TEST_INST2.uuid) - - self.mock_remove.reset_mock() - - # Test when fetch returns an exception - mgr_exp = manager.NvramManager(self.fake_exp_store, - mock.Mock(), mock.Mock()) - self.assertRaises(api.NVRAMDownloadException, - mgr_exp.fetch, powervm.TEST_INST2) - - # Test exception being logged but not raised during remove - mgr_exp.remove(powervm.TEST_INST2.uuid) - self.mock_exp_remove.assert_called_once_with(powervm.TEST_INST2.uuid) diff --git a/nova_powervm/tests/virt/powervm/nvram/test_swift.py b/nova_powervm/tests/virt/powervm/nvram/test_swift.py deleted file mode 100644 index b340426f..00000000 --- a/nova_powervm/tests/virt/powervm/nvram/test_swift.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright 2016, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from nova import test -from requests.exceptions import RequestException -from swiftclient import exceptions as swft_exc -from swiftclient import service as swft_srv - -from nova_powervm.tests.virt import powervm -from nova_powervm.virt.powervm.nvram import api -from nova_powervm.virt.powervm.nvram import swift - - -class TestSwiftStore(test.NoDBTestCase): - - def setUp(self): - super(TestSwiftStore, self).setUp() - self.flags(swift_password='secret', swift_auth_url='url', - group='powervm') - - self.swift_store = swift.SwiftNvramStore() - - def test_run_operation(self): - - fake_result = [{'key1': 'value1'}, {'2key1', '2value1'}] - fake_result2 = fake_result[0] - - def fake_generator(alist): - for item in alist: - yield item - - # Address the 'list' method that should be called. 
- list_op = mock.Mock() - self.swift_store.swift_service = mock.Mock(list=list_op) - - # Setup expected results - list_op.return_value = fake_generator(fake_result) - results = self.swift_store._run_operation('list', 1, x=2) - - list_op.assert_called_once_with(1, x=2) - # Returns a copy of the results - self.assertEqual(results, fake_result) - self.assertNotEqual(id(results), id(fake_result)) - - # Try a single result - Setup expected results - list_op.reset_mock() - list_op.return_value = fake_result2 - results = self.swift_store._run_operation('list', 3, x=4) - - list_op.assert_called_once_with(3, x=4) - # Returns the actual result - self.assertEqual(results, fake_result2) - self.assertEqual(id(results), id(fake_result2)) - - # Should raise any swift errors encountered - list_op.side_effect = swft_srv.SwiftError('Error message.') - self.assertRaises(swft_srv.SwiftError, self.swift_store._run_operation, - 'list', 3, x=4) - - def _build_results(self, names): - listing = [{'name': name} for name in names] - return [{'success': True, 'listing': listing}] - - def test_get_name_from_listing(self): - names = self.swift_store._get_name_from_listing( - self._build_results(['snoopy'])) - self.assertEqual(['snoopy'], names) - - def test_get_container_names(self): - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['container']) - names = self.swift_store._get_container_names() - self.assertEqual(['container'], names) - mock_run.assert_called_once_with('list', - options={'long': True}) - - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_get_container_names', autospec=True) - def test_get_object_names(self, mock_container_names): - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj', 'obj2']) - - # First run, no containers. - mock_container_names.return_value = [] - names = self.swift_store._get_object_names('powervm_nvram') - self.assertEqual([], names) - self.assertEqual(1, mock_container_names.call_count) - - # Test without a prefix - mock_container_names.return_value = ['powervm_nvram'] - names = self.swift_store._get_object_names('powervm_nvram') - self.assertEqual(['obj', 'obj2'], names) - mock_run.assert_called_once_with( - 'list', container='powervm_nvram', - options={'long': True, 'prefix': None}) - self.assertEqual(mock_container_names.call_count, 2) - - # Test with a prefix - names = self.swift_store._get_object_names('powervm_nvram', - prefix='obj') - self.assertEqual(['obj', 'obj2'], names) - mock_run.assert_called_with( - 'list', container='powervm_nvram', - options={'long': True, 'prefix': 'obj'}) - - # Second run should not increment the call count here - self.assertEqual(mock_container_names.call_count, 2) - - @mock.patch('swiftclient.service.SwiftUploadObject', autospec=True) - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' 
- '_exists', autospec=True) - def test_underscore_store(self, mock_exists, mock_swiftuploadobj): - mock_exists.return_value = True - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - self.swift_store._store(powervm.TEST_INST1.uuid, 'data') - mock_run.assert_called_once_with('upload', 'powervm_nvram', - mock.ANY, options=None) - - # Test unsuccessful upload - mock_result = [{'success': False, - 'error': RequestException('Error Message.')}] - mock_run.return_value = mock_result - self.assertRaises(api.NVRAMUploadException, - self.swift_store._store, powervm.TEST_INST1.uuid, - 'data') - - # Test retry upload - mock_run.reset_mock() - mock_swiftuploadobj.reset_mock() - mock_res_obj = {'success': False, - 'error': swft_exc. - ClientException('Error Message.'), - 'object': '6ecb1386-53ab-43da-9e04-54e986ad4a9d'} - mock_run.side_effect = [mock_res_obj, - self._build_results(['obj'])] - self.swift_store._store(powervm.TEST_INST1.uuid, 'data') - mock_run.assert_called_with('upload', 'powervm_nvram', - mock.ANY, options=None) - self.assertEqual(mock_run.call_count, 2) - self.assertEqual(mock_swiftuploadobj.call_count, 2) - - @mock.patch('swiftclient.service.SwiftUploadObject', autospec=True) - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_exists', autospec=True) - def test_underscore_store_not_exists(self, mock_exists, - mock_swiftuploadobj): - mock_exists.return_value = False - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - self.swift_store._store(powervm.TEST_INST1.uuid, 'data') - mock_run.assert_called_once_with( - 'upload', 'powervm_nvram', mock.ANY, - options={'leave_segments': True}) - - # Test retry upload - mock_run.reset_mock() - mock_swiftuploadobj.reset_mock() - mock_res_obj = {'success': False, - 'error': swft_exc. - ClientException('Error Message.'), - 'object': '6ecb1386-53ab-43da-9e04-54e986ad4a9d'} - mock_run.side_effect = [mock_res_obj, - self._build_results(['obj'])] - self.swift_store._store(powervm.TEST_INST1.uuid, 'data') - mock_run.assert_called_with('upload', 'powervm_nvram', mock.ANY, - options={'leave_segments': True}) - self.assertEqual(mock_run.call_count, 2) - self.assertEqual(mock_swiftuploadobj.call_count, 2) - - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' 
- '_exists', autospec=True) - def test_store(self, mock_exists): - # Test forcing an update - with mock.patch.object(self.swift_store, '_store') as mock_store: - mock_exists.return_value = False - self.swift_store.store(powervm.TEST_INST1.uuid, 'data', force=True) - mock_store.assert_called_once_with(powervm.TEST_INST1.uuid, - 'data', exists=False) - - with mock.patch.object( - self.swift_store, '_store') as mock_store, mock.patch.object( - self.swift_store, '_run_operation') as mock_run: - - mock_exists.return_value = True - data_md5_hash = '8d777f385d3dfec8815d20f7496026dc' - results = self._build_results(['obj']) - results[0]['headers'] = {'etag': data_md5_hash} - mock_run.return_value = results - self.swift_store.store(powervm.TEST_INST1.uuid, 'data', - force=False) - self.assertFalse(mock_store.called) - mock_run.assert_called_once_with( - 'stat', options={'long': True}, - container='powervm_nvram', objects=[powervm.TEST_INST1.uuid]) - - def test_store_slot_map(self): - # Test storing a slot map - with mock.patch.object(self.swift_store, '_store') as mock_store: - self.swift_store.store_slot_map("test_slot", 'data') - mock_store.assert_called_once_with( - 'test_slot', 'data') - - @mock.patch('os.remove', autospec=True) - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_exists', autospec=True) - def test_fetch(self, mock_exists, mock_tmpf, mock_rmv): - mock_exists.return_value = True - with mock.patch('nova_powervm.virt.powervm.nvram.swift.open', - mock.mock_open(read_data='data to read') - ) as m_open, mock.patch.object( - self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - mock_tmpf.return_value.__enter__.return_value.name = 'fname' - - data = self.swift_store.fetch(powervm.TEST_INST1) - self.assertEqual('data to read', data) - mock_rmv.assert_called_once_with(m_open.return_value.name) - - # Bad result from the download - mock_run.return_value[0]['success'] = False - self.assertRaises(api.NVRAMDownloadException, - self.swift_store.fetch, powervm.TEST_INST1) - - @mock.patch('os.remove', autospec=True) - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_exists', autospec=True) - def test_fetch_slot_map(self, mock_exists, mock_tmpf, mock_rmv): - mock_exists.return_value = True - with mock.patch('nova_powervm.virt.powervm.nvram.swift.open', - mock.mock_open(read_data='data to read') - ) as m_open, mock.patch.object( - self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - mock_tmpf.return_value.__enter__.return_value.name = 'fname' - - data = self.swift_store.fetch_slot_map("test_slot") - self.assertEqual('data to read', data) - mock_rmv.assert_called_once_with(m_open.return_value.name) - - @mock.patch('os.remove', autospec=True) - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_exists', autospec=True) - def test_fetch_slot_map_no_exist(self, mock_exists, mock_tmpf, mock_rmv): - mock_exists.return_value = False - data = self.swift_store.fetch_slot_map("test_slot") - self.assertIsNone(data) - - # Make sure the remove (part of the finally block) is never called. - # Should not get that far.
- self.assertFalse(mock_rmv.called) - - def test_delete(self): - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - self.swift_store.delete(powervm.TEST_INST1) - mock_run.assert_called_once_with('delete', - container='powervm_nvram', - objects=[powervm.TEST_INST1.uuid]) - - # Bad result from the operation - mock_run.return_value[0]['success'] = False - self.assertRaises(api.NVRAMDeleteException, - self.swift_store.delete, powervm.TEST_INST1) - - def test_delete_slot_map(self): - with mock.patch.object(self.swift_store, '_run_operation') as mock_run: - mock_run.return_value = self._build_results(['obj']) - self.swift_store.delete_slot_map('test_slot') - mock_run.assert_called_once_with( - 'delete', container='powervm_nvram', objects=['test_slot']) - - # Bad result from the operation - mock_run.return_value[0]['success'] = False - self.assertRaises( - api.NVRAMDeleteException, self.swift_store.delete_slot_map, - 'test_slot') - - @mock.patch('nova_powervm.virt.powervm.nvram.swift.SwiftNvramStore.' - '_get_object_names', autospec=True) - def test_exists(self, mock_get_obj_names): - # Test where there are elements in here - mock_get_obj_names.return_value = ['obj', 'obj1', 'obj2'] - self.assertTrue(self.swift_store._exists('obj')) - - # Test where there are objects that start with the prefix, but aren't - # actually there themselves - mock_get_obj_names.return_value = ['obj1', 'obj2'] - self.assertFalse(self.swift_store._exists('obj')) - - def test_optional_options(self): - """Test optional config values.""" - # Not in the sparse one from setUp() - self.assertIsNone(self.swift_store.options['os_cacert']) - self.assertIsNone(self.swift_store.options['os_endpoint_type']) - # Create a new one with the optional values set - self.flags(swift_cacert='/path/to/ca.pem', group='powervm') - self.flags(swift_endpoint_type='internalURL', group='powervm') - swift_store = swift.SwiftNvramStore() - self.assertEqual('/path/to/ca.pem', swift_store.options['os_cacert']) - self.assertEqual('internalURL', - swift_store.options['os_endpoint_type']) diff --git a/nova_powervm/tests/virt/powervm/tasks/__init__.py b/nova_powervm/tests/virt/powervm/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/virt/powervm/tasks/test_image.py b/nova_powervm/tests/virt/powervm/tasks/test_image.py deleted file mode 100644 index 878c6c78..00000000 --- a/nova_powervm/tests/virt/powervm/tasks/test_image.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
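The image-task tests below exercise taskflow Task subclasses. This is the general mechanic they depend on: a Task implements execute(), its arguments are bound by name from the flow's storage, and its return value can be published under a name for downstream tasks. A generic sketch of that pattern (the Double task and its names are illustrative only, not part of the driver):

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf
    from taskflow import task


    class Double(task.Task):
        # Publish execute()'s return value as 'doubled' in flow storage.
        default_provides = 'doubled'

        def execute(self, value):
            return value * 2


    flow = lf.Flow('example').add(Double())
    # 'value' is injected via the store and matched to execute()'s argument.
    result = taskflow.engines.run(flow, store={'value': 21})
    print(result['doubled'])  # 42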
- -import mock - -from nova import test - -from nova_powervm.virt.powervm.tasks import image as tsk_img - - -class TestImage(test.NoDBTestCase): - def test_update_task_state(self): - def func(task_state, expected_state='delirious'): - self.assertEqual('task_state', task_state) - self.assertEqual('delirious', expected_state) - tf = tsk_img.UpdateTaskState(func, 'task_state') - self.assertEqual('update_task_state_task_state', tf.name) - tf.execute() - - def func2(task_state, expected_state=None): - self.assertEqual('task_state', task_state) - self.assertEqual('expected_state', expected_state) - tf = tsk_img.UpdateTaskState(func2, 'task_state', - expected_state='expected_state') - tf.execute() - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tsk_img.UpdateTaskState(func, 'task_state') - tf.assert_called_once_with(name='update_task_state_task_state') - - @mock.patch('nova_powervm.virt.powervm.image.stream_blockdev_to_glance', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.image.generate_snapshot_metadata', - autospec=True) - def test_stream_to_glance(self, mock_metadata, mock_stream): - mock_metadata.return_value = 'metadata' - mock_inst = mock.Mock() - mock_inst.name = 'instance_name' - tf = tsk_img.StreamToGlance('context', 'image_api', 'image_id', - mock_inst) - self.assertEqual('stream_to_glance', tf.name) - tf.execute('disk_path') - mock_metadata.assert_called_with('context', 'image_api', 'image_id', - mock_inst) - mock_stream.assert_called_with('context', 'image_api', 'image_id', - 'metadata', 'disk_path') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tsk_img.StreamToGlance('context', 'image_api', 'image_id', - mock_inst) - tf.assert_called_once_with(name='stream_to_glance', - requires='disk_path') diff --git a/nova_powervm/tests/virt/powervm/tasks/test_network.py b/nova_powervm/tests/virt/powervm/tasks/test_network.py deleted file mode 100644 index c04e8aa2..00000000 --- a/nova_powervm/tests/virt/powervm/tasks/test_network.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
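One mock idiom is worth flagging before the network tests: Mock treats name as an argument that names the mock object itself, so wrappers whose name attribute matters (the CNA, LU, and VIOS mocks in these modules) set it after construction. A small sketch of the gotcha:

    import mock

    nic = mock.Mock(mac='AABBCCDDEEFF')
    # mock.Mock(name='eth0') would name the mock, not set the attribute,
    # so .name has to be assigned via configure_mock() (or afterwards).
    nic.configure_mock(name='eth0')
    assert nic.mac == 'AABBCCDDEEFF'
    assert nic.name == 'eth0'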
- -import copy -import eventlet -import mock - -from nova import exception -from nova import objects -from nova import test -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import iocard as pvm_card -from pypowervm.wrappers import network as pvm_net - -from nova_powervm.tests.virt import powervm -from nova_powervm.virt.powervm.tasks import network as tf_net - - -def cna(mac): - """Builds a mock Client Network Adapter (or VNIC) for unit tests.""" - nic = mock.MagicMock() - nic.mac = mac - nic.vswitch_uri = 'fake_href' - return nic - - -class TestNetwork(test.NoDBTestCase): - def setUp(self): - super(TestNetwork, self).setUp() - self.flags(host='host1') - self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt - - self.mock_lpar_wrap = mock.MagicMock() - self.mock_lpar_wrap.can_modify_io.return_value = True, None - - @mock.patch('nova_powervm.virt.powervm.vif.unplug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_unplug_vifs(self, mock_vm_get, mock_unplug): - """Tests that a delete of the vif can be done.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the CNA responses. - cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')] - mock_vm_get.return_value = cnas - - # Mock up the network info. This also validates that they will be - # sanitized to upper case. - net_info = [ - {'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'}, - {'address': 'aa:bb:cc:dd:ee:33'} - ] - - # Mock out the vif driver - def validate_unplug(adapter, host_uuid, instance, vif, - slot_mgr, cna_w_list=None): - self.assertEqual(adapter, self.apt) - self.assertEqual('host_uuid', host_uuid) - self.assertEqual(instance, inst) - self.assertIn(vif, net_info) - self.assertEqual('slot_mgr', slot_mgr) - self.assertEqual(cna_w_list, cnas) - - mock_unplug.side_effect = validate_unplug - - # Run method - p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid', - 'slot_mgr') - p_vifs.execute(self.mock_lpar_wrap) - - # Make sure the unplug was invoked, so that we know that the validation - # code was called - self.assertEqual(3, mock_unplug.call_count) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid', - 'slot_mgr') - tf.assert_called_once_with(name='unplug_vifs', requires=['lpar_wrap']) - - def test_unplug_vifs_invalid_state(self): - """Tests that the delete raises an exception if bad VM state.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock that the state is incorrect - self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad' - - # Run method - p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock(), 'host_uuid', - 'slot_mgr') - self.assertRaises(exception.VirtualInterfaceUnplugException, - p_vifs.execute, self.mock_lpar_wrap) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vnics', autospec=True) - def test_plug_vifs_rmc(self, mock_vnic_get, mock_cna_get, mock_plug): - """Tests that a crt vif can be done with secure RMC.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the CNA response. One should already exist, the other - # should not. - pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')] - mock_cna_get.return_value = copy.deepcopy(pre_cnas) - # Ditto VNIC response. 
- mock_vnic_get.return_value = [cna('AABBCCDDEE33'), cna('AABBCCDDEE44')] - - # Mock up the network info. This also validates that they will be - # sanitized to upper case. - net_info = [ - {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'direct'}, - {'address': 'aa:bb:cc:dd:ee:55', 'vnic_type': 'direct'} - ] - - # Both updates run first (one CNA, one VNIC); then the CNA create, then - # the VNIC create. - mock_new_cna = mock.Mock(spec=pvm_net.CNA) - mock_new_vnic = mock.Mock(spec=pvm_card.VNIC) - mock_plug.side_effect = ['upd_cna', 'upd_vnic', - mock_new_cna, mock_new_vnic] - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - - all_cnas = p_vifs.execute(self.mock_lpar_wrap) - - # new vif should be created twice. - mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[0], - 'slot_mgr', new_vif=False) - mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[1], - 'slot_mgr', new_vif=True) - mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[2], - 'slot_mgr', new_vif=False) - mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[3], - 'slot_mgr', new_vif=True) - - # The Task provides the list of original CNAs plus only CNAs that were - # created. - self.assertEqual(pre_cnas + [mock_new_cna], all_cnas) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - tf.assert_called_once_with(name='plug_vifs', provides='vm_cnas', - requires=['lpar_wrap']) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug): - """Verifies if no creates are needed, none are done.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the CNA response. Both should already exist. - mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')] - - # Mock up the network info. This also validates that they will be - # sanitized to upper case. This also validates that we don't call - # get_vnics if no nets have vnic_type 'direct'. - net_info = [ - {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'} - ] - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - p_vifs.execute(self.mock_lpar_wrap) - - # The create should have been called with new_vif as False. - mock_plug.assert_called_with( - self.apt, 'host_uuid', inst, net_info[1], - 'slot_mgr', new_vif=False) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug): - """Tests that a crt_vif fails when the LPAR state is bad.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the CNA response. 
Returning an empty list for simplicity - mock_vm_get.return_value = [] - net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}] - - # Mock that the state is incorrect - self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad' - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - self.assertRaises(exception.VirtualInterfaceCreateException, - p_vifs.execute, self.mock_lpar_wrap) - - # The create should not have been invoked - self.assertEqual(0, mock_plug.call_count) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_timeout(self, mock_vm_get, mock_plug): - """Tests crt vif failure via loss of neutron callback.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the CNA response. Only doing one for simplicity - mock_vm_get.return_value = [cna('AABBCCDDEE11')] - - # Mock up the network info. - net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}] - - # Ensure that an exception is raised by a timeout. - mock_plug.side_effect = eventlet.timeout.Timeout() - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - self.assertRaises(exception.VirtualInterfaceCreateException, - p_vifs.execute, self.mock_lpar_wrap) - - # The create should have only been called once. - self.assertEqual(1, mock_plug.call_count) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_diff_host(self, mock_vm_get, mock_plug): - """Tests that crt vif handles bad inst.host value.""" - inst = powervm.TEST_INST1 - - # Set this up as a different host from the inst.host - self.flags(host='host2') - - # Mock up the CNA response. Only doing one for simplicity - mock_vm_get.return_value = [cna('AABBCCDDEE11')] - - # Mock up the network info. - net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}] - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - with mock.patch.object(inst, 'save') as mock_inst_save: - p_vifs.execute(self.mock_lpar_wrap) - - # The create should have only been called once. - self.assertEqual(1, mock_plug.call_count) - # Should have called save to save the new host and then changed it back - self.assertEqual(2, mock_inst_save.call_count) - self.assertEqual('host1', inst.host) - - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_diff_host_except(self, mock_vm_get, mock_plug): - """Tests that crt vif handles bad inst.host value. - - This test ensures that if we get a timeout exception we still reset - the inst.host value back to the original value. - """ - inst = powervm.TEST_INST1 - - # Set this up as a different host from the inst.host - self.flags(host='host2') - - # Mock up the CNA response. Only doing one for simplicity - mock_vm_get.return_value = [cna('AABBCCDDEE11')] - - # Mock up the network info. - net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}] - - # Ensure that an exception is raised by a timeout.
- mock_plug.side_effect = eventlet.timeout.Timeout() - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - with mock.patch.object(inst, 'save') as mock_inst_save: - self.assertRaises(exception.VirtualInterfaceCreateException, - p_vifs.execute, self.mock_lpar_wrap) - - # The create should have only been called once. - self.assertEqual(1, mock_plug.call_count) - # Should have called save to save the new host and then changed it back - self.assertEqual(2, mock_inst_save.call_count) - self.assertEqual('host1', inst.host) - - @mock.patch('nova_powervm.virt.powervm.vif.unplug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug): - """Tests that the revert flow works properly.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Fake CNA list. The one pre-existing VIF should *not* get reverted. - cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')] - mock_vm_get.return_value = cna_list - - # Mock up the network info. Three roll backs. - net_info = [ - {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'} - ] - - # Make sure we test raising an exception - mock_unplug.side_effect = [exception.NovaException(), None] - - # Run method - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - p_vifs.execute(self.mock_lpar_wrap) - p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock()) - - # The unplug should be called twice. The exception shouldn't stop the - # second call. - self.assertEqual(2, mock_unplug.call_count) - - # Make sure each call is invoked correctly. The first plug was not a - # new vif, so it should not be reverted. - c2 = mock.call(self.apt, 'host_uuid', inst, net_info[1], - 'slot_mgr', cna_w_list=cna_list) - c3 = mock.call(self.apt, 'host_uuid', inst, net_info[2], - 'slot_mgr', cna_w_list=cna_list) - mock_unplug.assert_has_calls([c2, c3]) - - @mock.patch('nova_powervm.virt.powervm.vif.plug_secure_rmc_vif', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif.get_secure_rmc_vswitch', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_plug_mgmt_vif(self, mock_vm_get, mock_plug, - mock_get_rmc_vswitch, mock_plug_rmc_vif): - """Tests that a mgmt vif can be created.""" - inst = objects.Instance(**powervm.TEST_INSTANCE) - - # Mock up the rmc vswitch - vswitch_w = mock.MagicMock() - vswitch_w.href = 'fake_mgmt_uri' - mock_get_rmc_vswitch.return_value = vswitch_w - - # Run method such that it triggers a fresh CNA search - p_vifs = tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid', 'slot_mgr') - p_vifs.execute(None) - - # With the default get_cnas mock (which returns a Mock()), we think we - # found an existing management CNA. - self.assertEqual(0, mock_plug_rmc_vif.call_count) - mock_vm_get.assert_called_once_with( - self.apt, inst, vswitch_uri='fake_mgmt_uri') - - # Now mock get_cnas to return no hits - mock_vm_get.reset_mock() - mock_vm_get.return_value = [] - p_vifs.execute(None) - - # Get was called; and since it didn't have the mgmt CNA, so was plug. 
- self.assertEqual(1, mock_plug_rmc_vif.call_count) - mock_vm_get.assert_called_once_with( - self.apt, inst, vswitch_uri='fake_mgmt_uri') - - # Now pass CNAs, but not the mgmt vif, "from PlugVifs" - cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')] - mock_plug_rmc_vif.reset_mock() - mock_vm_get.reset_mock() - p_vifs.execute(cnas) - - # Get wasn't called, since the CNAs were passed "from PlugVifs"; but - # since the mgmt vif wasn't included, plug was called. - self.assertEqual(0, mock_vm_get.call_count) - self.assertEqual(1, mock_plug_rmc_vif.call_count) - - # Finally, pass CNAs including the mgmt. - cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri')) - mock_plug_rmc_vif.reset_mock() - p_vifs.execute(cnas) - - # Neither get nor plug was called. - self.assertEqual(0, mock_vm_get.call_count) - self.assertEqual(0, mock_plug_rmc_vif.call_count) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid', 'slot_mgr') - tf.assert_called_once_with(name='plug_mgmt_vif', provides='mgmt_cna', - requires=['vm_cnas']) - - def test_get_vif_events(self): - # Set up common mocks. - inst = objects.Instance(**powervm.TEST_INSTANCE) - net_info = [mock.MagicMock(), mock.MagicMock()] - net_info[0]['id'] = 'a' - net_info[0].get.return_value = False - net_info[1]['id'] = 'b' - net_info[1].get.return_value = True - - # Set up the runner. - p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info, - 'host_uuid', 'slot_mgr') - p_vifs.crt_network_infos = net_info - - resp = p_vifs._get_vif_events() - - # Only one should be returned since only one was active. - self.assertEqual(1, len(resp)) diff --git a/nova_powervm/tests/virt/powervm/tasks/test_slot.py b/nova_powervm/tests/virt/powervm/tasks/test_slot.py deleted file mode 100644 index 64c2c1ac..00000000 --- a/nova_powervm/tests/virt/powervm/tasks/test_slot.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2016, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
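The network-task tests above repeat one idiom worth calling out: patching taskflow.task.Task.__init__ and asserting on the name/provides/requires keywords, which verifies a task's flow wiring without ever running a flow. A minimal self-contained sketch of the idiom follows (an illustration only, assuming taskflow and the stdlib unittest.mock are available; PlugVifsLike is a toy stand-in, not part of the deleted code):

from unittest import mock

from taskflow import task as tf_task


class PlugVifsLike(tf_task.Task):
    # Toy stand-in for the deleted task classes.
    def __init__(self):
        super(PlugVifsLike, self).__init__(
            name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])

    def execute(self, lpar_wrap):
        return []


# Because the subclass __init__ reaches Task.__init__ through super(), the
# patched mock records the wiring keywords and the test can assert on them.
with mock.patch('taskflow.task.Task.__init__') as tf:
    PlugVifsLike()
tf.assert_called_once_with(name='plug_vifs', provides='vm_cnas',
                           requires=['lpar_wrap'])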
- -import mock - -from nova import test - -from nova_powervm.virt.powervm.tasks import slot - - -class TestSaveSlotStore(test.NoDBTestCase): - - def setUp(self): - super(TestSaveSlotStore, self).setUp() - - def test_execute(self): - slot_mgr = mock.Mock() - save = slot.SaveSlotStore(mock.MagicMock(), slot_mgr) - save.execute() - slot_mgr.save.assert_called_once_with() - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - slot.SaveSlotStore(mock.MagicMock(), slot_mgr) - tf.assert_called_once_with(name='save_slot_store') - - -class TestDeleteSlotStore(test.NoDBTestCase): - - def setUp(self): - super(TestDeleteSlotStore, self).setUp() - - def test_execute(self): - slot_mgr = mock.Mock() - delete = slot.DeleteSlotStore(mock.MagicMock(), slot_mgr) - delete.execute() - slot_mgr.delete.assert_called_once_with() - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - slot.DeleteSlotStore(mock.MagicMock(), slot_mgr) - tf.assert_called_once_with(name='delete_slot_store') diff --git a/nova_powervm/tests/virt/powervm/tasks/test_storage.py b/nova_powervm/tests/virt/powervm/tasks/test_storage.py deleted file mode 100644 index d5be7927..00000000 --- a/nova_powervm/tests/virt/powervm/tasks/test_storage.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
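The storage tests that follow set up their mocks with fixtures.MockPatch inside setUp() instead of stacking @mock.patch decorators; the fixture keeps the patch active for the whole test and undoes it automatically on cleanup. A small standalone illustration (assumption: the fixtures and testtools libraries are installed; os.path.exists merely stands in for the real patch targets):

import os

import fixtures
import testtools


class ExampleTest(testtools.TestCase):
    def setUp(self):
        super(ExampleTest, self).setUp()
        # .mock is the MagicMock that replaced the target; useFixture
        # registers the cleanup that restores the original attribute.
        self.mock_exists = self.useFixture(
            fixtures.MockPatch('os.path.exists')).mock

    def test_patched(self):
        self.mock_exists.return_value = True
        self.assertTrue(os.path.exists('/no/such/path'))
        self.mock_exists.assert_called_once_with('/no/such/path')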
- -import fixtures -import mock - -from nova import test - -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm.tasks import storage as tf_stg - - -class TestStorage(test.NoDBTestCase): - - def setUp(self): - super(TestStorage, self).setUp() - - self.adapter = mock.Mock() - self.disk_dvr = mock.MagicMock() - self.mock_cfg_drv = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.media.ConfigDrivePowerVM')).mock - self.mock_mb = self.mock_cfg_drv.return_value - self.instance = mock.MagicMock() - self.context = 'context' - - def test_create_and_connect_cfg_drive(self): - lpar_w = mock.Mock() - - # Test with no FeedTask - task = tf_stg.CreateAndConnectCfgDrive( - self.adapter, self.instance, 'injected_files', - 'network_info', 'admin_pass') - task.execute(lpar_w, 'mgmt_cna') - self.mock_cfg_drv.assert_called_once_with(self.adapter) - self.mock_mb.create_cfg_drv_vopt.assert_called_once_with( - self.instance, 'injected_files', 'network_info', lpar_w.uuid, - admin_pass='admin_pass', mgmt_cna='mgmt_cna', stg_ftsk=None) - - self.mock_cfg_drv.reset_mock() - self.mock_mb.reset_mock() - - # Normal revert - task.revert(lpar_w, 'mgmt_cna', 'result', 'flow_failures') - self.mock_mb.dlt_vopt.assert_called_once_with(lpar_w.uuid) - - self.mock_mb.reset_mock() - - # Revert when dlt_vopt fails - self.mock_mb.dlt_vopt.side_effect = Exception('fake-exc') - task.revert(lpar_w, 'mgmt_cna', 'result', 'flow_failures') - self.mock_mb.dlt_vopt.assert_called_once_with(lpar_w.uuid) - - self.mock_mb.reset_mock() - - # With a specified FeedTask - task = tf_stg.CreateAndConnectCfgDrive( - self.adapter, self.instance, 'injected_files', - 'network_info', 'admin_pass', stg_ftsk='stg_ftsk') - task.execute(lpar_w, 'mgmt_cna') - self.mock_cfg_drv.assert_called_once_with(self.adapter) - self.mock_mb.create_cfg_drv_vopt.assert_called_once_with( - self.instance, 'injected_files', 'network_info', lpar_w.uuid, - admin_pass='admin_pass', mgmt_cna='mgmt_cna', stg_ftsk='stg_ftsk') - - # Revert when media builder not created - task.mb = None - task.revert(lpar_w, 'mgmt_cna', 'result', 'flow_failures') - self.mock_mb.assert_not_called() - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.CreateAndConnectCfgDrive( - self.adapter, self.instance, 'injected_files', 'network_info', - 'admin_pass') - tf.assert_called_once_with(name='cfg_drive', requires=['lpar_wrap', - 'mgmt_cna']) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_delete_vopt(self, mock_pvm_uuid): - # Test with no FeedTask - mock_pvm_uuid.return_value = 'pvm_uuid' - task = tf_stg.DeleteVOpt(self.adapter, self.instance) - task.execute() - self.mock_cfg_drv.assert_called_once_with(self.adapter) - self.mock_mb.dlt_vopt.assert_called_once_with( - 'pvm_uuid', stg_ftsk=None) - - self.mock_cfg_drv.reset_mock() - self.mock_mb.reset_mock() - - # With a specified FeedTask - task = tf_stg.DeleteVOpt(self.adapter, self.instance, - stg_ftsk='ftsk') - task.execute() - self.mock_cfg_drv.assert_called_once_with(self.adapter) - self.mock_mb.dlt_vopt.assert_called_once_with( - 'pvm_uuid', stg_ftsk='ftsk') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.DeleteVOpt(self.adapter, self.instance) - tf.assert_called_once_with(name='vopt_delete') - - def test_delete_disk(self): - stor_adpt_mappings = mock.Mock() - - task = tf_stg.DeleteDisk(self.disk_dvr, self.instance) - 
task.execute(stor_adpt_mappings) - self.disk_dvr.delete_disks.assert_called_once_with(stor_adpt_mappings) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.DeleteDisk(self.disk_dvr, self.instance) - tf.assert_called_once_with( - name='dlt_storage', requires=['stor_adpt_mappings']) - - def test_detach_disk(self): - disk_type = 'disk_type' - stg_ftsk = mock.Mock() - - task = tf_stg.DetachDisk( - self.disk_dvr, self.instance, stg_ftsk=stg_ftsk, - disk_type=disk_type) - task.execute() - self.disk_dvr.disconnect_disk.assert_called_once_with( - self.instance, stg_ftsk=stg_ftsk, disk_type=disk_type) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.DetachDisk(self.disk_dvr, self.instance) - tf.assert_called_once_with( - name='detach_storage', provides='stor_adpt_mappings') - - def test_connect_disk(self): - stg_ftsk = mock.Mock() - disk_dev_info = mock.Mock() - - task = tf_stg.ConnectDisk( - self.disk_dvr, self.instance, stg_ftsk=stg_ftsk) - task.execute(disk_dev_info) - self.disk_dvr.connect_disk.assert_called_once_with( - self.instance, disk_dev_info, stg_ftsk=stg_ftsk) - - task.revert(disk_dev_info, 'result', 'flow failures') - self.disk_dvr.disconnect_disk.assert_called_once_with(self.instance) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.ConnectDisk(self.disk_dvr, self.instance) - tf.assert_called_once_with( - name='connect_disk', requires=['disk_dev_info']) - - def test_create_disk_for_img(self): - image_meta = mock.Mock() - image_type = mock.Mock() - - task = tf_stg.CreateDiskForImg( - self.disk_dvr, self.context, self.instance, image_meta, - image_type=image_type) - task.execute() - self.disk_dvr.create_disk_from_image.assert_called_once_with( - self.context, self.instance, image_meta, image_type=image_type) - - task.revert('result', 'flow failures') - self.disk_dvr.delete_disks.assert_called_once_with(['result']) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.CreateDiskForImg( - self.disk_dvr, self.context, self.instance, image_meta) - tf.assert_called_once_with( - name='crt_disk_from_img', provides='disk_dev_info') - - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True) - @mock.patch('nova_powervm.virt.powervm.mgmt.discover_vscsi_disk', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.mgmt.remove_block_dev', - autospec=True) - def test_instance_disk_to_mgmt(self, mock_rm, mock_discover, mock_find): - mock_discover.return_value = '/dev/disk' - mock_instance = mock.Mock() - mock_instance.name = 'instance_name' - mock_stg = mock.Mock() - mock_stg.name = 'stg_name' - mock_vwrap = mock.Mock() - mock_vwrap.name = 'vios_name' - mock_vwrap.uuid = 'vios_uuid' - mock_vwrap.scsi_mappings = ['mapping1'] - - disk_dvr = mock.MagicMock() - disk_dvr.mp_uuid = 'mp_uuid' - disk_dvr.connect_instance_disk_to_mgmt.return_value = (mock_stg, - mock_vwrap) - - def reset_mocks(): - mock_find.reset_mock() - mock_discover.reset_mock() - mock_rm.reset_mock() - disk_dvr.reset_mock() - - # Good path - find_maps returns one result - mock_find.return_value = ['one_mapping'] - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - self.assertEqual('instance_disk_to_mgmt', tf.name) - self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute()) - 
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with( - mock_instance) - mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid', - stg_elem=mock_stg) - mock_discover.assert_called_with('one_mapping') - tf.revert('result', 'failures') - disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid', - 'stg_name') - mock_rm.assert_called_with('/dev/disk') - - # Good path - find_maps returns >1 result - reset_mocks() - mock_find.return_value = ['first_mapping', 'second_mapping'] - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute()) - disk_dvr.connect_instance_disk_to_mgmt.assert_called_with( - mock_instance) - mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid', - stg_elem=mock_stg) - mock_discover.assert_called_with('first_mapping') - tf.revert('result', 'failures') - disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid', - 'stg_name') - mock_rm.assert_called_with('/dev/disk') - - # Management Partition is VIOS and NovaLink hosted storage - reset_mocks() - disk_dvr.vios_uuids = ['mp_uuid'] - dev_name = '/dev/vg/fake_name' - disk_dvr.get_bootdisk_path.return_value = dev_name - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - self.assertEqual((None, None, dev_name), tf.execute()) - - # Management Partition is VIOS and not NovaLink hosted storage - reset_mocks() - disk_dvr.vios_uuids = ['mp_uuid'] - disk_dvr.get_bootdisk_path.return_value = None - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - tf.execute() - disk_dvr.connect_instance_disk_to_mgmt.assert_called_with( - mock_instance) - - # Bad path - find_maps returns no results - reset_mocks() - mock_find.return_value = [] - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - self.assertRaises(npvmex.NewMgmtMappingNotFoundException, tf.execute) - disk_dvr.connect_instance_disk_to_mgmt.assert_called_with( - mock_instance) - # find_maps was still called - mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid', - stg_elem=mock_stg) - # discover_vscsi_disk didn't get called - self.assertEqual(0, mock_discover.call_count) - tf.revert('result', 'failures') - # disconnect_disk_from_mgmt got called - disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid', - 'stg_name') - # ...but remove_block_dev did not. 
- self.assertEqual(0, mock_rm.call_count) - - # Bad path - connect raises - reset_mocks() - disk_dvr.connect_instance_disk_to_mgmt.side_effect = ( - npvmex.InstanceDiskMappingFailed(instance_name='inst_name')) - tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - self.assertRaises(npvmex.InstanceDiskMappingFailed, tf.execute) - disk_dvr.connect_instance_disk_to_mgmt.assert_called_with( - mock_instance) - self.assertEqual(0, mock_find.call_count) - self.assertEqual(0, mock_discover.call_count) - # revert shouldn't call disconnect or remove - tf.revert('result', 'failures') - self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count) - self.assertEqual(0, mock_rm.call_count) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance) - tf.assert_called_once_with( - name='instance_disk_to_mgmt', - provides=['stg_elem', 'vios_wrap', 'disk_path']) - - @mock.patch('nova_powervm.virt.powervm.mgmt.remove_block_dev', - autospec=True) - def test_remove_instance_disk_from_mgmt(self, mock_rm): - disk_dvr = mock.MagicMock() - mock_instance = mock.Mock() - mock_instance.name = 'instance_name' - mock_stg = mock.Mock() - mock_stg.name = 'stg_name' - mock_vwrap = mock.Mock() - mock_vwrap.name = 'vios_name' - mock_vwrap.uuid = 'vios_uuid' - - tf = tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance) - self.assertEqual('remove_inst_disk_from_mgmt', tf.name) - tf.execute(mock_stg, mock_vwrap, '/dev/disk') - disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid', - 'stg_name') - mock_rm.assert_called_with('/dev/disk') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance) - tf.assert_called_once_with( - name='remove_inst_disk_from_mgmt', - requires=['stg_elem', 'vios_wrap', 'disk_path']) - - def test_finddisk(self): - disk_dvr = mock.Mock() - disk_dvr.get_disk_ref.return_value = 'disk_ref' - instance = mock.Mock() - context = 'context' - disk_type = 'disk_type' - - task = tf_stg.FindDisk(disk_dvr, context, instance, disk_type) - ret_disk = task.execute() - disk_dvr.get_disk_ref.assert_called_once_with(instance, disk_type) - self.assertEqual('disk_ref', ret_disk) - - # Bad path for no disk found - disk_dvr.reset_mock() - disk_dvr.get_disk_ref.return_value = None - ret_disk = task.execute() - disk_dvr.get_disk_ref.assert_called_once_with(instance, disk_type) - self.assertIsNone(ret_disk) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.FindDisk(disk_dvr, context, instance, disk_type) - tf.assert_called_once_with(name='find_disk', provides='disk_dev_info') - - def test_save_bdm(self): - mock_bdm = mock.Mock(volume_id=1) - save_bdm = tf_stg.SaveBDM(mock_bdm, 'instance') - save_bdm.execute() - mock_bdm.save.assert_called_once_with() - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.SaveBDM(mock_bdm, 'instance') - tf.assert_called_once_with(name='save_bdm_1') - - def test_extend_disk(self): - disk_dvr = mock.Mock() - instance = mock.Mock() - disk_info = {'type': 'disk_type'} - - task = tf_stg.ExtendDisk(disk_dvr, instance, disk_info, 1024) - task.execute() - disk_dvr.extend_disk.assert_called_once_with(instance, disk_info, 1024) - - # Validate args on taskflow.task.Task instantiation - with 
mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.ExtendDisk(disk_dvr, instance, disk_info, 1024) - tf.assert_called_once_with(name='extend_disk_disk_type') - - def test_connect_volume(self): - vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}}) - - task = tf_stg.ConnectVolume(vol_dvr, 'slot map') - task.execute() - vol_dvr.connect_volume.assert_called_once_with('slot map') - - task.revert('result', 'flow failures') - vol_dvr.reset_stg_ftsk.assert_called_once_with() - vol_dvr.disconnect_volume.assert_called_once_with('slot map') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.ConnectVolume(vol_dvr, 'slot map') - tf.assert_called_once_with(name='connect_vol_1') - - def test_disconnect_volume(self): - vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}}) - - task = tf_stg.DisconnectVolume(vol_dvr, 'slot map') - task.execute() - vol_dvr.disconnect_volume.assert_called_once_with('slot map') - - task.revert('result', 'flow failures') - vol_dvr.reset_stg_ftsk.assert_called_once_with() - vol_dvr.connect_volume.assert_called_once_with('slot map') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_stg.DisconnectVolume(vol_dvr, 'slot map') - tf.assert_called_once_with(name='disconnect_vol_1') diff --git a/nova_powervm/tests/virt/powervm/tasks/test_vm.py b/nova_powervm/tests/virt/powervm/tasks/test_vm.py deleted file mode 100644 index 46234c32..00000000 --- a/nova_powervm/tests/virt/powervm/tasks/test_vm.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
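The ConnectVolume/DisconnectVolume tests above lean on taskflow's revert contract: when any task in a flow fails, the engine calls revert() on every previously completed task, in reverse order, and then re-raises the failure. A runnable sketch of that behavior (illustrative names, assuming taskflow and the stdlib unittest.mock):

from unittest import mock

from taskflow import engines as tf_eng
from taskflow.patterns import linear_flow as tf_lf
from taskflow import task as tf_tsk


class ConnectVolumeLike(tf_tsk.Task):
    def __init__(self, vol_dvr):
        super(ConnectVolumeLike, self).__init__(name='connect_vol')
        self.vol_dvr = vol_dvr

    def execute(self):
        self.vol_dvr.connect_volume()

    def revert(self, result, flow_failures):
        # Undo the connect when a later task in the flow fails.
        self.vol_dvr.disconnect_volume()


def fail():
    raise ValueError('force a revert')


vol_dvr = mock.Mock()
flow = tf_lf.Flow('demo')
flow.add(ConnectVolumeLike(vol_dvr), tf_tsk.FunctorTask(fail, name='boom'))
try:
    tf_eng.run(flow)
except ValueError:
    pass
# Both the forward and the compensating call happened exactly once.
vol_dvr.connect_volume.assert_called_once_with()
vol_dvr.disconnect_volume.assert_called_once_with()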
-
-import mock
-
-from nova.compute import task_states
-from nova import exception
-from nova import test
-
-from nova_powervm.virt.powervm.tasks import vm as tf_vm
-
-from pypowervm import const as pvmc
-from taskflow import engines as tf_eng
-from taskflow.patterns import linear_flow as tf_lf
-from taskflow import task as tf_tsk
-
-
-class TestVMTasks(test.NoDBTestCase):
-    def setUp(self):
-        super(TestVMTasks, self).setUp()
-        self.apt = mock.Mock()
-        self.instance = mock.Mock(uuid='fake-uuid')
-
-    @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
-                autospec=True)
-    def test_get(self, mock_inst_wrap):
-        get = tf_vm.Get(self.apt, 'host_uuid', self.instance)
-        get.execute()
-        mock_inst_wrap.assert_called_once_with(self.apt, self.instance)
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.Get(self.apt, 'host_uuid', self.instance)
-        tf.assert_called_once_with(name='get_vm', provides='lpar_wrap')
-
-    @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
-    @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
-                autospec=True)
-    @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.create_lpar', autospec=True)
-    def test_create(self, mock_vm_crt, mock_stg, mock_bld, mock_ftsk):
-        nvram_mgr = mock.Mock()
-        nvram_mgr.fetch.return_value = 'data'
-        mock_ftsk.name = 'vio_feed_task'
-        lpar_entry = mock.Mock()
-
-        # Test create with normal (non-recreate) ftsk
-        crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance,
-                           stg_ftsk=mock_ftsk, nvram_mgr=nvram_mgr,
-                           slot_mgr='slot_mgr')
-        mock_vm_crt.return_value = lpar_entry
-        crt_entry = crt.execute()
-
-        mock_ftsk.execute.assert_not_called()
-        mock_vm_crt.assert_called_once_with(
-            self.apt, 'host_wrapper', self.instance, nvram='data',
-            slot_mgr='slot_mgr')
-        self.assertEqual(lpar_entry, crt_entry)
-        nvram_mgr.fetch.assert_called_once_with(self.instance)
-
-        mock_ftsk.name = 'create_scrubber'
-        mock_bld.return_value = mock_ftsk
-        # Test create with recreate ftsk
-        rcrt = tf_vm.Create(self.apt, 'host_wrapper', self.instance,
-                            stg_ftsk=None, nvram_mgr=nvram_mgr,
-                            slot_mgr='slot_mgr')
-        mock_bld.assert_called_once_with(
-            self.apt, name='create_scrubber',
-            xag={pvmc.XAG.VIO_SMAP, pvmc.XAG.VIO_FMAP})
-        rcrt.execute()
-        mock_ftsk.execute.assert_called_once_with()
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.Create(self.apt, 'host_wrapper', self.instance)
-        tf.assert_called_once_with(name='crt_vm', provides='lpar_wrap')
-
-    @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.tasks.vm.Create.execute',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar', autospec=True)
-    def test_create_revert(self, mock_vm_dlt, mock_crt_exc,
-                           mock_get_pvm_uuid):
-
-        mock_crt_exc.side_effect = exception.NovaException()
-        crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'stg_ftsk',
-                           None)
-
-        # Assert that a failure while building does not revert
-        crt.instance.task_state = task_states.SPAWNING
-        flow_test = tf_lf.Flow("test_revert")
-        flow_test.add(crt)
-        self.assertRaises(exception.NovaException, tf_eng.run, flow_test)
-        self.assertEqual(0, mock_vm_dlt.call_count)
-
-        # Assert that a failure during rebuild results in a revert
-        crt.instance.task_state = task_states.REBUILD_SPAWNING
-        flow_test = tf_lf.Flow("test_revert")
flow_test.add(crt) - self.assertRaises(exception.NovaException, tf_eng.run, flow_test) - self.assertEqual(1, mock_vm_dlt.call_count) - - @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True) - def test_power_on(self, mock_pwron): - pwron = tf_vm.PowerOn(self.apt, self.instance, pwr_opts='opt') - pwron.execute() - mock_pwron.assert_called_once_with(self.apt, self.instance, opts='opt') - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_vm.PowerOn(self.apt, self.instance) - tf.assert_called_once_with(name='pwr_vm') - - @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True) - def test_power_on_revert(self, mock_pwroff, mock_pwron): - flow = tf_lf.Flow('revert_power_on') - pwron = tf_vm.PowerOn(self.apt, self.instance, pwr_opts='opt') - flow.add(pwron) - - # Dummy Task that fails, triggering flow revert - def failure(*a, **k): - raise ValueError() - flow.add(tf_tsk.FunctorTask(failure)) - - # When PowerOn.execute doesn't fail, revert calls power_off - self.assertRaises(ValueError, tf_eng.run, flow) - mock_pwron.assert_called_once_with(self.apt, self.instance, opts='opt') - mock_pwroff.assert_called_once_with(self.apt, self.instance, - force_immediate=True) - - mock_pwron.reset_mock() - mock_pwroff.reset_mock() - - # When PowerOn.execute fails, revert doesn't call power_off - mock_pwron.side_effect = exception.NovaException() - self.assertRaises(exception.NovaException, tf_eng.run, flow) - mock_pwron.assert_called_once_with(self.apt, self.instance, opts='opt') - self.assertEqual(0, mock_pwroff.call_count) - - @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True) - def test_power_off(self, mock_pwroff): - # Default force_immediate - pwroff = tf_vm.PowerOff(self.apt, self.instance) - pwroff.execute() - mock_pwroff.assert_called_once_with(self.apt, self.instance, - force_immediate=False) - - mock_pwroff.reset_mock() - - # Explicit force_immediate - pwroff = tf_vm.PowerOff(self.apt, self.instance, force_immediate=True) - pwroff.execute() - mock_pwroff.assert_called_once_with(self.apt, self.instance, - force_immediate=True) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_vm.PowerOff(self.apt, self.instance) - tf.assert_called_once_with(name='pwr_off_vm') - - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar', autospec=True) - def test_delete(self, mock_dlt): - delete = tf_vm.Delete(self.apt, self.instance) - delete.execute() - mock_dlt.assert_called_once_with(self.apt, self.instance) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_vm.Delete(self.apt, self.instance) - tf.assert_called_once_with(name='dlt_vm') - - @mock.patch('nova_powervm.virt.powervm.vm.update', autospec=True) - def test_resize(self, mock_vm_update): - - resize = tf_vm.Resize(self.apt, 'host_wrapper', self.instance, - name='new_name') - mock_vm_update.return_value = 'resized_entry' - resized_entry = resize.execute() - mock_vm_update.assert_called_once_with( - self.apt, 'host_wrapper', self.instance, entry=None, - name='new_name') - self.assertEqual('resized_entry', resized_entry) - - # Validate args on taskflow.task.Task instantiation - with mock.patch('taskflow.task.Task.__init__') as tf: - tf_vm.Resize(self.apt, 'host_wrapper', self.instance) - tf.assert_called_once_with(name='resize_vm', provides='lpar_wrap') - - 
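A subtlety behind the PowerOn revert test above: taskflow also reverts the failing task itself, passing execute()'s outcome as the result argument, and that outcome is a Failure object when execute raised. Guarding on it is one common way to get the asserted behavior, namely that power_off is skipped when power_on never succeeded (a sketch under that assumption, not necessarily how the deleted code did it):

from taskflow import task as tf_tsk
from taskflow.types import failure as tf_failure


class PowerOnLike(tf_tsk.Task):
    def __init__(self, api, instance):
        super(PowerOnLike, self).__init__(name='pwr_vm')
        self.api = api
        self.instance = instance

    def execute(self):
        self.api.power_on(self.instance)

    def revert(self, result, flow_failures):
        if isinstance(result, tf_failure.Failure):
            # execute() itself raised; nothing was powered on, so there
            # is nothing to undo.
            return
        self.api.power_off(self.instance, force_immediate=True)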
-    @mock.patch('nova_powervm.virt.powervm.vm.rename', autospec=True)
-    def test_rename(self, mock_vm_rename):
-        mock_vm_rename.return_value = 'new_entry'
-        rename = tf_vm.Rename(self.apt, self.instance, 'new_name')
-        new_entry = rename.execute()
-        mock_vm_rename.assert_called_once_with(
-            self.apt, self.instance, 'new_name')
-        self.assertEqual('new_entry', new_entry)
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.Rename(self.apt, self.instance, 'new_name')
-        tf.assert_called_once_with(
-            name='rename_vm_new_name', provides='lpar_wrap')
-
-    def test_store_nvram(self):
-        nvram_mgr = mock.Mock()
-        store_nvram = tf_vm.StoreNvram(nvram_mgr, self.instance,
-                                       immediate=True)
-        store_nvram.execute()
-        nvram_mgr.store.assert_called_once_with(self.instance,
-                                                immediate=True)
-
-        # No exception is raised if the NVRAM could not be stored.
-        nvram_mgr.reset_mock()
-        nvram_mgr.store.side_effect = ValueError('Not Available')
-        store_nvram.execute()
-        nvram_mgr.store.assert_called_once_with(self.instance,
-                                                immediate=True)
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.StoreNvram(nvram_mgr, self.instance)
-        tf.assert_called_once_with(name='store_nvram')
-
-    def test_delete_nvram(self):
-        nvram_mgr = mock.Mock()
-        delete_nvram = tf_vm.DeleteNvram(nvram_mgr, self.instance)
-        delete_nvram.execute()
-        nvram_mgr.remove.assert_called_once_with(self.instance)
-
-        # No exception is raised if the NVRAM could not be removed.
-        nvram_mgr.reset_mock()
-        nvram_mgr.remove.side_effect = ValueError('Not Available')
-        delete_nvram.execute()
-        nvram_mgr.remove.assert_called_once_with(self.instance)
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.DeleteNvram(nvram_mgr, self.instance)
-        tf.assert_called_once_with(name='delete_nvram')
-
-    @mock.patch('nova_powervm.virt.powervm.vm.update_ibmi_settings',
-                autospec=True)
-    def test_update_ibmi_settings(self, mock_update):
-        update = tf_vm.UpdateIBMiSettings(self.apt, self.instance, 'boot_type')
-        update.execute()
-        mock_update.assert_called_once_with(self.apt, self.instance,
-                                            'boot_type')
-
-        # Validate args on taskflow.task.Task instantiation
-        with mock.patch('taskflow.task.Task.__init__') as tf:
-            tf_vm.UpdateIBMiSettings(self.apt, self.instance, 'boot_type')
-        tf.assert_called_once_with(name='update_ibmi_settings')
diff --git a/nova_powervm/tests/virt/powervm/test_driver.py b/nova_powervm/tests/virt/powervm/test_driver.py
deleted file mode 100644
index 262df0e8..00000000
--- a/nova_powervm/tests/virt/powervm/test_driver.py
+++ /dev/null
@@ -1,2219 +0,0 @@
-# Copyright IBM Corp. and contributors
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
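The StoreNvram/DeleteNvram tests at the end of test_vm.py above pin down a best-effort contract: a failing NVRAM manager is logged and swallowed rather than allowed to abort the flow. A sketch consistent with that asserted behavior (illustrative names; the deleted implementation may differ in detail):

import logging

from taskflow import task as tf_tsk

LOG = logging.getLogger(__name__)


class StoreNvramLike(tf_tsk.Task):
    def __init__(self, nvram_mgr, instance, immediate=False):
        super(StoreNvramLike, self).__init__(name='store_nvram')
        self.nvram_mgr = nvram_mgr
        self.instance = instance
        self.immediate = immediate

    def execute(self):
        if self.nvram_mgr is None:
            return
        try:
            self.nvram_mgr.store(self.instance, immediate=self.immediate)
        except Exception:
            # Best-effort: log and keep going, which is exactly what the
            # tests assert by injecting a ValueError side effect.
            LOG.exception('Unable to store NVRAM for instance.')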
- -from __future__ import absolute_import -import collections -import contextlib -import logging - -import fixtures -import mock -from nova import block_device as nova_block_device -from nova.compute import provider_tree -from nova.compute import task_states -from nova import conf as cfg -from nova import exception as exc -from nova import objects -from nova.objects import base as obj_base -from nova.objects import block_device as bdmobj -from nova import test -from nova.virt import block_device as nova_virt_bdm -from nova.virt import driver as virt_driver -from nova.virt import fake -from nova.virt import hardware -from nova.virt.powervm_ext import driver -from oslo_serialization import jsonutils -from oslo_utils.fixture import uuidsentinel as uuids -from pypowervm import adapter as pvm_adp -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -from pypowervm.helpers import log_helper as log_hlp -from pypowervm.helpers import vios_busy as vio_hlp -from pypowervm.utils import transaction as pvm_tx -from pypowervm.wrappers import logical_partition as pvm_lpar -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.tests.virt import powervm -from nova_powervm.tests.virt.powervm import fixtures as fx -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import live_migration as lpm -from nova_powervm.virt.powervm import vm - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -logging.basicConfig() - - -@mock.patch('nova_powervm.virt.powervm.mgmt.mgmt_uuid', new=mock.Mock()) -class TestPowerVMDriverInit(test.NoDBTestCase): - """A test class specifically for the driver setup. - - Handles testing the configuration of the agent with the backing REST API. - """ - def test_driver_capabilities_defaults(self): - """Test the default capabilities.""" - test_driver = driver.PowerVMDriver(fake.FakeVirtAPI()) - self.assertFalse( - test_driver.capabilities['supports_migrate_to_same_host']) - self.assertTrue(test_driver.capabilities['supports_attach_interface']) - self.assertFalse(test_driver.capabilities['supports_device_tagging']) - self.assertFalse( - test_driver.capabilities['supports_tagged_attach_interface']) - self.assertFalse( - test_driver.capabilities['supports_tagged_attach_volume']) - self.assertTrue(test_driver.capabilities['supports_extend_volume']) - self.assertFalse(test_driver.capabilities['supports_multiattach']) - self.assertTrue(test_driver.capabilities['supports_evacuate']) - self.assertNotIn('has_imagecache', test_driver.capabilities) - self.assertEqual(19, len(test_driver.capabilities)) - - @mock.patch('pypowervm.tasks.storage.find_vg', - new=mock.Mock(return_value=(mock.Mock(), mock.Mock()))) - def test_driver_capabilities_from_localdisk_adapter(self): - """Test dynamic capabilities from localdisk adapter.""" - self.flags(disk_driver='localdisk', group='powervm') - self.flags(volume_group_name='foovg', group='powervm') - test_driver = driver.PowerVMDriver(fake.FakeVirtAPI()) - test_driver.adapter = mock.Mock() - test_driver.host_uuid = mock.Mock() - test_driver._setup_disk_adapter() - # Localdisk driver has the image cache capability - self.assertTrue(test_driver.capabilities['has_imagecache']) - self.assertEqual(20, len(test_driver.capabilities)) - - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - '_fetch_cluster', new=mock.Mock()) - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' 
- '_ssp', new=mock.Mock()) - @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.' - '_tier', new=mock.Mock()) - def test_driver_capabilities_from_ssp_disk_adapter(self): - """Test dynamic capabilities from SSP disk adapter.""" - self.flags(disk_driver='ssp', group='powervm') - test_driver = driver.PowerVMDriver(fake.FakeVirtAPI()) - test_driver.adapter = mock.Mock() - test_driver.host_uuid = mock.Mock() - test_driver._setup_disk_adapter() - # SSP driver doesn't have image cache capability - self.assertFalse(test_driver.capabilities['has_imagecache']) - self.assertEqual(20, len(test_driver.capabilities)) - - @mock.patch('nova_powervm.virt.powervm.event.PowerVMNovaEventHandler', - autospec=True) - @mock.patch('pypowervm.adapter.Adapter', autospec=True) - @mock.patch('pypowervm.adapter.Session', autospec=True) - def test_get_adapter(self, mock_session, mock_adapter, mock_evt_handler): - # Set up the mocks. - mock_evt_listener = (mock_session.return_value.get_event_listener. - return_value) - mock_evt_handler.return_value = 'evt_hdlr' - - # Setup and invoke - drv = driver.PowerVMDriver(fake.FakeVirtAPI()) - drv._get_adapter() - - # Assert the appropriate calls - mock_session.assert_called_once_with(conn_tries=300) - mock_adapter.assert_called_once_with( - mock_session.return_value, - helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper]) - mock_evt_listener.subscribe.assert_called_once_with('evt_hdlr') - - -class TestPowerVMDriver(test.NoDBTestCase): - def setUp(self): - super(TestPowerVMDriver, self).setUp() - - self.flags(disk_driver='localdisk', group='powervm') - self.flags(host='host1', my_ip='127.0.0.1') - self.drv_fix = self.useFixture(fx.PowerVMComputeDriver()) - self.drv = self.drv_fix.drv - self.apt = self.drv.adapter - - self._setup_lpm() - - self.disk_dvr = self.drv.disk_dvr - self.vol_fix = self.useFixture(fx.VolumeAdapter( - 'nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter')) - self.vol_drv = self.vol_fix.drv - self.iscsi_vol_fix = self.useFixture(fx.VolumeAdapter( - 'nova_powervm.virt.powervm.volume.iscsi.IscsiVolumeAdapter')) - self.iscsi_vol_drv = self.iscsi_vol_fix.drv - - self.crt_lpar = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.vm.create_lpar')).mock - - self.get_inst_wrap = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.vm.get_instance_wrapper')).mock - - self.build_tx_feed = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.partition.build_active_vio_feed_task')).mock - - self.stg_ftsk = pvm_tx.FeedTask( - 'fake', [mock.Mock(spec=pvm_vios.VIOS, uuid='uuid')]) - self.build_tx_feed.return_value = self.stg_ftsk - - self.scrub_stg = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.storage.add_lpar_storage_scrub_tasks')).mock - - self.san_lpar_name = self.useFixture(fixtures.MockPatch( - 'pypowervm.util.sanitize_partition_name_for_api')).mock - self.san_lpar_name.side_effect = lambda name: name - - self.validate_vopt = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.vopt.validate_vopt_repo_exists')).mock - self.validate_vopt.return_value = None, None - - self.slot_mgr = self.useFixture(fixtures.MockPatch( - 'nova_powervm.virt.powervm.slot.build_slot_mgr')).mock.return_value - - # Create an instance to test with - self.inst = objects.Instance(**powervm.TEST_INST_SPAWNING) - self.inst_ibmi = objects.Instance(**powervm.TEST_INST_SPAWNING) - self.inst_ibmi.system_metadata = {'image_os_distro': 'ibmi'} - - def test_get_available_nodes(self): - self.flags(host='hostname') - 
self.assertEqual(['hostname'], self.drv.get_available_nodes('node')) - - def _setup_lpm(self): - """Setup the lpm environment. - - This may have to be called directly by tests since the lpm code - cleans up the dict entry on the last expected lpm method. - """ - self.lpm = mock.Mock() - self.lpm_inst = mock.Mock() - self.lpm_inst.uuid = 'inst1' - self.drv.live_migrations = {'inst1': self.lpm} - - def test_driver_create(self): - """Validates that a driver of the PowerVM type can be initialized.""" - test_drv = driver.PowerVMDriver(fake.FakeVirtAPI()) - self.assertIsNotNone(test_drv) - - def test_cleanup_host(self): - self.drv.cleanup_host('fake_host') - self.assertTrue( - self.drv.session.get_event_listener.return_value.shutdown.called) - - @mock.patch('nova_powervm.virt.powervm.volume.iscsi.get_iscsi_initiators') - def test_get_volume_connector(self, mock_initiators): - """Tests that a volume connector can be built.""" - - initiators = [('1300C76F-9814-4A4D-B1F0-5B69352A7DEA', 'fake_iqn1'), - ('7DBBE705-E4C4-4458-8223-3EBE07015CA9', 'fake_iqn2')] - initiators = collections.OrderedDict(initiators) - - mock_initiators.return_value = initiators - - vol_connector = self.drv.get_volume_connector(mock.Mock()) - self.assertIsNotNone(vol_connector['wwpns']) - self.assertEqual(vol_connector['host'], CONF.host) - self.assertEqual('fake_iqn1', vol_connector['initiator']) - - def test_setup_disk_adapter(self): - # Ensure we can handle upper case option and we instantiate the class - self.flags(disk_driver='LoCaLDisK', group='powervm') - self.drv.disk_dvr = None - self.drv._setup_disk_adapter() - # The local disk driver has been mocked, so we just compare the name - self.assertIn('LocalStorage()', str(self.drv.disk_dvr)) - - @mock.patch('nova_powervm.virt.powervm.nvram.manager.NvramManager', - autospec=True) - @mock.patch('oslo_utils.importutils.import_object', autospec=True) - @mock.patch('nova.utils.spawn', autospec=True) - def test_setup_rebuild_store(self, mock_spawn, mock_import, mock_mgr): - self.flags(nvram_store='NoNe', group='powervm') - self.drv._setup_rebuild_store() - self.assertFalse(mock_import.called) - self.assertFalse(mock_mgr.called) - self.assertFalse(mock_spawn.called) - - self.flags(nvram_store='swift', group='powervm') - self.drv._setup_rebuild_store() - self.assertTrue(mock_import.called) - self.assertTrue(mock_mgr.called) - self.assertTrue(mock_spawn.called) - self.assertIsNotNone(self.drv.store_api) - - @mock.patch.object(vm, 'get_lpars', autospec=True) - @mock.patch.object(vm, 'get_instance', autospec=True) - def test_nvram_host_startup(self, mock_get_inst, mock_get_lpars): - - mock_lpar_wrapper = mock.Mock() - mock_lpar_wrapper.uuid = 'uuid_value' - mock_get_lpars.return_value = [mock_lpar_wrapper, - mock_lpar_wrapper, - mock_lpar_wrapper] - mock_get_inst.side_effect = [powervm.TEST_INST1, - None, - powervm.TEST_INST2] - - self.drv.nvram_mgr = mock.Mock() - self.drv._nvram_host_startup() - self.drv.nvram_mgr.store.assert_has_calls( - [mock.call(powervm.TEST_INST1), mock.call(powervm.TEST_INST2)]) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm._translate_vm_state', - autospec=True) - def test_get_info(self, mock_tx_state, mock_qp, mock_uuid): - mock_tx_state.return_value = 'fake-state' - self.assertEqual(hardware.InstanceInfo('fake-state'), - self.drv.get_info('inst')) - mock_uuid.assert_called_once_with('inst') - 
mock_qp.assert_called_once_with( - self.drv.adapter, mock_uuid.return_value, 'PartitionState') - mock_tx_state.assert_called_once_with(mock_qp.return_value) - - @mock.patch('nova_powervm.virt.powervm.vm.get_lpar_names') - def test_list_instances(self, mock_names): - mock_names.return_value = ['one', 'two', 'three'] - self.assertEqual(['one', 'two', 'three'], self.drv.list_instances()) - mock_names.assert_called_once_with(self.drv.adapter) - - @mock.patch('nova_powervm.virt.powervm.vm.instance_exists') - def test_instance_exists(self, mock_inst_exists): - mock_inst_exists.side_effect = [True, False] - self.assertTrue(self.drv.instance_exists(mock.Mock())) - self.assertFalse(self.drv.instance_exists(mock.Mock())) - - def test_instance_on_disk(self): - """Validates the instance_on_disk method.""" - - @mock.patch.object(self.drv, '_is_booted_from_volume') - @mock.patch.object(self.drv, '_get_block_device_info') - @mock.patch.object(self.disk_dvr, 'capabilities') - @mock.patch.object(self.disk_dvr, 'get_disk_ref') - def inst_on_disk(mock_disk_ref, mock_capb, mock_block, mock_boot): - # Test boot from volume. - mock_boot.return_value = True - self.assertTrue(self.drv.instance_on_disk(self.inst)) - - mock_boot.return_value = False - # Disk driver is shared storage and can find the disk - mock_capb['shared_storage'] = True - mock_disk_ref.return_value = 'disk_reference' - self.assertTrue(self.drv.instance_on_disk(self.inst)) - - # Disk driver can't find it - mock_disk_ref.return_value = None - self.assertFalse(self.drv.instance_on_disk(self.inst)) - - # Disk driver exception - mock_disk_ref.side_effect = ValueError('Bad disk') - self.assertFalse(self.drv.instance_on_disk(self.inst)) - mock_disk_ref.side_effect = None - - # Not on shared storage - mock_capb['shared_storage'] = False - self.assertFalse(self.drv.instance_on_disk(self.inst)) - - inst_on_disk() - - @mock.patch('pypowervm.tasks.power_opts.PowerOnOpts') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'get_cfg_drv_name') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.' - 'CreateAndConnectCfgDrive.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume' - '.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter') - def test_spawn_ops(self, mock_vdi, mock_pwron, mock_cfg_drv, - mock_plug_vifs, mock_plug_mgmt_vif, mock_boot_from_vol, - mock_crt_disk_img, mock_conn_vol, mock_crt_cfg_drv, - mock_cfg_name, mock_pwron_opts): - """Validates the 'typical' spawn flow of the spawn of an instance. - - Uses a basic disk image, attaching networks and powering on. - """ - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = False - mock_pwron_opts.return_value = 'fake-opts' - # Invoke the method. - self.drv.spawn('context', self.inst, powervm.IMAGE1, - 'injected_files', 'admin_password', {}) - - # _vol_drv_iter not called from spawn because not recreate; but still - # called from _add_volume_connection_tasks. 
- mock_vdi.assert_called_once_with( - self.inst, [], stg_ftsk=self.build_tx_feed.return_value) - # Assert the correct tasks were called - self.assertTrue(mock_plug_vifs.called) - self.assertTrue(mock_plug_mgmt_vif.called) - self.assertTrue(mock_crt_disk_img.called) - self.crt_lpar.assert_called_with( - self.apt, self.drv.host_wrapper, self.inst, nvram=None, - slot_mgr=self.slot_mgr) - mock_pwron.assert_called_once_with(self.apt, self.inst, - opts='fake-opts') - mock_cfg_name.assert_not_called() - # Assert that tasks that are not supposed to be called are not called - self.assertFalse(mock_conn_vol.called) - self.assertFalse(mock_crt_cfg_drv.called) - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('pypowervm.tasks.power_opts.PowerOnOpts') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'get_cfg_drv_name') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'create_cfg_drv_vopt') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_with_cfg(self, mock_pwron, mock_cfg_drv, mock_cfg_vopt, - mock_cfg_name, mock_plug_vifs, - mock_plug_mgmt_vif, mock_pwron_opts): - """Validates the PowerVM spawn w/ config drive operations.""" - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = True - mock_cfg_name.return_value = 'fake-name' - self.flags(remove_vopt_media_on_boot=True, group='powervm') - mock_opts = mock.MagicMock() - mock_pwron_opts.return_value = mock_opts - - # Invoke the method. - self.drv.spawn('context', self.inst, powervm.IMAGE1, - 'injected_files', 'admin_password', {}) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - # Config drive was called - self.assertTrue(mock_cfg_vopt.called) - self.assertTrue(self.validate_vopt.called) - - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock_opts) - mock_opts.remove_optical.assert_called_with('fake-name', time=60) - mock_cfg_name.assert_called_with(self.inst) - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_with_bdms(self, mock_pwron, mock_cfg_drv, mock_plug_vifs, - mock_plug_mgmt_vif, mock_boot_from_vol, - mock_crt_img, mock_save): - """Validates the PowerVM spawn. - - Specific Test: spawn of an image that has a disk image and block device - mappings are passed into spawn which originated from either the image - metadata itself or the create server request. In particular, test when - the BDMs passed in do not have the root device for the instance. - """ - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = False - - # Create some fake BDMs - block_device_info = self._fake_bdms() - - # Invoke the method. 
- self.drv.spawn('context', self.inst, powervm.IMAGE1, - 'injected_files', 'admin_password', {}, - block_device_info=block_device_info) - - self.assertTrue(mock_boot_from_vol.called) - # Since the root device is not in the BDMs we expect the image disk to - # be created. - self.assertTrue(mock_crt_img.called) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock.ANY) - - # Check that the connect volume was called - self.assertEqual(2, self.vol_drv.connect_volume.call_count) - - # Make sure the save was invoked - self.assertEqual(2, mock_save.call_count) - - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_with_image_meta_root_bdm(self, mock_pwron, mock_cfg_drv, - mock_plug_vifs, mock_plug_mgmt_vif, - mock_boot_from_vol, - mock_crt_img, mock_save): - - """Validates the PowerVM spawn. - - Specific Test: spawn of an image that does not have a disk image but - rather the block device mappings are passed into spawn. These - originated from either the image metadata itself or the create server - request. In particular, test when the BDMs passed in have the root - device for the instance and image metadata from an image is also - passed. - - Note this tests the ability to spawn an image that does not - contain a disk image but rather contains block device mappings - containing the root BDM. The - nova.compute.api.API.snapshot_volume_backed flow produces such images. - """ - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = True - - # Create some fake BDMs - block_device_info = self._fake_bdms() - # Invoke the method. - self.drv.spawn('context', self.inst, powervm.IMAGE1, - 'injected_files', 'admin_password', {}, - block_device_info=block_device_info) - - self.assertTrue(mock_boot_from_vol.called) - # Since the root device is in the BDMs we do not expect the image disk - # to be created. - self.assertFalse(mock_crt_img.called) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock.ANY) - - # Check that the connect volume was called - self.assertEqual(2, self.vol_drv.connect_volume.call_count) - - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' 
- '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_with_root_bdm(self, mock_pwron, mock_cfg_drv, - mock_plug_vifs, mock_plug_mgmt_vif, - mock_boot_from_vol, mock_crt_img, mock_save): - """Validates the PowerVM spawn. - - Specific test: when no image is given and only block device mappings - are given on the create server request. - """ - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = True - - # Create some fake BDMs - block_device_info = self._fake_bdms() - # Invoke the method. - self.drv.spawn('context', self.inst, powervm.IMAGE1, - 'injected_files', 'admin_password', {}, - block_device_info=block_device_info) - - self.assertTrue(mock_boot_from_vol.called) - # Since the root device is in the BDMs we do not expect the image disk - # to be created. - self.assertFalse(mock_crt_img.called) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock.ANY) - - # Check that the connect volume was called - self.assertEqual(2, self.vol_drv.connect_volume.call_count) - - # Make sure the BDM save was invoked twice. - self.assertEqual(2, mock_save.call_count) - - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('nova_powervm.virt.powervm.tasks.storage.' - 'CreateAndConnectCfgDrive') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectDisk') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.FindDisk') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.tasks.vm.PowerOn') - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter') - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr') - @mock.patch('taskflow.patterns.linear_flow.Flow') - @mock.patch('nova_powervm.virt.powervm.tasks.base.run') - def test_spawn_recreate(self, mock_tf_run, mock_flow, mock_build_slot_mgr, - mock_vol_drv_iter, mock_pwron, mock_cfg_drv, - mock_plug_vifs, mock_plug_mgmt_vif, - mock_boot_from_vol, mock_find_disk, mock_conn_disk, - mock_conn_vol, mock_crt_cfg_drv): - """Validates the 'recreate' spawn flow. - - Uses a basic disk image, attaching networks and powering on. - """ - # Set up the mocks to the tasks. - self.drv.nvram_mgr = mock.Mock() - self.drv.nvram_mgr.fetch.return_value = 'nvram data' - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = False - # Some tasks are mocked; some are not. Have Flow.add "execute" them so - # we can verify the code thereunder. - mock_flow.return_value.add.side_effect = lambda task: task.execute() - self.inst.task_state = task_states.REBUILD_SPAWNING - # Invoke the method. 
- self.drv.spawn('context', self.inst, powervm.EMPTY_IMAGE, - 'injected_files', 'admin_password', {}) - xags = {pvm_const.XAG.VIO_FMAP, pvm_const.XAG.VIO_SMAP, - pvm_const.XAG.VIO_STOR} - calls = [mock.call(self.drv.adapter, xag=xags), - mock.call(self.drv.adapter, name='create_scrubber', - xag=xags - {pvm_const.XAG.VIO_STOR})] - # Recreate uses all XAGs, builds special FeedTask for immediate - # scrubbing. - self.build_tx_feed.assert_has_calls(calls) - # _vol_drv_iter gets called once in spawn itself, and once under - # _add_volume_connection_tasks. - # TODO(IBM): Find a way to make the call just once. Unless it's cheap. - mock_vol_drv_iter.assert_has_calls([mock.call( - self.inst, [], stg_ftsk=self.build_tx_feed.return_value)] * 2) - mock_build_slot_mgr.assert_called_once_with( - self.inst, self.drv.store_api, adapter=self.drv.adapter, - vol_drv_iter=mock_vol_drv_iter.return_value) - # Assert the correct tasks were called - mock_plug_vifs.assert_called_once_with( - self.drv.virtapi, self.drv.adapter, self.inst, None, - self.drv.host_uuid, mock_build_slot_mgr.return_value) - mock_plug_mgmt_vif.assert_called_once_with( - self.drv.adapter, self.inst, self.drv.host_uuid, - mock_build_slot_mgr.return_value) - self.assertTrue(mock_plug_mgmt_vif.called) - self.assertTrue(mock_find_disk.called) - self.crt_lpar.assert_called_with( - self.apt, self.drv.host_wrapper, self.inst, nvram='nvram data', - slot_mgr=mock_build_slot_mgr.return_value) - # SaveSlotStore.execute - mock_build_slot_mgr.return_value.save.assert_called_once_with() - self.assertTrue(mock_pwron.called) - # Assert that tasks that are not supposed to be called are not called - self.assertFalse(mock_conn_vol.called) - self.assertFalse(mock_crt_cfg_drv.called) - self.scrub_stg.assert_called_with(mock.ANY, self.stg_ftsk, - lpars_exist=True) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - @mock.patch('nova_powervm.virt.powervm.vm.power_off') - def test_spawn_ops_rollback(self, mock_pwroff, mock_pwron, mock_cfg_drv, - mock_dlt, mock_plug_vifs, mock_plug_mgmt_vifs, - mock_save): - """Validates the PowerVM driver operations. Will do a rollback.""" - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - block_device_info = self._fake_bdms() - - # Make sure power on fails. - mock_pwron.side_effect = exc.Forbidden() - - # Invoke the method. - self.assertRaises(exc.Forbidden, self.drv.spawn, 'context', self.inst, - powervm.IMAGE1, 'injected_files', 'admin_password', - {}, block_device_info=block_device_info) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - self.assertEqual(2, self.vol_drv.connect_volume.call_count) - - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock.ANY) - - # Validate the rollbacks were called - self.assertEqual(2, self.vol_drv.disconnect_volume.call_count) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg.' - 'execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' 
- '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.tasks.vm.UpdateIBMiSettings.' - 'execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_get_boot_connectivity_type') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_ibmi(self, mock_pwron, mock_boot_conn_type, - mock_update_lod_src, mock_cfg_drv, mock_plug_vifs, - mock_plug_mgmt_vif, mock_boot_from_vol, mock_crt_img, - mock_save): - """Validates the PowerVM spawn to create an IBMi server.""" - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = True - mock_boot_conn_type.return_value = 'vscsi' - # Create some fake BDMs - block_device_info = self._fake_bdms() - # Invoke the method. - self.drv.spawn('context', self.inst_ibmi, powervm.IMAGE1, - 'injected_files', 'admin_password', {}, - block_device_info=block_device_info) - - self.assertTrue(mock_boot_from_vol.called) - # Since the root device is in the BDMs we do not expect the image disk - # to be created. - self.assertFalse(mock_crt_img.called) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst_ibmi, nvram=None, - slot_mgr=self.slot_mgr) - - self.assertTrue(mock_boot_conn_type.called) - self.assertTrue(mock_update_lod_src.called) - - # Power on was called - mock_pwron.assert_called_once_with(self.apt, self.inst_ibmi, - opts=mock.ANY) - - # Check that the connect volume was called - self.assertEqual(2, self.vol_drv.connect_volume.call_count) - - # Make sure the BDM save was invoked twice. - self.assertEqual(2, mock_save.call_count) - - @mock.patch('nova_powervm.virt.powervm.tasks.storage.' - 'CreateAndConnectCfgDrive.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume' - '.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.tasks.vm.UpdateIBMiSettings' - '.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_get_boot_connectivity_type') - @mock.patch('nova_powervm.virt.powervm.vm.power_on') - def test_spawn_ibmi_without_bdms(self, mock_pwron, mock_boot_conn_type, - mock_update_lod_src, mock_cfg_drv, - mock_plug_vifs, mock_plug_mgmt_vif, - mock_boot_from_vol, mock_crt_disk_img, - mock_conn_vol, mock_crt_cfg_drv): - """Validates the 'typical' spawn flow for IBMi - - Perform an UT using an image with local disk, attaching networks - and powering on. - """ - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - mock_boot_from_vol.return_value = False - mock_boot_conn_type.return_value = 'vscsi' - # Invoke the method. 
- self.drv.spawn('context', self.inst_ibmi, powervm.IMAGE1, - 'injected_files', 'admin_password', {}) - - # Assert the correct tasks were called - self.assertTrue(mock_plug_vifs.called) - self.assertTrue(mock_plug_mgmt_vif.called) - self.assertTrue(mock_crt_disk_img.called) - self.crt_lpar.assert_called_with( - self.apt, self.drv.host_wrapper, self.inst_ibmi, nvram=None, - slot_mgr=self.slot_mgr) - self.assertTrue(mock_update_lod_src.called) - mock_pwron.assert_called_once_with(self.apt, self.inst_ibmi, - opts=mock.ANY) - # Assert that tasks that are not supposed to be called are not called - self.assertFalse(mock_conn_vol.called) - self.assertFalse(mock_crt_cfg_drv.called) - - @mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.' - 'delete_disks') - @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg.' - 'execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova.virt.configdrive.required_by') - def test_spawn_ops_rollback_disk(self, mock_cfg_drv, mock_dlt, - mock_plug_vifs, mock_plug_mgmt_vifs, - mock_crt_disk, mock_delete_disks): - """Validates the rollback if failure occurs on disk create.""" - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - - # Make sure power on fails. - mock_crt_disk.side_effect = exc.Forbidden() - - # Invoke the method. - self.assertRaises(exc.Forbidden, self.drv.spawn, 'context', self.inst, - powervm.IMAGE1, 'injected_files', 'admin_password', - {}, block_device_info=None) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - - # Since the create disks method failed, the delete disks should not - # have been called - self.assertFalse(mock_delete_disks.called) - - @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute') - @mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('pypowervm.tasks.power.power_on') - @mock.patch('pypowervm.tasks.power.power_off') - def test_spawn_ops_rollback_on_vol_connect(self, mock_pwroff, mock_pwron, - mock_cfg_drv, mock_dlt, - mock_plug_vifs, - mock_plug_mgmt_vifs, mock_save): - """Validates the rollbacks on a volume connect failure.""" - # Set up the mocks to the tasks. - mock_cfg_drv.return_value = False - block_device_info = self._fake_bdms() - - # Have the connect fail. Also fail the disconnect on revert. Should - # not block the rollback. - self.vol_drv.connect_volume.side_effect = exc.Forbidden() - self.vol_drv.disconnect_volume.side_effect = p_exc.VolumeDetachFailed( - volume_id='1', instance_name=self.inst.name, reason='Test Case') - - # Invoke the method. - self.assertRaises(exc.Forbidden, self.drv.spawn, 'context', self.inst, - powervm.IMAGE1, 'injected_files', 'admin_password', - {}, block_device_info=block_device_info) - - # Create LPAR was called - self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, nvram=None, - slot_mgr=self.slot_mgr) - self.assertEqual(1, self.vol_drv.connect_volume.call_count) - - # Power on should not be called. Shouldn't get that far in flow. 
- self.assertFalse(mock_pwron.called) - - # Disconnect should, as it may need to remove from one of the VIOSes - # (but maybe failed on another). - self.assertTrue(self.vol_drv.disconnect_volume.called) - - @mock.patch('nova.block_device.get_root_bdm', autospec=True) - @mock.patch('nova.virt.driver.block_device_info_get_mapping', - autospec=True) - def test_is_booted_from_volume(self, mock_get_mapping, mock_get_root_bdm): - block_device_info = self._fake_bdms() - ret = self.drv._is_booted_from_volume(block_device_info) - mock_get_root_bdm.assert_called_once_with( - mock_get_mapping.return_value) - self.assertTrue(ret) - self.assertEqual(1, mock_get_mapping.call_count) - - mock_get_mapping.reset_mock() - mock_get_root_bdm.return_value = None - ret = self.drv._is_booted_from_volume(block_device_info) - self.assertFalse(ret) - self.assertEqual(1, mock_get_mapping.call_count) - - # Test if block_device_info is None - ret = self.drv._is_booted_from_volume(None) - self.assertFalse(ret) - - def test_get_inst_xag(self): - # No volumes - should be just the SCSI mapping - xag = self.drv._get_inst_xag(mock.Mock(), None) - self.assertEqual([pvm_const.XAG.VIO_SMAP], xag) - - # The vSCSI Volume attach - only needs the SCSI mapping. - self.flags(fc_attach_strategy='vscsi', group='powervm') - mock_bdm = {'connection_info': - {'driver_volume_type': 'fibre_channel'}} - xag = self.drv._get_inst_xag(mock.Mock(), [mock_bdm]) - self.assertEqual([pvm_const.XAG.VIO_SMAP], xag) - - # The NPIV volume attach - requires SCSI, Storage and FC Mapping - self.flags(fc_attach_strategy='npiv', group='powervm') - xag = self.drv._get_inst_xag(mock.Mock(), [mock_bdm]) - self.assertEqual({pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP, - pvm_const.XAG.VIO_FMAP}, set(xag)) - - # The vSCSI Volume attach - Ensure case insensitive. 
- self.flags(fc_attach_strategy='VSCSI', group='powervm') - xag = self.drv._get_inst_xag(mock.Mock(), [mock_bdm]) - self.assertEqual([pvm_const.XAG.VIO_SMAP], xag) - - # Validate the other volume types only return SCSI mappings - vol_types = ['iscsi', 'gpfs', 'local', 'nfs'] - for vol_type in vol_types: - mock_bdm = {'connection_info': - {'driver_volume_type': vol_type}} - xag = self.drv._get_inst_xag(mock.Mock(), [mock_bdm]) - self.assertEqual([pvm_const.XAG.VIO_SMAP], xag) - - # If a recreate, all should be returned - xag = self.drv._get_inst_xag(mock.Mock(), [mock_bdm], recreate=True) - self.assertEqual({pvm_const.XAG.VIO_STOR, - pvm_const.XAG.VIO_SMAP, - pvm_const.XAG.VIO_FMAP}, set(xag)) - - @mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.tasks.storage.SaveBDM', - autospec=True) - def test_add_vol_conn_task(self, mock_save_bdm, mock_conn_vol): - bdm1, bdm2, vol_drv1, vol_drv2 = [mock.Mock()] * 4 - flow = mock.Mock() - mock_save_bdm.side_effect = 'save_bdm1', 'save_bdm2' - mock_conn_vol.side_effect = 'conn_vol1', 'conn_vol2' - vals = [(bdm1, vol_drv1), (bdm2, vol_drv2)] - with mock.patch.object(self.drv, '_vol_drv_iter', - return_value=vals) as mock_vdi: - self.drv._add_volume_connection_tasks( - 'context', 'instance', 'bdms', flow, 'stg_ftsk', 'slot_mgr') - mock_vdi.assert_called_once_with('instance', 'bdms', - stg_ftsk='stg_ftsk') - mock_conn_vol.assert_has_calls([mock.call(vol_drv1, 'slot_mgr'), - mock.call(vol_drv2, 'slot_mgr')]) - mock_save_bdm.assert_has_calls([mock.call(bdm1, 'instance'), - mock.call(bdm2, 'instance')]) - flow.add.assert_has_calls([ - mock.call('conn_vol1'), mock.call('save_bdm1'), - mock.call('conn_vol2'), mock.call('save_bdm2')]) - - @mock.patch('nova_powervm.virt.powervm.tasks.storage.DisconnectVolume', - autospec=True) - def test_add_vol_disconn_task(self, mock_disconn_vol): - vol_drv1, vol_drv2 = [mock.Mock()] * 2 - flow = mock.Mock() - mock_disconn_vol.side_effect = 'disconn_vol1', 'disconn_vol2' - vals = [('bdm', vol_drv1), ('bdm', vol_drv2)] - with mock.patch.object(self.drv, '_vol_drv_iter', - return_value=vals) as mock_vdi: - self.drv._add_volume_disconnection_tasks( - 'context', 'instance', 'bdms', flow, 'stg_ftsk', 'slot_mgr') - mock_vdi.assert_called_once_with('instance', 'bdms', - stg_ftsk='stg_ftsk') - mock_disconn_vol.assert_has_calls([mock.call(vol_drv1, 'slot_mgr'), - mock.call(vol_drv2, 'slot_mgr')]) - flow.add.assert_has_calls([mock.call('disconn_vol1'), - mock.call('disconn_vol2')]) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=False)) - def test_get_boot_connectivity_type_no_bfv(self): - # Boot connectivity type defaults to vscsi when not booted from volume. - self.assertEqual('vscsi', self.drv._get_boot_connectivity_type( - 'bdms', 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_no_bdms(self): - # Boot connectivity type defaults to vscsi when no BDMs - self.assertEqual('vscsi', self.drv._get_boot_connectivity_type( - None, 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_no_boot_bdm(self): - # Boot connectivity type defaults to vscsi when no BDM has a boot_index - # of zero (which should actually never happen IRL). 
- self.assertEqual('vscsi', self.drv._get_boot_connectivity_type( - [{'boot_index': 1}], 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_driver_volume_type(self): - # Boot connectivity type discovered via BDM driver_volume_type. - bdms = self._fake_bdms(set_boot_index=True)['block_device_mapping'] - self.assertEqual('fibre_channel', self.drv._get_boot_connectivity_type( - bdms, 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_data_driver_volume_type(self): - # Boot connectivity type discovered via BDM driver_volume_type in - # conn_info['data'], which I think is bogus, but preserved for - # compatibility. - bdms = self._fake_bdms( - set_boot_index=True, - driver_volume_type_in_data=True)['block_device_mapping'] - self.assertEqual('fibre_channel', self.drv._get_boot_connectivity_type( - bdms, 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_connection_type(self): - # Boot connectivity type discovered from BDM's connectivity-type - bdms = self._fake_bdms(connection_type='hello', - set_boot_index=True)['block_device_mapping'] - self.assertEqual('hello', self.drv._get_boot_connectivity_type( - bdms, 'block_device_info')) - # We convert 'pv_vscsi' to 'vscsi' - bdms = self._fake_bdms(connection_type='pv_vscsi', - set_boot_index=True)['block_device_mapping'] - self.assertEqual('vscsi', self.drv._get_boot_connectivity_type( - bdms, 'block_device_info')) - - @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.' - '_is_booted_from_volume', new=mock.Mock(return_value=True)) - def test_get_boot_connectivity_type_driver_volume_type_unset(self): - # Boot connectivity type defaults to vscsi when BDM driver_volume_type - # is unset. - bdms = self._fake_bdms(driver_volume_type=None, - set_boot_index=True)['block_device_mapping'] - self.assertEqual('vscsi', self.drv._get_boot_connectivity_type( - bdms, 'block_device_info')) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.update', - new=mock.Mock()) - @mock.patch('nova_powervm.virt.powervm.tasks.network.UnplugVifs.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' - '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova_powervm.virt.powervm.vm.power_off') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'dlt_vopt') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr') - def test_destroy_internal( - self, mock_bld_slot_mgr, mock_cfg, mock_pvmuuid, - mock_dlt_vopt, mock_pwroff, mock_dlt, mock_boot_from_vol, - mock_unplug_vifs, mock_rm_maps): - """Validates the basic PowerVM destroy.""" - # NVRAM Manager - self.drv.nvram_mgr = mock.Mock() - mock_cfg.return_value = True - - # BDMs - mock_bdms = self._fake_bdms() - mock_boot_from_vol.return_value = False - - def validate_rm_maps(vwrap, lpar_uuid): - self.assertIsInstance(vwrap, pvm_vios.VIOS) - self.assertEqual(mock_pvmuuid.return_value, lpar_uuid) - mock_rm_maps.side_effect = validate_rm_maps - # Invoke the method. 
- self.drv.destroy('context', self.inst, ['net'], - block_device_info=mock_bdms) - - # Power off was called - mock_pwroff.assert_called_with(self.drv.adapter, self.inst, - force_immediate=True) - - mock_bld_slot_mgr.assert_called_once_with(self.inst, - self.drv.store_api) - # Unplug should have been called - # TODO(IBM): Find a way to verify UnplugVifs(..., slot_mgr) - self.assertTrue(mock_unplug_vifs.called) - - # Validate that the vopt delete was called - self.assertTrue(mock_dlt_vopt.called) - - # Validate that the volume detach was called - self.vol_drv.disconnect_volume.assert_has_calls( - [mock.call(mock_bld_slot_mgr.return_value)] * 2) - - # Post-scrub was invoked - mock_rm_maps.assert_called() - - # Delete LPAR was called - mock_dlt.assert_called_with(self.apt, mock.ANY) - - # Validate root device in bdm was checked. - mock_boot_from_vol.assert_called_with(mock_bdms) - - # Validate disk driver detach and delete disk methods were called. - self.assertTrue(self.drv.disk_dvr.delete_disks.called) - self.assertTrue(self.drv.disk_dvr.disconnect_disk.called) - - # NVRAM was deleted - self.drv.nvram_mgr.remove.assert_called_once_with(self.inst) - # Slot store was deleted - mock_bld_slot_mgr.return_value.delete.assert_called_once_with() - - def reset_mocks(): - # Reset the mocks - for mk in [mock_pwroff, mock_dlt, mock_dlt_vopt, - self.vol_drv, mock_dlt, - mock_boot_from_vol]: - mk.reset_mock() - - def assert_not_called(): - # Power off was not called - self.assertFalse(mock_pwroff.called) - - # Validate that the vopt delete was not called - self.assertFalse(mock_dlt_vopt.called) - - # Validate that the volume detach was not called - self.assertFalse(self.vol_drv.disconnect_volume.called) - - # Delete LPAR was not called - self.assertFalse(mock_dlt.called) - - # Test when the VM's root device is a BDM. - reset_mocks() - mock_boot_from_vol.return_value = True - self.drv.disk_dvr.delete_disks.reset_mock() - self.drv.disk_dvr.disconnect_disk.reset_mock() - - # Invoke the method. - self.drv.destroy('context', self.inst, None, - block_device_info=mock_bdms) - - # Validate root device in bdm was checked. - mock_boot_from_vol.assert_called_with(mock_bdms) - - # Validate disk driver detach and delete disk methods were called. - self.assertFalse(self.drv.disk_dvr.delete_disks.called) - self.assertFalse(self.drv.disk_dvr.disconnect_disk.called) - - # Test when destroy_disks set to False. - reset_mocks() - mock_boot_from_vol.return_value = True - self.drv.disk_dvr.delete_disks.reset_mock() - self.drv.disk_dvr.disconnect_disk.reset_mock() - - # Invoke the method. - self.drv.destroy('context', self.inst, None, - block_device_info=mock_bdms, destroy_disks=False) - - mock_pwroff.assert_called_with(self.drv.adapter, self.inst, - force_immediate=False) - - # Start negative tests - reset_mocks() - # Pretend we didn't find the VM on the system - mock_pvmuuid.side_effect = exc.InstanceNotFound( - instance_id=self.inst.name) - - # Invoke the method. - self.drv.destroy('context', self.inst, None, - block_device_info=mock_bdms) - assert_not_called() - - reset_mocks() - - # Test generic exception - mock_pvmuuid.side_effect = ValueError('Some error') - # Invoke the method. - self.assertRaises(exc.InstanceTerminationFailure, - self.drv.destroy, 'context', self.inst, - [], block_device_info=mock_bdms) - assert_not_called() - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.tasks.network.UnplugVifs.execute') - @mock.patch('nova.virt.powervm_ext.driver.PowerVMDriver.' 
- '_is_booted_from_volume') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova_powervm.virt.powervm.vm.power_off') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'dlt_vopt') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr') - def test_destroy_internal_no_nvram_cleanup( - self, mock_bld_slot_mgr, mock_cfg, mock_pvmuuid, mock_dlt_vopt, - mock_pwroff, mock_dlt, mock_boot_from_vol, mock_unplug_vifs, - mock_rm_maps): - """Validates the basic PowerVM destroy, without NVRAM cleanup. - - Used to validate the behavior when destroying evacuated instances. - It should not clean up NVRAM as the instance is still on another host. - """ - mock_rm_maps.return_value = [] - # NVRAM Manager - self.drv.nvram_mgr = mock.Mock() - self.inst.host = 'other' - mock_cfg.return_value = True - - # BDMs - mock_bdms = self._fake_bdms() - mock_boot_from_vol.return_value = False - - # Invoke the method. - self.drv.destroy('context', self.inst, ['net'], - block_device_info=mock_bdms) - - # Power off was called - mock_pwroff.assert_called_with(self.drv.adapter, self.inst, - force_immediate=True) - - mock_bld_slot_mgr.assert_called_once_with(self.inst, - self.drv.store_api) - # Unplug should have been called - # TODO(IBM): Find a way to verify UnplugVifs(..., slot_mgr) - self.assertTrue(mock_unplug_vifs.called) - - # Validate that the vopt delete was called - self.assertTrue(mock_dlt_vopt.called) - - # Validate that the volume detach was called - self.vol_drv.disconnect_volume.assert_has_calls( - [mock.call(mock_bld_slot_mgr.return_value)] * 2) - # Delete LPAR was called - mock_dlt.assert_called_with(self.apt, mock.ANY) - - # Validate root device in bdm was checked. - mock_boot_from_vol.assert_called_with(mock_bdms) - - # Validate disk driver detach and delete disk methods were called. - self.assertTrue(self.drv.disk_dvr.delete_disks.called) - self.assertTrue(self.drv.disk_dvr.disconnect_disk.called) - - # NVRAM was NOT deleted - self.assertFalse(self.drv.nvram_mgr.remove.called) - self.assertFalse(mock_bld_slot_mgr.return_value.delete.called) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp', autospec=True) - def test_destroy(self, mock_getqp, mock_getuuid): - """Validates the basic PowerVM destroy.""" - # BDMs - mock_bdms = self._fake_bdms() - - with mock.patch.object(self.drv, '_destroy') as mock_dst_int: - # Invoke the method. - self.drv.destroy('context', self.inst, [], - block_device_info=mock_bdms) - mock_dst_int.assert_called_with( - 'context', self.inst, block_device_info=mock_bdms, - destroy_disks=True, shutdown=True, network_info=[]) - self.san_lpar_name.assert_not_called() - - # Test delete during migrate / resize - self.inst.task_state = task_states.RESIZE_REVERTING - mock_getqp.return_value = 'resize_' + self.inst.name - with mock.patch.object(self.drv, '_destroy') as mock_dst_int: - # Invoke the method. - self.drv.destroy('context', self.inst, [], - block_device_info=mock_bdms) - # We shouldn't delete our resize_ instances - mock_dst_int.assert_not_called() - self.san_lpar_name.assert_called_with('resize_' + self.inst.name) - self.san_lpar_name.reset_mock() - - # Now test migrating... - mock_getqp.return_value = 'migrate_' + self.inst.name - with mock.patch.object(self.drv, '_destroy') as mock_dst_int: - # Invoke the method. 
- self.drv.destroy('context', self.inst, [], - block_device_info=mock_bdms) - # If it is a migrated instance, it should be deleted. - mock_dst_int.assert_called_with( - 'context', self.inst, block_device_info=mock_bdms, - destroy_disks=True, shutdown=True, network_info=[]) - - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True) - def test_attach_volume(self, mock_bld_slot_mgr): - """Validates the basic PowerVM attach volume.""" - # BDMs - mock_bdm = self._fake_bdms()['block_device_mapping'][0] - - with mock.patch.object(self.inst, 'save') as mock_save: - # Invoke the method. - self.drv.attach_volume('context', mock_bdm.get('connection_info'), - self.inst, mock.sentinel.stg_ftsk) - - mock_bld_slot_mgr.assert_called_once_with(self.inst, - self.drv.store_api) - # Verify the connect volume was invoked - self.vol_drv.connect_volume.assert_called_once_with( - mock_bld_slot_mgr.return_value) - mock_bld_slot_mgr.return_value.save.assert_called_once_with() - mock_save.assert_called_once_with() - - @mock.patch('nova_powervm.virt.powervm.vm.instance_exists', autospec=True) - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True) - def test_detach_volume(self, mock_bld_slot_mgr, mock_inst_exists): - """Validates the basic PowerVM detach volume.""" - # Mock that the instance exists for the first test, then not. - mock_inst_exists.side_effect = [True, False, False] - - # BDMs - mock_bdm = self._fake_bdms()['block_device_mapping'][0] - # Invoke the method, good path test. - self.drv.detach_volume('context', mock_bdm.get('connection_info'), - self.inst, mock.sentinel.stg_ftsk) - - mock_bld_slot_mgr.assert_called_once_with(self.inst, - self.drv.store_api) - # Verify the disconnect volume was invoked - self.vol_drv.disconnect_volume.assert_called_once_with( - mock_bld_slot_mgr.return_value) - mock_bld_slot_mgr.return_value.save.assert_called_once_with() - - # Invoke the method, instance doesn't exist, no migration - self.vol_drv.disconnect_volume.reset_mock() - self.drv.detach_volume('context', mock_bdm.get('connection_info'), - self.inst, mock.sentinel.stg_ftsk) - # Verify the disconnect volume was not invoked - self.assertEqual(0, self.vol_drv.disconnect_volume.call_count) - - # Test instance doesn't exist, migration cleanup - self.vol_drv.disconnect_volume.reset_mock() - mig = lpm.LiveMigrationDest(self.drv, self.inst) - self.drv.live_migrations[self.inst.uuid] = mig - with mock.patch.object(mig, 'cleanup_volume') as mock_clnup: - self.drv.detach_volume('context', mock_bdm.get('connection_info'), - self.inst, mock.sentinel.stg_ftsk) - # The cleanup should have been called since there was a migration - self.assertEqual(1, mock_clnup.call_count) - # Verify the disconnect volume was not invoked - self.assertEqual(0, self.vol_drv.disconnect_volume.call_count) - - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.tasks.network.UnplugVifs.execute') - @mock.patch('nova_powervm.virt.powervm.vm.delete_lpar') - @mock.patch('nova_powervm.virt.powervm.vm.power_off') - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'dlt_vopt') - @mock.patch('nova.virt.configdrive.required_by') - def test_destroy_rollback(self, mock_cfg, mock_dlt_vopt, mock_pwroff, - mock_dlt, mock_unplug_vifs, mock_rm_maps): - """Validates the basic PowerVM destroy rollback mechanism works.""" - # Set up the mocks to the tasks. 
-        mock_rm_maps.return_value = []
-        mock_cfg.return_value = True
-
-        # BDMs
-        mock_bdms = self._fake_bdms()
-
-        # Fire a failure in the power off.
-        mock_dlt.side_effect = exc.Forbidden()
-
-        # Have the connect volume fail on the rollback. Should not block the
-        # full rollback.
-        self.vol_drv.connect_volume.side_effect = p_exc.VolumeAttachFailed(
-            volume_id='1', instance_name=self.inst.name, reason='Test Case')
-
-        # Invoke the method.
-        self.assertRaises(exc.InstanceTerminationFailure, self.drv.destroy,
-                          'context', self.inst, [],
-                          block_device_info=mock_bdms)
-
-        # Validate that the vopt delete was called
-        self.assertTrue(mock_dlt_vopt.called)
-        self.assertTrue(mock_unplug_vifs.called)
-
-        # Validate that the volume detach was called
-        self.assertEqual(2, self.vol_drv.disconnect_volume.call_count)
-
-        # Delete LPAR was called
-        mock_dlt.assert_called_with(self.apt, mock.ANY)
-
-        # Validate the rollbacks were called.
-        self.assertEqual(2, self.vol_drv.connect_volume.call_count)
-
-    @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True)
-    def test_migrate_disk_and_power_off(self, mock_bld_slot_mgr):
-        """Validates the PowerVM driver migrate / resize operation."""
-        # Set up the mocks to the migrate / resize operation.
-        host = self.drv.get_host_ip_addr()
-        resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
-        resp.entry = pvm_lpar.LPAR._bld(None).entry
-        self.apt.read.return_value = resp
-
-        # BDMs
-        mock_bdms = self._fake_bdms()
-
-        # Catch root disk resize smaller.
-        small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)
-        self.assertRaises(
-            exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
-            'context', self.inst, 'dest', small_root, 'network_info',
-            mock_bdms)
-
-        # Boot disk resize
-        boot_flav = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=12)
-        # Tasks expected to be added for migrate
-        expected = [
-            'pwr_off_vm',
-            'store_nvram',
-            'extend_disk_boot',
-            'disconnect_vol_*',
-            'disconnect_vol_*',
-            'fake',
-            'rename_vm_migrate_instance-00000001',
-        ]
-        dest_host = host + '1'
-        with fx.DriverTaskFlow() as taskflow_fix:
-            self.drv.migrate_disk_and_power_off(
-                'context', self.inst, dest_host, boot_flav, 'network_info',
-                mock_bdms)
-            taskflow_fix.assert_tasks_added(self, expected)
-            mock_bld_slot_mgr.assert_called_once_with(
-                self.inst, self.drv.store_api, adapter=self.drv.adapter,
-                vol_drv_iter=mock.ANY)
-            # Check the size set in the resize task
-            extend_task = taskflow_fix.tasks_added[
-                expected.index('extend_disk_boot')]
-            self.assertEqual(extend_task.size, 12)
-            # Ensure slot manager was passed to disconnect
-            self.assertEqual(mock_bld_slot_mgr.return_value,
-                             taskflow_fix.tasks_added[3].slot_mgr)
-            self.assertEqual(mock_bld_slot_mgr.return_value,
-                             taskflow_fix.tasks_added[4].slot_mgr)
-        self.san_lpar_name.assert_called_with('migrate_' + self.inst.name)
-
-    @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True)
-    def test_finish_migration(self, mock_bld_slot_mgr):
-        mock_bdms = self._fake_bdms()
-        mig = objects.Migration(**powervm.TEST_MIGRATION)
-        mig_same_host = objects.Migration(**powervm.TEST_MIGRATION_SAME_HOST)
-        disk_info = {}
-
-        # The first test is different hosts but local storage, should fail
-        self.assertRaises(exc.InstanceFaultRollback,
-                          self.drv.finish_migration,
-                          'context', mig, self.inst, disk_info, 'network_info',
-                          powervm.IMAGE1, 'resize_instance', mock_bdms)
-
-        # The rest of the tests need to pass the shared disk test
-        self.disk_dvr.validate.return_value = None
-
-        # Tasks expected to be added for migration to a different host
-        expected = [
-            'crt_vm',
-            'plug_vifs',
-            'plug_mgmt_vif',
-            'find_disk',
-            'connect_disk',
-            'connect_vol_*',
-            'save_bdm_fake_vol1',
-            'connect_vol_*',
-            'save_bdm_fake_vol2',
-            'fake',
-            'pwr_vm',
-        ]
-        with fx.DriverTaskFlow() as taskflow_fix:
-            self.drv.finish_migration(
-                'context', mig, self.inst, disk_info, 'network_info',
-                powervm.IMAGE1, 'resize_instance', block_device_info=mock_bdms)
-            mock_bld_slot_mgr.assert_called_once_with(
-                self.inst, self.drv.store_api, adapter=self.drv.adapter,
-                vol_drv_iter=mock.ANY)
-            taskflow_fix.assert_tasks_added(self, expected)
-            # Slot manager was passed to Create, PlugVifs, PlugMgmtVif, and
-            # connect_volume (twice)
-            for idx in (0, 1, 2, 5, 7):
-                self.assertEqual(mock_bld_slot_mgr.return_value,
-                                 taskflow_fix.tasks_added[idx].slot_mgr)
-        self.san_lpar_name.assert_not_called()
-
-        mock_bld_slot_mgr.reset_mock()
-
-        # Tasks expected to be added for resize to the same host
-        expected = [
-            'resize_vm',
-            'connect_vol_*',
-            'save_bdm_fake_vol1',
-            'connect_vol_*',
-            'save_bdm_fake_vol2',
-            'fake',
-            'pwr_vm',
-        ]
-        with fx.DriverTaskFlow() as taskflow_fix:
-            self.drv.finish_migration(
-                'context', mig_same_host, self.inst, disk_info, 'network_info',
-                powervm.IMAGE1, 'resize_instance', block_device_info=mock_bdms)
-            taskflow_fix.assert_tasks_added(self, expected)
-            mock_bld_slot_mgr.assert_called_once_with(
-                self.inst, self.drv.store_api, adapter=self.drv.adapter,
-                vol_drv_iter=mock.ANY)
-            # Slot manager was passed to connect_volume (twice)
-            for idx in (1, 3):
-                self.assertEqual(mock_bld_slot_mgr.return_value,
-                                 taskflow_fix.tasks_added[idx].slot_mgr)
-        self.san_lpar_name.assert_called_with('resize_' + self.inst.name)
-        self.san_lpar_name.reset_mock()
-
-        mock_bld_slot_mgr.reset_mock()
-
-        # Tasks expected to be added for resize to the same host, no BDMs,
-        # and no power_on
-        expected = [
-            'resize_vm',
-        ]
-        with fx.DriverTaskFlow() as taskflow_fix:
-            self.drv.finish_migration(
-                'context', mig_same_host, self.inst, disk_info, 'network_info',
-                powervm.IMAGE1, 'resize_instance', power_on=False)
-            taskflow_fix.assert_tasks_added(self, expected)
-        # Don't need the slot manager on a pure resize (no BDMs and same host)
-        mock_bld_slot_mgr.assert_not_called()
-        self.san_lpar_name.assert_called_with('resize_' + self.inst.name)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.update', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True)
-    def test_finish_revert_migration(self, mock_off, mock_update, mock_on):
-        """Validates that the finish revert migration works."""
-        mock_instance = mock.Mock()
-
-        # Validate with a default power on
-        self.drv.finish_revert_migration('context', mock_instance, None)
-
-        # Asserts
-        mock_off.assert_called_once_with(self.apt, mock_instance)
-        mock_update.assert_called_once_with(
-            self.apt, self.drv.host_wrapper, mock_instance)
-        mock_on.assert_called_once_with(self.apt, mock_instance)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.update', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True)
-    def test_finish_revert_migration_no_power_on(self, mock_off, mock_update,
-                                                 mock_on):
-        """Validates that the finish revert migration works, no power_on."""
-        mock_instance = mock.Mock()
-
-        # Validate with power_on set to false
-        self.drv.finish_revert_migration(
-            'context', mock_instance, None, power_on=False)
-
-        # Asserts
-        mock_off.assert_called_once_with(self.apt, mock_instance)
-        mock_update.assert_called_once_with(
-            self.apt, self.drv.host_wrapper, mock_instance)
-        self.assertEqual(0, mock_on.call_count)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True)
-    def test_rescue(self, mock_pwron, mock_pwroff):
-        """Validates the PowerVM driver rescue operation."""
-        with mock.patch.object(self.drv, 'disk_dvr') as mock_disk_dvr:
-            # Invoke the method.
-            self.drv.rescue('context', self.inst, mock.MagicMock(),
-                            powervm.TEST_IMAGE1, 'rescue_psswd')
-
-            mock_pwroff.assert_called_once_with(self.apt, self.inst,
-                                                force_immediate=False)
-            self.assertTrue(mock_disk_dvr.create_disk_from_image.called)
-            self.assertTrue(mock_disk_dvr.connect_disk.called)
-        mock_pwron.assert_called_once_with(self.apt, self.inst, opts=mock.ANY)
-        self.assertEqual('PowerOn(bootmode=sms)',
-                         str(mock_pwron.call_args[1]['opts']))
-
-    @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.power_on', autospec=True)
-    def test_unrescue(self, mock_pwron, mock_pwroff):
-        """Validates the PowerVM driver unrescue operation."""
-        with mock.patch.object(self.drv, 'disk_dvr') as mock_disk_dvr:
-            # Invoke the method.
-            self.drv.unrescue(self.inst, 'network_info')
-
-            mock_pwroff.assert_called_once_with(
-                self.apt, self.inst, force_immediate=False)
-            self.assertTrue(mock_disk_dvr.disconnect_disk.called)
-            self.assertTrue(mock_disk_dvr.delete_disks.called)
-        mock_pwron.assert_called_once_with(self.apt, self.inst, opts=None)
-
-    @mock.patch('nova_powervm.virt.powervm.driver.LOG', autospec=True)
-    def test_log_op(self, mock_log):
-        """Validates the log_operations."""
-        self.drv._log_operation('fake_op', self.inst)
-        entry = (r'Operation: %(op)s. Virtual machine display '
-                 'name: %(display_name)s, name: %(name)s')
-        msg_dict = {'display_name': u'Fake Instance',
-                    'name': 'instance-00000001',
-                    'op': 'fake_op'}
-        mock_log.info.assert_called_with(entry, msg_dict, instance=self.inst)
-
-    @mock.patch('pypowervm.wrappers.managed_system.System.get',
-                new=mock.Mock(return_value=[mock.Mock()]))
-    @mock.patch('nova_powervm.virt.powervm.host.build_host_resource_from_ms')
-    def test_host_resources(self, mock_bhrfm):
-        mock_bhrfm.return_value = {}
-        stats = self.drv.get_available_resource('nodename')
-        self.assertIsNotNone(stats)
-
-        # Check for the presence of fields added to host stats
-        fields = ('local_gb', 'local_gb_used')
-
-        for fld in fields:
-            value = stats.get(fld, None)
-            self.assertIsNotNone(value)
-
-    @contextlib.contextmanager
-    def _update_provider_tree(self, allocations=None):
-        """Host resource dict gets converted properly to provider tree inv."""
-
-        with mock.patch('nova_powervm.virt.powervm.host.'
- 'build_host_resource_from_ms') as mock_bhrfm: - mock_bhrfm.return_value = { - 'vcpus': 8, - 'memory_mb': 2048, - } - self.drv.host_wrapper = 'host_wrapper' - # Validate that this gets converted to int with floor - self.drv.disk_dvr = mock.Mock(capacity=2091.8) - exp_inv = { - 'VCPU': { - 'total': 8, - 'max_unit': 8, - 'allocation_ratio': 16.0, - 'reserved': 0, - }, - 'MEMORY_MB': { - 'total': 2048, - 'max_unit': 2048, - 'allocation_ratio': 1.5, - 'reserved': 512, - }, - 'DISK_GB': { - 'total': 2091, - 'max_unit': 2091, - 'allocation_ratio': 1.0, - 'reserved': 0, - }, - } - ptree = provider_tree.ProviderTree() - ptree.new_root('compute_host', uuids.cn) - # Let the caller muck with these - yield ptree, exp_inv - self.drv.update_provider_tree(ptree, 'compute_host', - allocations=allocations) - self.assertEqual(exp_inv, ptree.data('compute_host').inventory) - mock_bhrfm.assert_called_once_with('host_wrapper') - - def test_update_provider_tree(self): - # Basic: no inventory already on the provider, no extra providers, no - # aggregates or traits. - with self._update_provider_tree(): - pass - - def test_update_provider_tree_ignore_allocations(self): - with self._update_provider_tree(allocations="This is ignored"): - pass - - def test_update_provider_tree_conf_overrides(self): - # Non-default CONF values for allocation ratios and reserved. - self.flags(cpu_allocation_ratio=12.3, - reserved_host_cpus=4, - ram_allocation_ratio=4.5, - reserved_host_memory_mb=32, - disk_allocation_ratio=6.7, - # This gets int(ceil)'d - reserved_host_disk_mb=5432.1) - with self._update_provider_tree() as (_, exp_inv): - exp_inv['VCPU']['allocation_ratio'] = 12.3 - exp_inv['VCPU']['reserved'] = 4 - exp_inv['MEMORY_MB']['allocation_ratio'] = 4.5 - exp_inv['MEMORY_MB']['reserved'] = 32 - exp_inv['DISK_GB']['allocation_ratio'] = 6.7 - exp_inv['DISK_GB']['reserved'] = 6 - - def test_update_provider_tree_complex_ptree(self): - # Overrides inventory already on the provider; leaves other providers - # and aggregates/traits alone. 
- with self._update_provider_tree() as (ptree, _): - ptree.update_inventory('compute_host', { - # these should get blown away - 'VCPU': { - 'total': 16, - 'max_unit': 2, - 'allocation_ratio': 1.0, - 'reserved': 10, - }, - 'CUSTOM_BOGUS': { - 'total': 1234, - } - }) - ptree.update_aggregates('compute_host', - [uuids.ss_agg, uuids.other_agg]) - ptree.update_traits('compute_host', ['CUSTOM_FOO', 'CUSTOM_BAR']) - ptree.new_root('ssp', uuids.ssp) - ptree.update_inventory('ssp', {'sentinel': 'inventory', - 'for': 'ssp'}) - ptree.update_aggregates('ssp', [uuids.ss_agg]) - ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov) - - # Make sure the compute's agg and traits were left alone - cndata = ptree.data('compute_host') - self.assertEqual(set([uuids.ss_agg, uuids.other_agg]), - cndata.aggregates) - self.assertEqual(set(['CUSTOM_FOO', 'CUSTOM_BAR']), cndata.traits) - # And the other providers were left alone - self.assertEqual(set([uuids.cn, uuids.ssp, uuids.sriov]), - set(ptree.get_provider_uuids())) - # ...including the ssp's aggregates - self.assertEqual(set([uuids.ss_agg]), ptree.data('ssp').aggregates) - - @mock.patch('nova_powervm.virt.powervm.vif.plug_secure_rmc_vif') - @mock.patch('nova_powervm.virt.powervm.vif.get_secure_rmc_vswitch') - @mock.patch('nova_powervm.virt.powervm.vif.plug') - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas') - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper') - @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr') - def test_plug_vifs(self, mock_bld_slot_mgr, mock_wrap, mock_vm_get, - mock_plug_vif, mock_get_rmc_vswitch, mock_plug_rmc_vif): - # Mock up the CNA response - cnas = [mock.MagicMock(), mock.MagicMock()] - cnas[0].mac = 'AABBCCDDEEFF' - cnas[0].vswitch_uri = 'fake_uri' - cnas[1].mac = 'AABBCCDDEE11' - cnas[1].vswitch_uri = 'fake_mgmt_uri' - mock_vm_get.return_value = cnas - - mock_lpar_wrapper = mock.MagicMock() - mock_lpar_wrapper.can_modify_io = mock.MagicMock( - return_value=(True, None)) - mock_wrap.return_value = mock_lpar_wrapper - - # Mock up the network info. They get sanitized to upper case. - net_info = [ - {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}, - {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'} - ] - - # Mock up the rmc vswitch - vswitch_w = mock.MagicMock() - vswitch_w.href = 'fake_mgmt_uri' - mock_get_rmc_vswitch.return_value = vswitch_w - - # Run method - self.drv.plug_vifs(self.inst, net_info) - - mock_bld_slot_mgr.assert_called_once_with(self.inst, - self.drv.store_api) - - # The create should have only been called once. The other was already - # existing. 
-        mock_plug_vif.assert_called_with(
-            self.drv.adapter, self.drv.host_uuid, self.inst, net_info[1],
-            mock_bld_slot_mgr.return_value, new_vif=True)
-        mock_bld_slot_mgr.return_value.save.assert_called_once_with()
-        self.assertEqual(0, mock_plug_rmc_vif.call_count)
-
-    @mock.patch('nova_powervm.virt.powervm.tasks.vm.Get', autospec=True)
-    def test_plug_vif_failures(self, mock_vm):
-        # Test instance not found handling
-        mock_vm.execute.side_effect = exc.InstanceNotFound(
-            instance_id=self.inst)
-
-        # Run method
-        self.assertRaises(exc.VirtualInterfacePlugException,
-                          self.drv.plug_vifs, self.inst, {})
-
-        # Test a random Exception
-        mock_vm.execute.side_effect = ValueError()
-
-        # Run method
-        self.assertRaises(exc.VirtualInterfacePlugException,
-                          self.drv.plug_vifs, self.inst, {})
-
-    @mock.patch('nova_powervm.virt.powervm.vif.unplug', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True)
-    def test_unplug_vifs(self, mock_bld_slot_mgr, mock_wrap, mock_vm_get,
-                         mock_unplug_vif):
-        # Mock up the CNA response
-        cnas = [mock.MagicMock(), mock.MagicMock()]
-        cnas[0].mac = 'AABBCCDDEEFF'
-        cnas[0].vswitch_uri = 'fake_uri'
-        cnas[1].mac = 'AABBCCDDEE11'
-        cnas[1].vswitch_uri = 'fake_mgmt_uri'
-        mock_vm_get.return_value = cnas
-
-        mock_lpar_wrapper = mock.MagicMock()
-        mock_lpar_wrapper.can_modify_io = mock.MagicMock(
-            return_value=(True, None))
-        mock_wrap.return_value = mock_lpar_wrapper
-
-        # Mock up the network info. They get sanitized to upper case.
-        net_info = [
-            {'address': 'aa:bb:cc:dd:ee:ff'},
-            {'address': 'aa:bb:cc:dd:ee:22'}
-        ]
-
-        # Run method
-        self.drv.unplug_vifs(self.inst, net_info)
-
-        mock_bld_slot_mgr.assert_called_once_with(self.inst,
-                                                  self.drv.store_api)
-
-        # The unplug should have been invoked for both VIFs, using the
-        # existing CNA list.
-        mock_unplug_vif.assert_has_calls(
-            [mock.call(self.drv.adapter, self.drv.host_uuid, self.inst,
-                       net_inf, mock_bld_slot_mgr.return_value,
-                       cna_w_list=cnas) for net_inf in net_info])
-        mock_bld_slot_mgr.return_value.save.assert_called_once_with()
-
-    @mock.patch('nova_powervm.virt.powervm.tasks.vm.Get.execute',
-                autospec=True)
-    def test_unplug_vif_failures(self, mock_vm):
-        # Test instance not found handling
-        mock_vm.side_effect = exc.InstanceNotFound(
-            instance_id=self.inst)
-
-        # Run method
-        self.drv.unplug_vifs(self.inst, {})
-        self.assertEqual(1, mock_vm.call_count)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
-                autospec=True)
-    def test_unplug_vif_failures_httperror(self, mock_wrap):
-        # Test instance not found handling
-        mock_wrap.side_effect = exc.InstanceNotFound(
-            instance_id=self.inst.name)
-
-        # Backing API: Instance does not exist
-        # Nova Response: No exceptions should be raised.
-        self.drv.unplug_vifs(self.inst, {})
-        self.assertEqual(1, mock_wrap.call_count)
-
-    def test_extract_bdm(self):
-        """Tests the _extract_bdm method."""
-        self.assertEqual([], self.drv._extract_bdm(None))
-        self.assertEqual([], self.drv._extract_bdm({'fake': 'val'}))
-
-        fake_bdi = {'block_device_mapping': ['content']}
-        self.assertListEqual(['content'], self.drv._extract_bdm(fake_bdi))
-
-    def test_get_host_ip_addr(self):
-        self.assertEqual(self.drv.get_host_ip_addr(), '127.0.0.1')
-
-    @mock.patch('nova.virt.powervm_ext.driver.LOG.warning', autospec=True)
-    @mock.patch('nova.compute.utils.get_machine_ips', autospec=True)
-    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
-        mock_ips.return_value = ['1.1.1.1']
-        self.drv.get_host_ip_addr()
-        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
-                                         u'not found on any of the '
-                                         u'interfaces: %(ifaces)s',
-                                         {'ifaces': '1.1.1.1',
-                                          'my_ip': mock.ANY})
-
-    def test_shared_stg_calls(self):
-        data = self.drv.check_instance_shared_storage_local('context', 'inst')
-        self.assertTrue(
-            self.drv.disk_dvr.check_instance_shared_storage_local.called)
-
-        self.drv.check_instance_shared_storage_remote('context', data)
-        self.assertTrue(
-            self.drv.disk_dvr.check_instance_shared_storage_remote.called)
-
-        self.drv.check_instance_shared_storage_cleanup('context', data)
-        self.assertTrue(
-            self.drv.disk_dvr.check_instance_shared_storage_cleanup.called)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.reboot', autospec=True)
-    def test_reboot(self, mock_reboot):
-        inst = mock.Mock()
-        self.drv.reboot('context', inst, 'network_info', 'SOFT')
-        mock_reboot.assert_called_once_with(self.apt, inst, False)
-        mock_reboot.reset_mock()
-        self.drv.reboot('context', inst, 'network_info', 'HARD')
-        mock_reboot.assert_called_once_with(self.apt, inst, True)
-
-    @mock.patch('pypowervm.tasks.vterm.open_remotable_vnc_vterm')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid',
-                new=mock.Mock(return_value='uuid'))
-    def test_get_vnc_console(self, mock_vterm):
-        # Success
-        mock_vterm.return_value = '10'
-        resp = self.drv.get_vnc_console(mock.ANY, self.inst)
-        self.assertEqual('127.0.0.1', resp.host)
-        self.assertEqual('10', resp.port)
-        self.assertEqual('uuid', resp.internal_access_path)
-        mock_vterm.assert_called_once_with(
-            mock.ANY, 'uuid', mock.ANY, vnc_path='uuid', use_x509_auth=False,
-            ca_certs=None, server_cert=None, server_key=None)
-
-        # Failure
-        mock_vterm.side_effect = pvm_exc.VNCBasedTerminalFailedToOpen(err='xx')
-        self.assertRaises(exc.ConsoleTypeUnavailable, self.drv.get_vnc_console,
-                          mock.ANY, self.inst)
-
-        # 404
-        mock_vterm.side_effect = pvm_exc.HttpNotFound(mock.Mock(status=404))
-        self.assertRaises(exc.InstanceNotFound, self.drv.get_vnc_console,
-                          mock.ANY, self.inst)
-
-    @staticmethod
-    def _fake_bdms(set_boot_index=False, connection_type=None,
-                   driver_volume_type='fibre_channel',
-                   driver_volume_type_in_data=False):
-        def _fake_bdm(volume_id, target_lun):
-            conninfo = {'data': {'volume_id': volume_id,
-                                 'target_lun': target_lun,
-                                 'initiator_target_map':
-                                     {'21000024F5': ['50050768']}}}
-            if connection_type is not None:
-                conninfo['data']['connection-type'] = connection_type
-            if driver_volume_type is not None:
-                if driver_volume_type_in_data:
-                    conninfo['data']['driver_volume_type'] = driver_volume_type
-                else:
-                    conninfo['driver_volume_type'] = driver_volume_type
-            mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
-                            'destination_type': 'volume',
-                            'connection_info': jsonutils.dumps(conninfo)}
-            if set_boot_index:
-                mapping_dict['boot_index'] = target_lun
-            bdm_dict = nova_block_device.BlockDeviceDict(mapping_dict)
-            bdm_obj = bdmobj.BlockDeviceMapping(**bdm_dict)
-
-            return nova_virt_bdm.DriverVolumeBlockDevice(bdm_obj)
-
-        bdm_list = [_fake_bdm('fake_vol1', 0), _fake_bdm('fake_vol2', 1)]
-        block_device_info = {'block_device_mapping': bdm_list}
-
-        return block_device_info
-
-    @mock.patch('nova_powervm.virt.powervm.tasks.image.UpdateTaskState.'
-                'execute')
-    @mock.patch('nova_powervm.virt.powervm.tasks.storage.InstanceDiskToMgmt.'
-                'execute')
-    @mock.patch('nova_powervm.virt.powervm.tasks.image.StreamToGlance.execute')
-    @mock.patch('nova_powervm.virt.powervm.tasks.storage.'
-                'RemoveInstanceDiskFromMgmt.execute')
-    def test_snapshot(self, mock_rm, mock_stream, mock_conn, mock_update):
-        mock_conn.return_value = 'stg_elem', 'vios_wrap', 'disk_path'
-        self.drv.snapshot('context', self.inst, 'image_id',
-                          'update_task_state')
-        self.assertEqual(2, mock_update.call_count)
-        self.assertEqual(1, mock_conn.call_count)
-        mock_stream.assert_called_with(disk_path='disk_path')
-        mock_rm.assert_called_with(stg_elem='stg_elem', vios_wrap='vios_wrap',
-                                   disk_path='disk_path')
-
-        # snapshot operation not supported
-        self.drv.disk_dvr.capabilities = {'snapshot': False}
-        self.assertRaises(exc.NotSupportedWithOption,
-                          self.drv.snapshot, 'context', self.inst, 'image_id',
-                          'update_task_state')
-
-    @mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationDest',
-                autospec=True)
-    def test_can_migrate_dest(self, mock_lpm):
-        mock_lpm.return_value.check_destination.return_value = 'dest_data'
-        dest_data = self.drv.check_can_live_migrate_destination(
-            'context', mock.Mock(), 'src_compute_info', 'dst_compute_info')
-        self.assertEqual('dest_data', dest_data)
-
-    def test_can_live_mig_dest_clnup(self):
-        self.drv.cleanup_live_migration_destination_check(
-            'context', 'dest_data')
-
-    @mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationSrc',
-                autospec=True)
-    def test_can_live_mig_src(self, mock_lpm):
-        mock_lpm.return_value.check_source.return_value = (
-            'src_data')
-        src_data = self.drv.check_can_live_migrate_source(
-            'context', mock.Mock(), 'dest_check_data')
-        self.assertEqual('src_data', src_data)
-
-    def test_pre_live_migr(self):
-        block_device_info = self._fake_bdms()
-        resp = self.drv.pre_live_migration(
-            'context', self.lpm_inst, block_device_info, 'network_info',
-            'disk_info', migrate_data='migrate_data')
-        self.assertIsNotNone(resp)
-
-    def test_live_migration(self):
-        mock_post_meth = mock.Mock()
-        mock_rec_meth = mock.Mock()
-
-        # Good path
-        self.drv.live_migration(
-            'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
-            'block_mig', 'migrate_data')
-
-        mock_post_meth.assert_called_once_with(
-            'context', self.lpm_inst, 'dest', mock.ANY, mock.ANY)
-        self.assertEqual(0, mock_rec_meth.call_count)
-
-        # Abort invocation path
-        self._setup_lpm()
-        mock_post_meth.reset_mock()
-        mock_kwargs = {'operation_name': 'op', 'seconds': 10}
-        self.lpm.live_migration.side_effect = (
-            pvm_exc.JobRequestTimedOut(**mock_kwargs))
-        self.assertRaises(
-            lpm.LiveMigrationFailed, self.drv.live_migration,
-            'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
-            'block_mig', 'migrate_data')
-        self.lpm.migration_abort.assert_called_once_with()
-        mock_rec_meth.assert_called_once_with(
-            'context', self.lpm_inst, 'dest', migrate_data=mock.ANY)
-        self.lpm.rollback_live_migration.assert_called_once_with('context')
-        self.assertEqual(0, mock_post_meth.call_count)
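The abort and exception scenarios of test_live_migration, above and below, all pin down the same error-handling contract: on any failure the recovery callback and the migration rollback both run, the post-migration callback is never reached, and the caller always sees LiveMigrationFailed, even when the recovery callback itself raises. A minimal sketch of that contract, with simplified, hypothetical names rather than the retired driver's verbatim code (the timeout path additionally invokes migration_abort, per the assertions above):

    class LiveMigrationFailed(Exception):
        """Stand-in for the driver's live-migration failure exception."""

    def live_migration(mig, context, instance, dest, post_method,
                       recover_method, migrate_data):
        # 'mig' stands in for the per-instance migration object the tests
        # drive through self.lpm.
        try:
            mig.live_migration(context, migrate_data)
        except Exception:
            # Recovery and rollback both run; a failure inside the recovery
            # callback is swallowed so the rollback still executes and the
            # caller still sees a single LiveMigrationFailed.
            try:
                recover_method(context, instance, dest,
                               migrate_data=migrate_data)
            except Exception:
                pass
            mig.rollback_live_migration(context)
            raise LiveMigrationFailed()
        # Only the good path reaches the post-migration callback.
        post_method(context, instance, dest, None, None)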
-
-        # Exception path
-        self._setup_lpm()
-        mock_post_meth.reset_mock()
-        mock_rec_meth.reset_mock()
-        self.lpm.live_migration.side_effect = ValueError()
-        self.assertRaises(
-            lpm.LiveMigrationFailed, self.drv.live_migration,
-            'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
-            'block_mig', 'migrate_data')
-        mock_rec_meth.assert_called_once_with(
-            'context', self.lpm_inst, 'dest', migrate_data=mock.ANY)
-        self.lpm.rollback_live_migration.assert_called_once_with('context')
-        self.assertEqual(0, mock_post_meth.call_count)
-
-        # Ensure we get LiveMigrationFailed even if recovery fails.
-        self._setup_lpm()
-        mock_post_meth.reset_mock()
-        mock_rec_meth.reset_mock()
-        self.lpm.live_migration.side_effect = ValueError()
-        # Cause the recovery method to fail with an exception.
-        mock_rec_meth.side_effect = ValueError()
-        self.assertRaises(
-            lpm.LiveMigrationFailed, self.drv.live_migration,
-            'context', self.lpm_inst, 'dest', mock_post_meth, mock_rec_meth,
-            'block_mig', 'migrate_data')
-        mock_rec_meth.assert_called_once_with(
-            'context', self.lpm_inst, 'dest', migrate_data=mock.ANY)
-        self.lpm.rollback_live_migration.assert_called_once_with('context')
-        self.assertEqual(0, mock_post_meth.call_count)
-
-    def test_rollbk_lpm_dest(self):
-        self.drv.rollback_live_migration_at_destination(
-            'context', self.lpm_inst, 'network_info', 'block_device_info')
-        mock_rollback = self.lpm.rollback_live_migration_at_destination
-        mock_rollback.assert_called_once_with(
-            'context', self.lpm_inst, 'network_info', 'block_device_info',
-            destroy_disks=True, migrate_data=None)
-        self.assertRaises(
-            KeyError, lambda: self.drv.live_migrations[self.lpm_inst.uuid])
-
-    def test_post_live_mig(self):
-        self.drv.post_live_migration('context', self.lpm_inst, None)
-        self.lpm.post_live_migration.assert_called_once_with([], None)
-
-    def test_post_live_mig_src(self):
-        self.drv.post_live_migration_at_source('context', self.lpm_inst,
-                                               'network_info')
-        self.lpm.post_live_migration_at_source.assert_called_once_with(
-            'network_info')
-
-    @mock.patch('nova_powervm.virt.powervm.vm.power_off', autospec=True)
-    def test_power_off(self, mock_power_off):
-        self.drv.power_off(self.inst)
-        mock_power_off.assert_called_once_with(
-            self.drv.adapter, self.inst, force_immediate=True, timeout=None)
-
-        # Long timeout (retry interval means nothing on powervm)
-        mock_power_off.reset_mock()
-        self.drv.power_off(self.inst, timeout=500, retry_interval=10)
-        mock_power_off.assert_called_once_with(
-            self.drv.adapter, self.inst, force_immediate=False, timeout=500)
-
-    @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._destroy')
-    def test_confirm_migration_diff_host(self, mock_destroy):
-        mock_mig = mock.Mock(source_compute='host1', dest_compute='host2')
-        self.drv.confirm_migration('context', mock_mig, self.lpm_inst,
-                                   'network_info')
-        mock_destroy.assert_called_once_with(
-            'context', self.lpm_inst, block_device_info=None,
-            destroy_disks=False, shutdown=False)
-
-    @mock.patch('nova_powervm.virt.powervm.vm.rename', autospec=True)
-    def test_confirm_migration_same_host(self, mock_rename):
-        mock_mig = mock.Mock(source_compute='host1', dest_compute='host1')
-        self.drv.confirm_migration('context', mock_mig, self.lpm_inst,
-                                   'network_info')
-        mock_rename.assert_called_once_with(
-            self.drv.adapter, self.lpm_inst, self.lpm_inst.name)
-
-    @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter',
-                autospec=True)
-    def test_post_live_mig_dest(self, mock_vdi):
-        vals = [(None, None)]
-        mock_vdi.return_value = vals
-        self.drv.post_live_migration_at_destination(
-            'context', self.lpm_inst, 'network_info')
-        self.lpm.post_live_migration_at_destination.assert_called_once_with(
-            'network_info', vals)
-
-    @mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._vol_drv_iter',
-                autospec=True)
-    def test_post_live_mig_dest_vol_drv(self, mock_vdi):
-        bdm1 = mock.Mock()
-        fake_bdi = {'block_device_mapping': [bdm1]}
-        vals = [(bdm1, self.iscsi_vol_drv)]
-        mock_vdi.return_value = vals
-        self.drv.post_live_migration_at_destination(
-            'context', self.lpm_inst, 'network_info', fake_bdi)
-        self.lpm.post_live_migration_at_destination.assert_called_once_with(
-            'network_info', vals)
-
-    @mock.patch('pypowervm.tasks.memory.calculate_memory_overhead_on_host',
-                autospec=True)
-    def test_estimate_instance_overhead(self, mock_calc_over):
-        mock_calc_over.return_value = ('2048', '96')
-
-        inst_info = self.inst.get_flavor()
-        inst_info.extra_specs = {}
-        overhead = self.drv.estimate_instance_overhead(inst_info)
-        self.assertEqual({'memory_mb': '2048'}, overhead)
-
-        # Make sure the cache works
-        mock_calc_over.reset_mock()
-        overhead = self.drv.estimate_instance_overhead(inst_info)
-        self.assertEqual({'memory_mb': '2048'}, overhead)
-        self.assertEqual(0, mock_calc_over.call_count)
-
-        # Reset the cache every time from now on
-        self.drv._inst_overhead_cache = {}
-
-        # Flavor having extra_specs
-        inst_info.extra_specs = {'powervm:max_mem': 4096}
-        overhead = self.drv.estimate_instance_overhead(inst_info)
-        mock_calc_over.assert_called_with(self.apt, self.drv.host_uuid,
-                                          {'max_mem': 4096})
-        self.assertEqual({'memory_mb': '2048'}, overhead)
-
-        self.drv._inst_overhead_cache = {}
-
-        # Test when instance passed is dict
-        inst_info = obj_base.obj_to_primitive(inst_info)
-        overhead = self.drv.estimate_instance_overhead(inst_info)
-        self.assertEqual({'memory_mb': '2048'}, overhead)
-
-        self.drv._inst_overhead_cache = {}
-
-        # When instance_info is None
-        overhead = self.drv.estimate_instance_overhead(None)
-        self.assertEqual({'memory_mb': 0}, overhead)
-
-        self.drv._inst_overhead_cache = {}
-
-        # Test when instance Object is passed
-        overhead = self.drv.estimate_instance_overhead(self.inst)
-        self.assertEqual({'memory_mb': '2048'}, overhead)
-
-    def test_vol_drv_iter(self):
-        block_device_info = self._fake_bdms()
-        bdms = self.drv._extract_bdm(block_device_info)
-        vol_adpt = mock.Mock()
-
-        def _get_results(bdms):
-            # Patch so we get the same mock back each time.
-            with mock.patch('nova_powervm.virt.powervm.volume.'
-                            'build_volume_driver', return_value=vol_adpt):
-                return [(bdm, vol_drv) for bdm, vol_drv in
-                        self.drv._vol_drv_iter(self.inst, bdms)]
-
-        results = _get_results(bdms)
-        self.assertEqual(
-            'fake_vol1',
-            results[0][0]['connection_info']['data']['volume_id'])
-        self.assertEqual(vol_adpt, results[0][1])
-        self.assertEqual(
-            'fake_vol2',
-            results[1][0]['connection_info']['data']['volume_id'])
-        self.assertEqual(vol_adpt, results[1][1])
-
-        # Test with empty bdms
-        self.assertEqual([], _get_results([]))
-
-    def test_build_vol_drivers(self):
-        # This utility just returns a list of drivers from the _vol_drv_iter()
-        # iterator so mock it and ensure the drivers are returned.
- vals = [('bdm0', 'drv0'), ('bdm1', 'drv1')] - with mock.patch.object(self.drv, '_vol_drv_iter', return_value=vals): - drivers = self.drv._build_vol_drivers('context', 'instance', None) - - self.assertEqual(['drv0', 'drv1'], drivers) - - @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') - @mock.patch.object(virt_driver, 'get_block_device_info') - def test_get_block_device_info(self, mock_bk_dev, mock_bdml): - mock_bk_dev.return_value = 'info' - self.assertEqual('info', - self.drv._get_block_device_info('ctx', self.inst)) - - def test_deallocate_networks_on_reschedule(self): - candeallocate = self.drv.deallocate_networks_on_reschedule(mock.Mock()) - self.assertTrue(candeallocate) - - @mock.patch('pypowervm.tasks.cna.find_orphaned_trunks', autospec=True) - def test_cleanup_orphan_adapters(self, mock_find_orphans): - mock_orphan = mock.MagicMock() - mock_find_orphans.return_value = [mock_orphan] - self.drv._cleanup_orphan_adapters('my_vswitch') - mock_orphan.delete.assert_called_once_with() - - def test_get_host_cpu_stats(self): - hcpu_stats = self.drv.get_host_cpu_stats() - total_cycles = self.drv.host_cpu_cache.total_cycles - total_user_cycles = self.drv.host_cpu_cache.total_user_cycles - total_fw_cycles = self.drv.host_cpu_cache.total_fw_cycles - expected_stats = { - 'kernel': self.drv.host_cpu_cache.total_fw_cycles, - 'user': self.drv.host_cpu_cache.total_user_cycles, - 'idle': (total_cycles - total_user_cycles - total_fw_cycles), - 'iowait': 0, - 'frequency': self.drv.host_cpu_cache.cpu_freq} - self.assertEqual(expected_stats, hcpu_stats) - self.drv.host_cpu_cache.refresh.assert_called_once() diff --git a/nova_powervm/tests/virt/powervm/test_event.py b/nova_powervm/tests/virt/powervm/test_event.py deleted file mode 100644 index 3ab69cf3..00000000 --- a/nova_powervm/tests/virt/powervm/test_event.py +++ /dev/null @@ -1,393 +0,0 @@ -# Copyright 2014, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock -from nova.compute import power_state -from nova import exception -from nova import test -from pypowervm.wrappers import event as pvm_evt - -from nova_powervm.virt.powervm import event - - -class TestGetInstance(test.NoDBTestCase): - @mock.patch('nova.context.get_admin_context', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance', autospec=True) - def test_get_instance(self, mock_get_inst, mock_get_context): - # If instance provided, vm.get_instance not called - self.assertEqual('inst', event._get_instance('inst', 'uuid')) - self.assertEqual(0, mock_get_inst.call_count) - # Note that we can only guarantee get_admin_context wasn't called - # because _get_instance is mocked everywhere else in this suite. - # Otherwise it could run from another test case executing in parallel. 
- self.assertEqual(0, mock_get_context.call_count) - - # If instance not provided, vm.get_instance is called - mock_get_inst.return_value = 'inst2' - for _ in range(2): - # Doing it the second time doesn't call get_admin_context() again. - self.assertEqual('inst2', event._get_instance(None, 'uuid')) - mock_get_context.assert_called_once_with() - mock_get_inst.assert_called_once_with( - mock_get_context.return_value, 'uuid') - mock_get_inst.reset_mock() - # Don't reset mock_get_context - - -class TestPowerVMNovaEventHandler(test.NoDBTestCase): - def setUp(self): - super(TestPowerVMNovaEventHandler, self).setUp() - lceh_process_p = mock.patch( - 'nova_powervm.virt.powervm.event.PowerVMLifecycleEventHandler.' - 'process') - self.addCleanup(lceh_process_p.stop) - self.mock_lceh_process = lceh_process_p.start() - self.mock_driver = mock.Mock() - self.handler = event.PowerVMNovaEventHandler(self.mock_driver) - - @mock.patch('nova_powervm.virt.powervm.event._get_instance', autospec=True) - def test_get_inst_uuid(self, mock_get_instance): - fake_inst1 = mock.Mock(uuid='uuid1') - fake_inst2 = mock.Mock(uuid='uuid2') - mock_get_instance.side_effect = lambda i, u: { - 'fake_pvm_uuid1': fake_inst1, - 'fake_pvm_uuid2': fake_inst2}.get(u) - - self.assertEqual( - (fake_inst1, 'uuid1'), - self.handler._get_inst_uuid(fake_inst1, 'fake_pvm_uuid1')) - self.assertEqual( - (fake_inst2, 'uuid2'), - self.handler._get_inst_uuid(fake_inst2, 'fake_pvm_uuid2')) - self.assertEqual( - (None, 'uuid1'), - self.handler._get_inst_uuid(None, 'fake_pvm_uuid1')) - self.assertEqual( - (fake_inst2, 'uuid2'), - self.handler._get_inst_uuid(fake_inst2, 'fake_pvm_uuid2')) - self.assertEqual( - (fake_inst1, 'uuid1'), - self.handler._get_inst_uuid(fake_inst1, 'fake_pvm_uuid1')) - mock_get_instance.assert_has_calls( - [mock.call(fake_inst1, 'fake_pvm_uuid1'), - mock.call(fake_inst2, 'fake_pvm_uuid2')]) - - @mock.patch('nova_powervm.virt.powervm.event._get_instance', autospec=True) - def test_handle_inst_event(self, mock_get_instance): - # If no event we care about, or NVRAM but no nvram_mgr, nothing happens - self.mock_driver.nvram_mgr = None - for dets in ([], ['foo', 'bar', 'baz'], ['NVRAM']): - self.assertEqual('inst', self.handler._handle_inst_event( - 'inst', 'uuid', dets)) - self.assertEqual(0, mock_get_instance.call_count) - self.mock_lceh_process.assert_not_called() - - self.mock_driver.nvram_mgr = mock.Mock() - - # PartitionState only: no NVRAM handling, and inst is passed through. 
- self.assertEqual('inst', self.handler._handle_inst_event( - 'inst', 'uuid', ['foo', 'PartitionState', 'bar'])) - self.assertEqual(0, mock_get_instance.call_count) - self.mock_driver.nvram_mgr.store.assert_not_called() - self.mock_lceh_process.assert_called_once_with('inst', 'uuid') - - self.mock_lceh_process.reset_mock() - - # No instance; nothing happens (we skip PartitionState handling too) - mock_get_instance.return_value = None - self.assertIsNone(self.handler._handle_inst_event( - 'inst', 'uuid', ['NVRAM', 'PartitionState'])) - mock_get_instance.assert_called_once_with('inst', 'uuid') - self.mock_driver.nvram_mgr.store.assert_not_called() - self.mock_lceh_process.assert_not_called() - - mock_get_instance.reset_mock() - fake_inst = mock.Mock(uuid='fake-uuid') - mock_get_instance.return_value = fake_inst - - # NVRAM only - no PartitionState handling, instance is returned - self.assertEqual(fake_inst, self.handler._handle_inst_event( - None, 'uuid', ['NVRAM', 'baz'])) - mock_get_instance.assert_called_once_with(None, 'uuid') - self.mock_driver.nvram_mgr.store.assert_called_once_with('fake-uuid') - self.mock_lceh_process.assert_not_called() - - mock_get_instance.reset_mock() - self.mock_driver.nvram_mgr.store.reset_mock() - self.handler._uuid_cache.clear() - - # Both event types - self.assertEqual(fake_inst, self.handler._handle_inst_event( - None, 'uuid', ['PartitionState', 'NVRAM'])) - mock_get_instance.assert_called_once_with(None, 'uuid') - self.mock_driver.nvram_mgr.store.assert_called_once_with('fake-uuid') - self.mock_lceh_process.assert_called_once_with(fake_inst, 'uuid') - - mock_get_instance.reset_mock() - self.mock_driver.nvram_mgr.store.reset_mock() - self.handler._uuid_cache.clear() - - # Handle multiple NVRAM and PartitionState events - self.assertEqual(fake_inst, self.handler._handle_inst_event( - None, 'uuid', ['NVRAM'])) - self.assertEqual(None, self.handler._handle_inst_event( - None, 'uuid', ['NVRAM'])) - self.assertEqual(None, self.handler._handle_inst_event( - None, 'uuid', ['PartitionState'])) - self.assertEqual(fake_inst, self.handler._handle_inst_event( - fake_inst, 'uuid', ['NVRAM'])) - self.assertEqual(fake_inst, self.handler._handle_inst_event( - fake_inst, 'uuid', ['NVRAM', 'PartitionState'])) - mock_get_instance.assert_called_once_with(None, 'uuid') - self.mock_driver.nvram_mgr.store.assert_has_calls( - [mock.call('fake-uuid')] * 4) - self.mock_lceh_process.assert_has_calls( - [mock.call(None, 'uuid'), - mock.call(fake_inst, 'uuid')]) - - @mock.patch('nova_powervm.virt.powervm.event.PowerVMNovaEventHandler.' - '_handle_inst_event') - @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True) - def test_process(self, mock_get_rpu, mock_handle): - # NEW_CLIENT/CACHE_CLEARED events are ignored - events = [mock.Mock(etype=pvm_evt.EventType.NEW_CLIENT), - mock.Mock(etype=pvm_evt.EventType.CACHE_CLEARED)] - self.handler.process(events) - self.assertEqual(0, mock_get_rpu.call_count) - mock_handle.assert_not_called() - - moduri = pvm_evt.EventType.MODIFY_URI - # If get_req_path_uuid doesn't find a UUID, or not a LogicalPartition - # URI, or details is empty, or has no actions we care about, no action - # is taken. 
- mock_get_rpu.side_effect = [None, 'uuid1', 'uuid2', 'uuid3'] - events = [ - mock.Mock(etype=moduri, data='foo/LogicalPartition/None', - details='NVRAM,PartitionState'), - mock.Mock(etype=moduri, data='bar/VirtualIOServer/uuid1', - details='NVRAM,PartitionState'), - mock.Mock(etype=moduri, data='baz/LogicalPartition/uuid2', - detail=''), - mock.Mock(etype=moduri, data='blah/LogicalPartition/uuid3', - detail='do,not,care')] - self.handler.process(events) - mock_get_rpu.assert_has_calls( - [mock.call(uri, preserve_case=True) - for uri in ('bar/VirtualIOServer/uuid1', - 'baz/LogicalPartition/uuid2', - 'blah/LogicalPartition/uuid3')]) - mock_handle.assert_not_called() - - mock_get_rpu.reset_mock() - - # The stars align, and we handle some events. - uuid_det = (('uuid1', 'NVRAM'), - ('uuid2', 'this,one,ignored'), - ('uuid3', 'PartitionState,baz,NVRAM'), - # Repeat uuid1 to test the cache - ('uuid1', 'blah,PartitionState'), - ('uuid5', 'also,ignored')) - mock_get_rpu.side_effect = [ud[0] for ud in uuid_det] - events = [ - mock.Mock(etype=moduri, data='LogicalPartition/' + uuid, - detail=detail) for uuid, detail in uuid_det] - # Set up _handle_inst_event to test the cache and the exception path - mock_handle.side_effect = ['inst1', None, ValueError] - # Run it! - self.handler.process(events) - mock_get_rpu.assert_has_calls( - [mock.call(uri, preserve_case=True) for uri in - ('LogicalPartition/' + ud[0] for ud in uuid_det)]) - mock_handle.assert_has_calls( - [mock.call(None, 'uuid1', ['NVRAM']), - mock.call(None, 'uuid3', ['PartitionState', 'baz', 'NVRAM']), - # inst1 pulled from the cache based on uuid1 - mock.call('inst1', 'uuid1', ['blah', 'PartitionState'])]) - - @mock.patch('nova_powervm.virt.powervm.event._get_instance', autospec=True) - @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True) - def test_uuid_cache(self, mock_get_rpu, mock_get_instance): - deluri = pvm_evt.EventType.DELETE_URI - moduri = pvm_evt.EventType.MODIFY_URI - - fake_inst1 = mock.Mock(uuid='uuid1') - fake_inst2 = mock.Mock(uuid='uuid2') - fake_inst4 = mock.Mock(uuid='uuid4') - mock_get_instance.side_effect = lambda i, u: { - 'fake_pvm_uuid1': fake_inst1, - 'fake_pvm_uuid2': fake_inst2, - 'fake_pvm_uuid4': fake_inst4}.get(u) - mock_get_rpu.side_effect = lambda d, **k: d.split('/')[1] - - uuid_det = (('fake_pvm_uuid1', 'NVRAM', moduri), - ('fake_pvm_uuid2', 'NVRAM', moduri), - ('fake_pvm_uuid4', 'NVRAM', moduri), - ('fake_pvm_uuid1', 'NVRAM', moduri), - ('fake_pvm_uuid2', '', deluri), - ('fake_pvm_uuid2', 'NVRAM', moduri), - ('fake_pvm_uuid1', '', deluri), - ('fake_pvm_uuid3', '', deluri)) - events = [ - mock.Mock(etype=etype, data='LogicalPartition/' + uuid, - detail=detail) for uuid, detail, etype in uuid_det] - self.handler.process(events[0:4]) - mock_get_instance.assert_has_calls([ - mock.call(None, 'fake_pvm_uuid1'), - mock.call(None, 'fake_pvm_uuid2'), - mock.call(None, 'fake_pvm_uuid4')]) - self.assertEqual({ - 'fake_pvm_uuid1': 'uuid1', - 'fake_pvm_uuid2': 'uuid2', - 'fake_pvm_uuid4': 'uuid4'}, self.handler._uuid_cache) - - mock_get_instance.reset_mock() - - # Test the cache with a second process call - self.handler.process(events[4:7]) - mock_get_instance.assert_has_calls([ - mock.call(None, 'fake_pvm_uuid2')]) - self.assertEqual({ - 'fake_pvm_uuid2': 'uuid2', - 'fake_pvm_uuid4': 'uuid4'}, self.handler._uuid_cache) - - mock_get_instance.reset_mock() - - # Make sure a delete to a non-cached UUID doesn't blow up - self.handler.process([events[7]]) - self.assertEqual(0, mock_get_instance.call_count) - - 
mock_get_rpu.reset_mock() - mock_get_instance.reset_mock() - - clear_events = [mock.Mock(etype=pvm_evt.EventType.NEW_CLIENT), - mock.Mock(etype=pvm_evt.EventType.CACHE_CLEARED)] - # This should clear the cache - self.handler.process(clear_events) - self.assertEqual(dict(), self.handler._uuid_cache) - self.assertEqual(0, mock_get_rpu.call_count) - self.assertEqual(0, mock_get_instance.call_count) - - -class TestPowerVMLifecycleEventHandler(test.NoDBTestCase): - def setUp(self): - super(TestPowerVMLifecycleEventHandler, self).setUp() - self.mock_driver = mock.MagicMock() - self.handler = event.PowerVMLifecycleEventHandler(self.mock_driver) - - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp', autospec=True) - @mock.patch('nova_powervm.virt.powervm.event._get_instance', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.translate_event', autospec=True) - @mock.patch('nova.virt.event.LifecycleEvent', autospec=True) - def test_emit_event(self, mock_lce, mock_tx_evt, mock_get_inst, mock_qp): - def assert_qp(): - mock_qp.assert_called_once_with( - self.mock_driver.adapter, 'uuid', 'PartitionState') - mock_qp.reset_mock() - - def assert_get_inst(): - mock_get_inst.assert_called_once_with('inst', 'uuid') - mock_get_inst.reset_mock() - - # Ignore if LPAR is gone - mock_qp.side_effect = exception.InstanceNotFound(instance_id='uuid') - self.handler._emit_event('uuid', None) - assert_qp() - self.assertEqual(0, mock_get_inst.call_count) - self.assertEqual(0, mock_tx_evt.call_count) - self.assertEqual(0, mock_lce.call_count) - self.mock_driver.emit_event.assert_not_called() - - # Let get_vm_qp return its usual mock from now on - mock_qp.side_effect = None - - # Ignore if instance is gone - mock_get_inst.return_value = None - self.handler._emit_event('uuid', 'inst') - assert_qp() - assert_get_inst() - self.assertEqual(0, mock_tx_evt.call_count) - self.assertEqual(0, mock_lce.call_count) - self.mock_driver.emit_event.assert_not_called() - - # Ignore if task_state isn't one we care about - for task_state in event._NO_EVENT_TASK_STATES: - mock_get_inst.return_value = mock.Mock(task_state=task_state) - self.handler._emit_event('uuid', 'inst') - assert_qp() - assert_get_inst() - self.assertEqual(0, mock_tx_evt.call_count) - self.assertEqual(0, mock_lce.call_count) - self.mock_driver.emit_event.assert_not_called() - - # Task state we care about from now on - inst = mock.Mock(task_state='scheduling', - power_state=power_state.RUNNING) - mock_get_inst.return_value = inst - - # Ignore if not a transition we care about - mock_tx_evt.return_value = None - self.handler._emit_event('uuid', 'inst') - assert_qp() - assert_get_inst() - mock_tx_evt.assert_called_once_with( - mock_qp.return_value, power_state.RUNNING) - mock_lce.assert_not_called() - self.mock_driver.emit_event.assert_not_called() - - mock_tx_evt.reset_mock() - - # Good path - mock_tx_evt.return_value = 'transition' - self.handler._delayed_event_threads = {'uuid': 'thread1', - 'uuid2': 'thread2'} - self.handler._emit_event('uuid', 'inst') - assert_qp() - assert_get_inst() - mock_tx_evt.assert_called_once_with( - mock_qp.return_value, power_state.RUNNING) - mock_lce.assert_called_once_with(inst.uuid, 'transition') - self.mock_driver.emit_event.assert_called_once_with( - mock_lce.return_value) - # The thread was removed - self.assertEqual({'uuid2': 'thread2'}, - self.handler._delayed_event_threads) - - @mock.patch('eventlet.greenthread.spawn_after', autospec=True) - def test_process(self, mock_spawn): - thread1 = mock.Mock() - thread2 = 
mock.Mock() - mock_spawn.side_effect = [thread1, thread2] - # First call populates the delay queue - self.assertEqual({}, self.handler._delayed_event_threads) - self.handler.process(None, 'uuid') - mock_spawn.assert_called_once_with(15, self.handler._emit_event, - 'uuid', None) - self.assertEqual({'uuid': thread1}, - self.handler._delayed_event_threads) - thread1.cancel.assert_not_called() - thread2.cancel.assert_not_called() - - mock_spawn.reset_mock() - - # Second call cancels the first thread and replaces it in delay queue - self.handler.process('inst', 'uuid') - mock_spawn.assert_called_once_with(15, self.handler._emit_event, - 'uuid', 'inst') - self.assertEqual({'uuid': thread2}, - self.handler._delayed_event_threads) - thread1.cancel.assert_called_once_with() - thread2.cancel.assert_not_called() diff --git a/nova_powervm/tests/virt/powervm/test_host.py b/nova_powervm/tests/virt/powervm/test_host.py deleted file mode 100644 index 79a5d40c..00000000 --- a/nova_powervm/tests/virt/powervm/test_host.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2014, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock - -import logging -from nova import test -from oslo_serialization import jsonutils -from pypowervm.wrappers import iocard as pvm_card -from pypowervm.wrappers import managed_system as pvm_ms - -from nova_powervm.virt.powervm import host as pvm_host - -LOG = logging.getLogger(__name__) -logging.basicConfig() - - -def mock_sriov(adap_id, pports): - sriov = mock.create_autospec(pvm_card.SRIOVAdapter, spec_set=True) - sriov.configure_mock(sriov_adap_id=adap_id, phys_ports=pports) - return sriov - - -def mock_pport(port_id, label, maxlps): - port = mock.create_autospec(pvm_card.SRIOVEthPPort, spec_set=True) - port.configure_mock(port_id=port_id, label=label, supp_max_lps=maxlps) - return port - - -class TestPowerVMHost(test.NoDBTestCase): - def test_host_resources(self): - # Create objects to test with - sriov_adaps = [ - mock_sriov(1, [mock_pport(2, 'foo', 1), mock_pport(3, '', 2)]), - mock_sriov(4, [mock_pport(5, 'bar', 3)])] - ms_wrapper = mock.create_autospec(pvm_ms.System, spec_set=True) - asio = mock.create_autospec(pvm_ms.ASIOConfig, spec_set=True) - asio.configure_mock(sriov_adapters=sriov_adaps) - ms_wrapper.configure_mock( - proc_units_configurable=500, - proc_units_avail=500, - memory_configurable=5242880, - memory_free=5242752, - memory_region_size='big', - asio_config=asio) - self.flags(host='the_hostname') - - # Run the actual test - stats = pvm_host.build_host_resource_from_ms(ms_wrapper) - self.assertIsNotNone(stats) - - # Check for the presence of fields - fields = (('vcpus', 500), ('vcpus_used', 0), - ('memory_mb', 5242880), ('memory_mb_used', 128), - 'hypervisor_type', 'hypervisor_version', - ('hypervisor_hostname', 'the_hostname'), 'cpu_info', - 'supported_instances', 'stats', 'pci_passthrough_devices') - for fld in fields: - if isinstance(fld, tuple): - value = stats.get(fld[0], None) - self.assertEqual(value, fld[1]) - 
else: - value = stats.get(fld, None) - self.assertIsNotNone(value) - # Check for individual stats - hstats = (('proc_units', '500.00'), ('proc_units_used', '0.00')) - for stat in hstats: - if isinstance(stat, tuple): - value = stats['stats'].get(stat[0], None) - self.assertEqual(value, stat[1]) - else: - value = stats['stats'].get(stat, None) - self.assertIsNotNone(value) - # pci_passthrough_devices. Parse json - entries can be in any order. - ppdstr = stats['pci_passthrough_devices'] - ppdlist = jsonutils.loads(ppdstr) - self.assertEqual({'foo', 'bar', 'default'}, {ppd['physical_network'] - for ppd in ppdlist}) - self.assertEqual({'foo', 'bar', 'default'}, {ppd['label'] - for ppd in ppdlist}) - self.assertEqual({'*:1:2.0', '*:1:3.0', '*:1:3.1', '*:4:5.0', - '*:4:5.1', '*:4:5.2'}, - {ppd['address'] for ppd in ppdlist}) - for ppd in ppdlist: - self.assertEqual('type-VF', ppd['dev_type']) - self.assertEqual('*:*:*.*', ppd['parent_addr']) - self.assertEqual('*', ppd['vendor_id']) - self.assertEqual('*', ppd['product_id']) - self.assertEqual(1, ppd['numa_node']) diff --git a/nova_powervm/tests/virt/powervm/test_image.py b/nova_powervm/tests/virt/powervm/test_image.py deleted file mode 100644 index c5d963b5..00000000 --- a/nova_powervm/tests/virt/powervm/test_image.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six - -from nova import test - -from nova_powervm.virt.powervm import image - -if six.PY2: - _BUILTIN = '__builtin__' -else: - _BUILTIN = 'builtins' - - -class TestImage(test.NoDBTestCase): - - @mock.patch('nova.utils.temporary_chown', autospec=True) - @mock.patch(_BUILTIN + '.open', autospec=True) - @mock.patch('nova.image.api.API', autospec=True) - def test_stream_blockdev_to_glance(self, mock_api, mock_open, mock_chown): - mock_open.return_value.__enter__.return_value = 'mock_stream' - image.stream_blockdev_to_glance('context', mock_api, 'image_id', - 'metadata', '/dev/disk') - mock_chown.assert_called_with('/dev/disk') - mock_open.assert_called_with('/dev/disk', 'rb') - mock_api.update.assert_called_with('context', 'image_id', 'metadata', - 'mock_stream') - - @mock.patch('nova.image.api.API', autospec=True) - def test_generate_snapshot_metadata(self, mock_api): - mock_api.get.return_value = {'name': 'image_name'} - mock_instance = mock.Mock() - mock_instance.project_id = 'project_id' - ret = image.generate_snapshot_metadata('context', mock_api, 'image_id', - mock_instance) - mock_api.get.assert_called_with('context', 'image_id') - self.assertEqual({ - 'name': 'image_name', - 'status': 'active', - 'disk_format': 'raw', - 'container_format': 'bare', - 'properties': { - 'image_location': 'snapshot', - 'image_state': 'available', - 'owner_id': 'project_id', - } - }, ret) diff --git a/nova_powervm/tests/virt/powervm/test_live_migration.py b/nova_powervm/tests/virt/powervm/test_live_migration.py deleted file mode 100644 index 1cd7e028..00000000 --- a/nova_powervm/tests/virt/powervm/test_live_migration.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
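The assertions in test_generate_snapshot_metadata above fully determine the removed helper's output, so its core can be reconstructed almost mechanically. A sketch under that assumption (image_api is any object with nova's image-API get() signature):

    def generate_snapshot_metadata(context, image_api, image_id, instance):
        # The name carries over from the source image; the remaining fields
        # are fixed values per the expected dict asserted above.
        image = image_api.get(context, image_id)
        return {
            'name': image['name'],
            'status': 'active',
            'disk_format': 'raw',
            'container_format': 'bare',
            'properties': {
                'image_location': 'snapshot',
                'image_state': 'available',
                'owner_id': instance.project_id,
            },
        }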
-# - -from __future__ import absolute_import - -import fixtures -import mock - -from nova import exception -from nova import objects -from nova.objects import migrate_data as mig_obj -from nova import test -from nova.tests.unit import fake_network - -from nova_powervm.tests.virt import powervm -from nova_powervm.tests.virt.powervm import fixtures as fx -from nova_powervm.virt.powervm import live_migration as lpm - - -class TestLPM(test.NoDBTestCase): - def setUp(self): - super(TestLPM, self).setUp() - - self.flags(disk_driver='localdisk', group='powervm') - self.drv_fix = self.useFixture(fx.PowerVMComputeDriver()) - self.drv = self.drv_fix.drv - self.apt = self.drv.adapter - - self.inst = objects.Instance(**powervm.TEST_INSTANCE) - - self.network_infos = fake_network.fake_get_instance_nw_info(self, 1) - self.inst.info_cache = objects.InstanceInfoCache( - network_info=self.network_infos) - - self.mig_data = mig_obj.PowerVMLiveMigrateData() - self.mig_data.host_mig_data = {} - self.mig_data.dest_ip = '1' - self.mig_data.dest_user_id = 'neo' - self.mig_data.dest_sys_name = 'a' - self.mig_data.public_key = 'PublicKey' - self.mig_data.dest_proc_compat = 'a,b,c' - self.mig_data.vol_data = {} - self.mig_data.vea_vlan_mappings = {} - - self.lpmsrc = lpm.LiveMigrationSrc(self.drv, self.inst, self.mig_data) - self.lpmdst = lpm.LiveMigrationDest(self.drv, self.inst) - - self.add_key = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.management_console.add_authorized_key')).mock - self.get_key = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.management_console.get_public_key')).mock - self.get_key.return_value = 'PublicKey' - - # Short path to the host's migration_data - self.host_mig_data = self.drv.host_wrapper.migration_data - - @mock.patch('pypowervm.tasks.storage.ScrubOrphanStorageForLpar', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True) - def test_lpm_source(self, mock_vterm_close, mock_get_wrap, - mock_cd, mock_scrub): - self.host_mig_data['active_migrations_supported'] = 4 - self.host_mig_data['active_migrations_in_progress'] = 2 - - with mock.patch.object( - self.lpmsrc, '_check_migration_ready', return_value=None): - - # Test the bad path first, then patch in values to make succeed - mock_wrap = mock.Mock(id=123) - mock_get_wrap.return_value = mock_wrap - - self.assertRaises(exception.MigrationPreCheckError, - self.lpmsrc.check_source, 'context', - 'block_device_info', []) - - # Patch the proc compat fields, to get further - pm = mock.PropertyMock(return_value='b') - type(mock_wrap).proc_compat_mode = pm - - self.assertRaises(exception.MigrationPreCheckError, - self.lpmsrc.check_source, 'context', - 'block_device_info', []) - - pm = mock.PropertyMock(return_value='Not_Migrating') - type(mock_wrap).migration_state = pm - - # Get a volume driver. - mock_vol_drv = mock.MagicMock() - - # Finally, good path. - self.lpmsrc.check_source('context', 'block_device_info', - [mock_vol_drv]) - # Ensure we built a scrubber. - mock_scrub.assert_called_with(mock.ANY, 123) - # Ensure we added the subtasks to remove the vopts. 
- mock_cd.return_value.dlt_vopt.assert_called_once_with( - mock.ANY, stg_ftsk=mock_scrub.return_value, - remove_mappings=False) - # And ensure the scrubber was executed - mock_scrub.return_value.execute.assert_called_once_with() - mock_vol_drv.pre_live_migration_on_source.assert_called_once_with( - {}) - - # Ensure migration counts are validated - self.host_mig_data['active_migrations_in_progress'] = 4 - self.assertRaises(exception.MigrationPreCheckError, - self.lpmsrc.check_source, 'context', - 'block_device_info', []) - - # Ensure the vterm was closed - mock_vterm_close.assert_called_once_with( - self.apt, mock_wrap.uuid) - - def test_lpm_dest(self): - src_compute_info = {'stats': {'memory_region_size': 1}} - dst_compute_info = {'stats': {'memory_region_size': 1}} - - self.host_mig_data['active_migrations_supported'] = 4 - self.host_mig_data['active_migrations_in_progress'] = 2 - with mock.patch.object(self.drv.host_wrapper, 'refresh') as mock_rfh: - - self.lpmdst.check_destination( - 'context', src_compute_info, dst_compute_info) - mock_rfh.assert_called_once_with() - - # Ensure migration counts are validated - self.host_mig_data['active_migrations_in_progress'] = 4 - self.assertRaises(exception.MigrationPreCheckError, - self.lpmdst.check_destination, 'context', - src_compute_info, dst_compute_info) - # Repair the stat - self.host_mig_data['active_migrations_in_progress'] = 2 - - # Ensure diff memory sizes raises an exception - dst_compute_info['stats']['memory_region_size'] = 2 - self.assertRaises(exception.MigrationPreCheckError, - self.lpmdst.check_destination, 'context', - src_compute_info, dst_compute_info) - - @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif.' - 'pre_live_migrate_at_destination', autospec=True) - def test_pre_live_mig(self, mock_vif_pre, mock_scrub): - vol_drv = mock.MagicMock() - network_infos = [{'type': 'pvm_sea'}] - - def update_vea_mapping(adapter, host_uuid, instance, network_info, - vea_vlan_mappings): - # Make sure what comes in is None, but that we change it. - self.assertEqual(vea_vlan_mappings, {}) - vea_vlan_mappings['test'] = 'resp' - - mock_vif_pre.side_effect = update_vea_mapping - - resp = self.lpmdst.pre_live_migration( - 'context', 'block_device_info', network_infos, 'disk_info', - self.mig_data, [vol_drv]) - - # Make sure the pre_live_migrate_at_destination was invoked for the vif - mock_vif_pre.assert_called_once_with( - self.drv.adapter, self.drv.host_uuid, self.inst, network_infos[0], - mock.ANY) - self.assertEqual({'test': 'resp'}, self.mig_data.vea_vlan_mappings) - - # Make sure we get something back, and that the volume driver was - # invoked. - self.assertIsNotNone(resp) - vol_drv.pre_live_migration_on_destination.assert_called_once_with( - self.mig_data.vol_data) - self.assertEqual(1, mock_scrub.call_count) - self.add_key.assert_called_once_with(self.apt, 'PublicKey') - - vol_drv.reset_mock() - raising_vol_drv = mock.Mock() - raising_vol_drv.pre_live_migration_on_destination.side_effect = ( - Exception('foo')) - self.assertRaises( - exception.MigrationPreCheckError, self.lpmdst.pre_live_migration, - 'context', 'block_device_info', network_infos, 'disk_info', - self.mig_data, [vol_drv, raising_vol_drv]) - vol_drv.pre_live_migration_on_destination.assert_called_once_with({}) - (raising_vol_drv.pre_live_migration_on_destination. 
- assert_called_once_with({})) - - def test_src_cleanup(self): - vol_drv = mock.Mock() - self.lpmdst.cleanup_volume(vol_drv) - # Ensure the volume driver is not called - self.assertEqual(0, vol_drv.cleanup_volume_at_destination.call_count) - - def test_src_cleanup_valid(self): - vol_drv = mock.Mock() - self.lpmdst.pre_live_vol_data = {'vscsi-vol-id': 'fake_udid'} - self.lpmdst.cleanup_volume(vol_drv) - # Ensure the volume driver was called to clean up the volume. - vol_drv.cleanup_volume_at_destination.assert_called_once() - - @mock.patch('pypowervm.tasks.migration.migrate_lpar', autospec=True) - @mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationSrc.' - '_convert_nl_io_mappings', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif.pre_live_migrate_at_source', - autospec=True) - def test_live_migration(self, mock_vif_pre_lpm, mock_convert_mappings, - mock_migr): - mock_trunk = mock.MagicMock() - mock_vif_pre_lpm.return_value = [mock_trunk] - mock_convert_mappings.return_value = ['AABBCCDDEEFF/5'] - - self.lpmsrc.lpar_w = mock.Mock() - self.lpmsrc.live_migration('context', self.mig_data) - mock_migr.assert_called_once_with( - self.lpmsrc.lpar_w, 'a', sdn_override=True, tgt_mgmt_svr='1', - tgt_mgmt_usr='neo', validate_only=False, - virtual_fc_mappings=None, virtual_scsi_mappings=None, - vlan_check_override=True, vlan_mappings=['AABBCCDDEEFF/5']) - - # Network assertions - mock_vif_pre_lpm.assert_called_once_with( - self.drv.adapter, self.drv.host_uuid, self.inst, mock.ANY) - mock_trunk.delete.assert_called_once() - - # Test that we raise errors received during migration - mock_migr.side_effect = ValueError() - self.assertRaises(ValueError, self.lpmsrc.live_migration, 'context', - self.mig_data) - mock_migr.assert_called_with( - self.lpmsrc.lpar_w, 'a', sdn_override=True, tgt_mgmt_svr='1', - tgt_mgmt_usr='neo', validate_only=False, - virtual_fc_mappings=None, virtual_scsi_mappings=None, - vlan_mappings=['AABBCCDDEEFF/5'], vlan_check_override=True) - - def test_convert_nl_io_mappings(self): - # Test simple None case - self.assertIsNone(self.lpmsrc._convert_nl_io_mappings(None)) - - # Do some mappings - test_mappings = {'aa:bb:cc:dd:ee:ff': 5, 'aa:bb:cc:dd:ee:ee': 126} - expected = ['AABBCCDDEEFF/5', 'AABBCCDDEEEE/126'] - self.assertEqual( - set(expected), - set(self.lpmsrc._convert_nl_io_mappings(test_mappings))) - - @mock.patch('pypowervm.tasks.migration.migrate_recover', autospec=True) - def test_rollback(self, mock_migr): - self.lpmsrc.lpar_w = mock.Mock() - - # Test no need to rollback - self.lpmsrc.lpar_w.migration_state = 'Not_Migrating' - self.lpmsrc.rollback_live_migration('context') - self.assertTrue(self.lpmsrc.lpar_w.refresh.called) - self.assertFalse(mock_migr.called) - - # Test calling the rollback - self.lpmsrc.lpar_w.reset_mock() - self.lpmsrc.lpar_w.migration_state = 'Pretend its Migrating' - self.lpmsrc.rollback_live_migration('context') - self.assertTrue(self.lpmsrc.lpar_w.refresh.called) - mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True) - - # Test exception from rollback - mock_migr.reset_mock() - self.lpmsrc.lpar_w.reset_mock() - mock_migr.side_effect = ValueError() - self.lpmsrc.rollback_live_migration('context') - self.assertTrue(self.lpmsrc.lpar_w.refresh.called) - mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True) - - def test_check_migration_ready(self): - lpar_w, host_w = mock.Mock(), mock.Mock() - lpar_w.can_lpm.return_value = (True, None) - self.lpmsrc._check_migration_ready(lpar_w, host_w) - 
lpar_w.can_lpm.assert_called_once_with(host_w, migr_data={}) - - lpar_w.can_lpm.return_value = (False, 'This is the reason message.') - self.assertRaises(exception.MigrationPreCheckError, - self.lpmsrc._check_migration_ready, lpar_w, host_w) - - @mock.patch('pypowervm.tasks.migration.migrate_abort', autospec=True) - def test_migration_abort(self, mock_mig_abort): - self.lpmsrc.lpar_w = mock.Mock() - self.lpmsrc.migration_abort() - mock_mig_abort.assert_called_once_with(self.lpmsrc.lpar_w) - - @mock.patch('pypowervm.tasks.migration.migrate_recover', autospec=True) - def test_migration_recover(self, mock_mig_recover): - self.lpmsrc.lpar_w = mock.Mock() - self.lpmsrc.migration_recover() - mock_mig_recover.assert_called_once_with( - self.lpmsrc.lpar_w, force=True) - - @mock.patch('nova_powervm.virt.powervm.vif.post_live_migrate_at_source', - autospec=True) - def test_post_live_migration_at_source(self, mock_vif_post_lpm_at_source): - network_infos = [{'devname': 'tap-dev1', 'address': 'mac-addr1', - 'network': {'bridge': 'br-int'}, 'id': 'vif_id_1'}, - {'devname': 'tap-dev2', 'address': 'mac-addr2', - 'network': {'bridge': 'br-int'}, 'id': 'vif_id_2'}] - self.lpmsrc.post_live_migration_at_source(network_infos) - # Assertions - for network_info in network_infos: - mock_vif_post_lpm_at_source.assert_any_call(mock.ANY, mock.ANY, - mock.ANY, network_info) - - @mock.patch('nova_powervm.virt.powervm.tasks.storage.SaveBDM.execute', - autospec=True) - def test_post_live_migration_at_dest(self, mock_save_bdm): - bdm1, bdm2, vol_drv1, vol_drv2 = [mock.Mock()] * 4 - vals = [(bdm1, vol_drv1), (bdm2, vol_drv2)] - self.lpmdst.pre_live_vol_data = {'vscsi-vol-id': 'fake_udid', - 'vscsi-vol-id2': 'fake_udid2'} - self.lpmdst.post_live_migration_at_destination('network_infos', vals) - # Assertions - - for bdm, vol_drv in vals: - vol_drv.post_live_migration_at_destination.assert_called_with( - mock.ANY) - self.assertEqual(len(vals), mock_save_bdm.call_count) diff --git a/nova_powervm/tests/virt/powervm/test_media.py b/nova_powervm/tests/virt/powervm/test_media.py deleted file mode 100644 index cbb9e041..00000000 --- a/nova_powervm/tests/virt/powervm/test_media.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
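test_check_migration_ready above reduces the source-side readiness gate to one pattern: pypowervm's LPAR wrapper answers can_lpm() with a (ready, reason) tuple, and a negative answer becomes nova's MigrationPreCheckError. Roughly, as a sketch:

    from nova import exception

    def check_migration_ready(lpar_w, host_w, migr_data=None):
        ready, reason = lpar_w.can_lpm(host_w, migr_data=migr_data or {})
        if not ready:
            raise exception.MigrationPreCheckError(reason=reason)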
- -from __future__ import absolute_import - -import fixtures -import mock -from nova import test -from oslo_utils.fixture import uuidsentinel -from pypowervm import const as pvm_const -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.virt.powervm import media as m - - -class TestConfigDrivePowerVM(test.NoDBTestCase): - """Unit Tests for the ConfigDrivePowerVM class.""" - - def setUp(self): - super(TestConfigDrivePowerVM, self).setUp() - - self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt - - self.validate_vopt = self.useFixture(fixtures.MockPatch( - 'pypowervm.tasks.vopt.validate_vopt_repo_exists')).mock - self.validate_vopt.return_value = None, None - - @mock.patch('nova.api.metadata.base.InstanceMetadata', autospec=True) - @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_crt_cfg_dr_iso(self, mock_pvm_uuid, mock_mkdrv, mock_meta): - """Validates that the image creation method works.""" - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - self.assertTrue(self.validate_vopt.called) - mock_instance = mock.MagicMock() - mock_instance.uuid = '1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f' - mock_files = mock.MagicMock() - mock_net = mock.MagicMock() - iso_path = '/tmp/cfgdrv.iso' - cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net, - iso_path) - self.assertTrue(mock_pvm_uuid.called) - self.assertEqual(mock_mkdrv.call_count, 1) - - # Test retry iso create - mock_mkdrv.reset_mock() - mock_mkdrv.side_effect = [OSError, mock_mkdrv] - cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net, - iso_path) - self.assertEqual(mock_mkdrv.call_count, 2) - - def test_get_cfg_drv_name(self): - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - mock_instance = mock.MagicMock() - mock_instance.uuid = uuidsentinel.inst_id - - # calculate expected file name - expected_file_name = 'cfg_' + mock_instance.uuid.replace('-', '') - allowed_len = pvm_const.MaxLen.VOPT_NAME - 4 # '.iso' is 4 chars - expected_file_name = expected_file_name[:allowed_len] + '.iso' - - name = cfg_dr_builder.get_cfg_drv_name(mock_instance) - self.assertEqual(name, expected_file_name) - - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'get_cfg_drv_name') - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - '_attach_vopt') - @mock.patch('os.path.getsize', autospec=True) - @mock.patch('pypowervm.tasks.storage.upload_vopt', autospec=True) - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' 
- '_create_cfg_dr_iso', autospec=True) - def test_crt_cfg_drv_vopt(self, mock_ccdi, mock_upl, mock_getsize, - mock_attach, mock_ntf, mock_name): - # Mock Returns - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - cfg_dr_builder.vios_uuid = 'vios_uuid' - mock_instance = mock.MagicMock() - mock_instance.uuid = uuidsentinel.inst_id - mock_upl.return_value = 'vopt', 'f_uuid' - fh = mock_ntf.return_value.__enter__.return_value - fh.name = 'iso_path' - mock_name.return_value = 'fake-name' - - # Run - cfg_dr_builder.create_cfg_drv_vopt(mock_instance, 'files', 'netinfo', - 'fake_lpar', admin_pass='pass') - mock_ntf.assert_called_once_with(mode='rb') - mock_ccdi.assert_called_once_with(mock_instance, - 'files', 'netinfo', 'iso_path', - admin_pass='pass') - mock_getsize.assert_called_once_with('iso_path') - mock_upl.assert_called_once_with(self.apt, 'vios_uuid', fh, - 'fake-name', - mock_getsize.return_value) - mock_attach.assert_called_once_with(mock_instance, 'fake_lpar', - 'vopt', None) - - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('pypowervm.utils.transaction.WrapperTask', autospec=True) - def test_attach_vopt(self, mock_class_wrapper_task, mock_build_map, - mock_add_map): - # Create objects to test with - mock_instance = mock.MagicMock(name='fake-instance') - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - vopt = mock.Mock() - mock_vios = mock.Mock(spec=pvm_vios.VIOS) - mock_vios.configure_mock(name='vios name') - - # Mock methods not currently under test - mock_wrapper_task = mock.MagicMock() - mock_class_wrapper_task.return_value = mock_wrapper_task - - def call_param(param): - param(mock_vios) - mock_wrapper_task.add_functor_subtask.side_effect = call_param - - def validate_build(host_uuid, vios_w, lpar_uuid, vopt_elem): - self.assertEqual(None, host_uuid) - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('lpar_uuid', lpar_uuid) - self.assertEqual(vopt, vopt_elem) - return 'map' - mock_build_map.side_effect = validate_build - - def validate_add(vios_w, mapping): - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual(mapping, 'map') - return 'added' - mock_add_map.side_effect = validate_add - - # Run the actual test - cfg_dr_builder._attach_vopt(mock_instance, 'lpar_uuid', vopt) - - # Make sure they were called and validated - self.assertTrue(mock_wrapper_task.execute.called) - self.assertEqual(1, mock_build_map.call_count) - self.assertEqual(1, mock_add_map.call_count) - self.assertTrue(self.validate_vopt.called) - - def test_sanitize_network_info(self): - network_info = [{'type': 'lbr'}, {'type': 'pvm_sea'}, - {'type': 'ovs'}] - - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - - resp = cfg_dr_builder._sanitize_network_info(network_info) - expected_ret = [{'type': 'vif'}, {'type': 'vif'}, - {'type': 'ovs'}] - self.assertEqual(resp, expected_ret) - - def test_mgmt_cna_to_vif(self): - mock_cna = mock.MagicMock() - mock_cna.mac = "FAD4433ED120" - - # Run - cfg_dr_builder = m.ConfigDrivePowerVM(self.apt) - vif = cfg_dr_builder._mgmt_cna_to_vif(mock_cna) - - # Validate - self.assertEqual(vif.get('address'), "fa:d4:43:3e:d1:20") - self.assertEqual(vif.get('id'), 'mgmt_vif') - self.assertIsNotNone(vif.get('network')) - self.assertEqual(1, len(vif.get('network').get('subnets'))) - subnet = vif.get('network').get('subnets')[0] - self.assertEqual(6, subnet.get('version')) - self.assertEqual('fe80::/64', subnet.get('cidr')) - ip = subnet.get('ips')[0] 
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120', ip.get('address')) - - def test_mac_to_link_local(self): - mac = 'fa:d4:43:3e:d1:20' - self.assertEqual('fe80::f8d4:43ff:fe3e:d120', - m.ConfigDrivePowerVM._mac_to_link_local(mac)) - - mac = '00:00:00:00:00:00' - self.assertEqual('fe80::0200:00ff:fe00:0000', - m.ConfigDrivePowerVM._mac_to_link_local(mac)) - - mac = 'ff:ff:ff:ff:ff:ff' - self.assertEqual('fe80::fdff:ffff:feff:ffff', - m.ConfigDrivePowerVM._mac_to_link_local(mac)) - - @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.' - 'add_dlt_vopt_tasks') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap', - new=mock.MagicMock()) - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps') - @mock.patch('pypowervm.utils.transaction.FeedTask') - @mock.patch('pypowervm.utils.transaction.FeedTask.execute') - def test_dlt_vopt_no_map(self, mock_execute, mock_class_feed_task, - mock_add_dlt_vopt_tasks, mock_find_maps): - # Init objects to test with - mock_feed_task = mock.MagicMock() - mock_class_feed_task.return_value = mock_feed_task - mock_find_maps.return_value = [] - - # Invoke the operation - cfg_dr = m.ConfigDrivePowerVM(self.apt) - cfg_dr.dlt_vopt('2', remove_mappings=False) - - # Verify expected methods were called - mock_add_dlt_vopt_tasks.assert_not_called() - self.assertTrue(mock_feed_task.execute.called) - - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True) - def test_add_dlt_vopt_tasks(self, mock_find_maps, mock_gen_match_func, - mock_vm_id): - # Init objects to test with - cfg_dr = m.ConfigDrivePowerVM(self.apt) - stg_ftsk = mock.MagicMock() - cfg_dr.vios_uuid = 'vios_uuid' - lpar_uuid = 'lpar_uuid' - mock_find_maps.return_value = [mock.Mock(backing_storage='stor')] - mock_vm_id.return_value = '2' - - # Run - cfg_dr.add_dlt_vopt_tasks(lpar_uuid, stg_ftsk) - - # Validate - mock_gen_match_func.assert_called_with(pvm_stg.VOptMedia) - mock_find_maps.assert_called_with( - stg_ftsk.get_wrapper().scsi_mappings, client_lpar_id='2', - match_func=mock_gen_match_func.return_value) - self.assertTrue(stg_ftsk.add_post_execute.called) - self.assertTrue( - stg_ftsk.wrapper_tasks['vios_uuid'].add_functor_subtask.called) diff --git a/nova_powervm/tests/virt/powervm/test_mgmt.py b/nova_powervm/tests/virt/powervm/test_mgmt.py deleted file mode 100644 index 621b663f..00000000 --- a/nova_powervm/tests/virt/powervm/test_mgmt.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
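The three vectors in test_mac_to_link_local above pin down the standard EUI-64 derivation of an IPv6 link-local address: flip the universal/local bit of the first MAC octet, splice 0xff 0xfe into the middle, and prefix fe80::. A self-contained equivalent of the removed helper:

    def mac_to_link_local(mac):
        octets = [int(part, 16) for part in mac.split(':')]
        octets[0] ^= 0x02  # flip the universal/local bit
        eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
        groups = ['%02x%02x' % (eui64[i], eui64[i + 1])
                  for i in range(0, 8, 2)]
        return 'fe80::' + ':'.join(groups)

    # mac_to_link_local('fa:d4:43:3e:d1:20') -> 'fe80::f8d4:43ff:fe3e:d120',
    # matching the first assertion above.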
- -import mock -import retrying - -from nova import exception -from nova import test -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.tests.test_utils import pvmhttp - -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm import mgmt - -LPAR_HTTPRESP_FILE = "lpar.txt" - - -class TestMgmt(test.NoDBTestCase): - def setUp(self): - super(TestMgmt, self).setUp() - self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt - - lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt) - self.assertNotEqual(lpar_http, None, - "Could not load %s " % LPAR_HTTPRESP_FILE) - - self.resp = lpar_http.response - - @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True) - def test_mgmt_uuid(self, mock_get_partition): - mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt') - adpt = mock.Mock() - - # First run should call the partition only once - self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt)) - mock_get_partition.assert_called_once_with(adpt) - - # But a subsequent call should effectively no-op - mock_get_partition.reset_mock() - self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt)) - self.assertEqual(0, mock_get_partition.call_count) - - @mock.patch('glob.glob', autospec=True) - @mock.patch('nova.privsep.path.writefile', autospec=True) - @mock.patch('os.path.realpath', autospec=True) - def test_discover_vscsi_disk(self, mock_realpath, mock_dacw, mock_glob): - scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan' - udid = ('275b5d5f88fa5611e48be9000098be9400' - '13fb2aa55a2d7b8d150cb1b7b6bc04d6') - devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid) - mapping = mock.Mock() - mapping.client_adapter.lpar_slot_num = 5 - mapping.backing_storage.udid = udid - # Realistically, first glob would return e.g. .../host0/.../host0/... - # but it doesn't matter for test purposes. 
- mock_glob.side_effect = [[scanpath], [devlink]] - mgmt.discover_vscsi_disk(mapping) - mock_glob.assert_has_calls( - [mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])]) - mock_dacw.assert_called_with(scanpath, 'a', '- - -') - mock_realpath.assert_called_with(devlink) - - @mock.patch('retrying.retry', autospec=True) - @mock.patch('glob.glob', autospec=True) - @mock.patch('nova.privsep.path.writefile', autospec=True) - def test_discover_vscsi_disk_not_one_result(self, mock_write, mock_glob, - mock_retry): - """Zero or more than one disk is found by discover_vscsi_disk.""" - def validate_retry(kwargs): - self.assertIn('retry_on_result', kwargs) - self.assertEqual(250, kwargs['wait_fixed']) - self.assertEqual(300000, kwargs['stop_max_delay']) - - def raiser(unused): - raise retrying.RetryError(mock.Mock(attempt_number=123)) - - def retry_passthrough(**kwargs): - validate_retry(kwargs) - - def wrapped(_poll_for_dev): - return _poll_for_dev - return wrapped - - def retry_timeout(**kwargs): - validate_retry(kwargs) - - def wrapped(_poll_for_dev): - return raiser - return wrapped - - udid = ('275b5d5f88fa5611e48be9000098be9400' - '13fb2aa55a2d7b8d150cb1b7b6bc04d6') - mapping = mock.Mock() - mapping.client_adapter.lpar_slot_num = 5 - mapping.backing_storage.udid = udid - # No disks found - mock_retry.side_effect = retry_timeout - mock_glob.side_effect = lambda path: [] - self.assertRaises(npvmex.NoDiskDiscoveryException, - mgmt.discover_vscsi_disk, mapping) - # Multiple disks found - mock_retry.side_effect = retry_passthrough - mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']] - self.assertRaises(npvmex.UniqueDiskDiscoveryException, - mgmt.discover_vscsi_disk, mapping) - - @mock.patch('time.sleep', autospec=True) - @mock.patch('os.path.realpath', autospec=True) - @mock.patch('os.stat', autospec=True) - @mock.patch('nova.privsep.path.writefile', autospec=True) - def test_remove_block_dev(self, mock_dacw, mock_stat, mock_realpath, - mock_sleep): - link = '/dev/link/foo' - realpath = '/dev/sde' - delpath = '/sys/block/sde/device/delete' - mock_realpath.return_value = realpath - - # Good path - mock_stat.side_effect = (None, None, OSError()) - mgmt.remove_block_dev(link) - mock_realpath.assert_called_with(link) - mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath), - mock.call(realpath)]) - mock_dacw.assert_called_with(delpath, 'a', '1') - self.assertEqual(0, mock_sleep.call_count) - - # Device param not found - mock_dacw.reset_mock() - mock_stat.reset_mock() - mock_stat.side_effect = (OSError(), None, None) - self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev, - link) - # stat was called once; privsep write was not called - self.assertEqual(1, mock_stat.call_count) - mock_dacw.assert_not_called() - - # Delete special file not found - mock_stat.reset_mock() - mock_stat.side_effect = (None, OSError(), None) - self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev, - link) - # stat was called twice; privsep write was not called - self.assertEqual(2, mock_stat.call_count) - mock_dacw.assert_not_called() - - @mock.patch('retrying.retry') - @mock.patch('os.path.realpath') - @mock.patch('os.stat') - @mock.patch('nova.privsep.path.writefile') - def test_remove_block_dev_timeout(self, mock_dacw, mock_stat, - mock_realpath, mock_retry): - - def validate_retry(kwargs): - self.assertIn('retry_on_result', kwargs) - self.assertEqual(250, kwargs['wait_fixed']) - self.assertEqual(10000, kwargs['stop_max_delay']) - - def raiser(unused): - 
raise retrying.RetryError(mock.Mock(attempt_number=123)) - - def retry_timeout(**kwargs): - validate_retry(kwargs) - - def wrapped(_poll_for_del): - return raiser - return wrapped - - # Deletion was attempted, but device is still there - link = '/dev/link/foo' - delpath = '/sys/block/sde/device/delete' - realpath = '/dev/sde' - mock_realpath.return_value = realpath - mock_stat.side_effect = lambda path: 1 - mock_retry.side_effect = retry_timeout - - self.assertRaises( - npvmex.DeviceDeletionException, mgmt.remove_block_dev, link) - mock_realpath.assert_called_once_with(link) - mock_dacw.assert_called_with(delpath, 'a', '1') diff --git a/nova_powervm/tests/virt/powervm/test_slot.py b/nova_powervm/tests/virt/powervm/test_slot.py deleted file mode 100644 index 5a2f2830..00000000 --- a/nova_powervm/tests/virt/powervm/test_slot.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock - -from nova import test -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import slot - -from pypowervm import exceptions as pvm_exc - - -class TestNovaSlotManager(test.NoDBTestCase): - - def setUp(self): - super(TestNovaSlotManager, self).setUp() - self.store_api = mock.MagicMock() - self.inst = mock.MagicMock(uuid='uuid1') - - def test_build_slot_mgr(self): - # Test when NVRAM store exists - # The Swift-backed implementation of PowerVM SlotMapStore is returned - self.store_api.fetch_slot_map = mock.MagicMock(return_value=None) - slot_mgr = slot.build_slot_mgr(self.inst, self.store_api, adapter=None, - vol_drv_iter=None) - self.assertIsInstance(slot_mgr, slot.SwiftSlotManager) - self.assertFalse(slot_mgr.is_rebuild) - - # Test when no NVRAM store is set up - # The no-op implementation of PowerVM SlotMapStore is returned - self.assertIsInstance( - slot.build_slot_mgr(self.inst, None, adapter=None, - vol_drv_iter=None), - slot.NoopSlotManager) - - # Test that the rebuild flag is set when it is flagged as a rebuild - slot_mgr = slot.build_slot_mgr( - self.inst, self.store_api, adapter='adpt', vol_drv_iter='test') - self.assertTrue(slot_mgr.is_rebuild) - - -class TestSwiftSlotManager(test.NoDBTestCase): - - def setUp(self): - super(TestSwiftSlotManager, self).setUp() - self.store_api = mock.MagicMock() - self.store_api.fetch_slot_map = mock.MagicMock(return_value=None) - self.inst = mock.MagicMock(uuid='a2e71b38-160f-4650-bbdc-2a10cd507e2b') - self.slot_mgr = slot.SwiftSlotManager(self.store_api, - instance=self.inst) - - def test_load(self): - # load() should have been called internally by __init__ - self.store_api.fetch_slot_map.assert_called_with( - self.inst.uuid + '_slot_map') - - def test_save(self): - # Mock the call - self.store_api.store_slot_map = mock.MagicMock() - - # Run save - self.slot_mgr.save() - - # Not called because nothing changed - self.store_api.store_slot_map.assert_not_called() - - # Change something - mock_vfcmap = 
mock.Mock(server_adapter=mock.Mock(lpar_slot_num=123)) - self.slot_mgr.register_vfc_mapping(mock_vfcmap, 'fabric') - - # Run save - self.slot_mgr.save() - - # Validate the call - self.store_api.store_slot_map.assert_called_once_with( - self.inst.uuid + '_slot_map', mock.ANY) - - def test_delete(self): - # Mock the call - self.store_api.delete_slot_map = mock.MagicMock() - - # Run delete - self.slot_mgr.delete() - - # Validate the call - self.store_api.delete_slot_map.assert_called_once_with( - self.inst.uuid + '_slot_map') - - @mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True) - @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True) - def test_init_recreate_map(self, mock_ftsk, mock_rebuild_slot): - vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2') - mock_ftsk.return_value.feed = [vios1, vios2] - self.slot_mgr.init_recreate_map(mock.Mock(), self._vol_drv_iter()) - self.assertEqual(1, mock_ftsk.call_count) - mock_rebuild_slot.assert_called_once_with( - self.slot_mgr, mock.ANY, {'udid': ['uuid2'], 'iscsi': ['uuid1']}, - ['a', 'b']) - - @mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True) - @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True) - def test_init_recreate_map_fails(self, mock_ftsk, mock_rebuild_slot): - vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2') - mock_ftsk.return_value.feed = [vios1, vios2] - mock_rebuild_slot.side_effect = ( - pvm_exc.InvalidHostForRebuildNotEnoughVIOS(udid='udid56')) - self.assertRaises( - p_exc.InvalidRebuild, self.slot_mgr.init_recreate_map, mock.Mock(), - self._vol_drv_iter()) - - @mock.patch('pypowervm.tasks.slot_map.RebuildSlotMap', autospec=True) - @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True) - def test_init_recreate_map_fileio(self, mock_ftsk, mock_rebuild_slot): - vios1, vios2 = mock.Mock(uuid='uuid1'), mock.Mock(uuid='uuid2') - mock_ftsk.return_value.feed = [vios1, vios2] - expected_vio_wrap = [vios1, vios2] - self.slot_mgr.init_recreate_map(mock.Mock(), self._vol_drv_iter_2()) - self.assertEqual(1, mock_ftsk.call_count) - mock_rebuild_slot.assert_called_once_with( - self.slot_mgr, expected_vio_wrap, - {'udidvscsi': ['uuid1'], 'udid': ['uuid1']}, []) - - def _vol_drv_iter_2(self): - mock_fileio = mock.Mock() - mock_fileio.vol_type.return_value = 'fileio' - mock_fileio.is_volume_on_vios.side_effect = ((True, 'udid'), - (False, None)) - mock_scsi = mock.Mock() - mock_scsi.vol_type.return_value = 'vscsi' - mock_scsi.is_volume_on_vios.side_effect = ((True, 'udidvscsi'), - (False, None)) - - vol_drv = [mock_fileio, mock_scsi] - for type in vol_drv: - yield mock.Mock(), type - - def _vol_drv_iter(self): - mock_scsi = mock.Mock() - mock_scsi.vol_type.return_value = 'vscsi' - mock_scsi.is_volume_on_vios.side_effect = ((False, None), - (True, 'udid')) - mock_iscsi = mock.Mock() - mock_iscsi.vol_type.return_value = 'iscsi' - mock_iscsi.is_volume_on_vios.side_effect = ((True, 'iscsi'), - (False, None)) - - mock_npiv1 = mock.Mock() - mock_npiv1.vol_type.return_value = 'npiv' - mock_npiv1._fabric_names.return_value = ['a', 'b'] - - mock_npiv2 = mock.Mock() - mock_npiv2.vol_type.return_value = 'npiv' - mock_npiv2._fabric_names.return_value = ['a', 'b', 'c'] - - vol_drv = [mock_scsi, mock_npiv1, mock_npiv2, mock_iscsi] - for type in vol_drv: - yield mock.Mock(), type diff --git a/nova_powervm/tests/virt/powervm/test_vif.py b/nova_powervm/tests/virt/powervm/test_vif.py deleted file mode 100644 index 8cc184e8..00000000 --- 
a/nova_powervm/tests/virt/powervm/test_vif.py +++ /dev/null @@ -1,968 +0,0 @@ -# Copyright 2016, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova import exception -from nova.network import model -from nova.network.neutronv2 import api as netapi -from nova import test -from oslo_config import cfg -from pypowervm import exceptions as pvm_ex -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import logical_partition as pvm_lpar -from pypowervm.wrappers import managed_system as pvm_ms -from pypowervm.wrappers import network as pvm_net - -from nova_powervm.virt.powervm import vif - -CONF = cfg.CONF - - -def cna(mac): - """Builds a mock Client Network Adapter for unit tests.""" - nic = mock.MagicMock() - nic.mac = mac - nic.vswitch_uri = 'fake_href' - return nic - - -class FakeNetworkAPI(object): - def __init__(self, physnet): - self.physical_network = physnet - - def get(self, context, netid): - physnet = mock.MagicMock() - physnet.physical_network = self.physical_network - return physnet - - -class TestVifFunctions(test.NoDBTestCase): - - def setUp(self): - super(TestVifFunctions, self).setUp() - - self.adpt = self.useFixture(pvm_fx.AdapterFx( - traits=pvm_fx.LocalPVMTraits)).adpt - self.slot_mgr = mock.Mock() - - @mock.patch('oslo_serialization.jsonutils.dumps', autospec=True) - @mock.patch('pypowervm.wrappers.event.Event', autospec=True) - def test_push_vif_event(self, mock_event, mock_dumps): - mock_vif = mock.Mock(mac='MAC', href='HREF') - vif._push_vif_event(self.adpt, 'action', mock_vif, mock.Mock(), - 'pvm_sea') - mock_dumps.assert_called_once_with( - {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC', - 'type': 'pvm_sea'}) - mock_event.bld.assert_called_once_with(self.adpt, 'HREF', - mock_dumps.return_value) - mock_event.bld.return_value.create.assert_called_once_with() - - mock_dumps.reset_mock() - mock_event.bld.reset_mock() - mock_event.bld.return_value.create.reset_mock() - - # Exception reraises - mock_event.bld.return_value.create.side_effect = IndexError - self.assertRaises(IndexError, vif._push_vif_event, self.adpt, 'action', - mock_vif, mock.Mock(), 'pvm_sea') - mock_dumps.assert_called_once_with( - {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC', - 'type': 'pvm_sea'}) - mock_event.bld.assert_called_once_with(self.adpt, 'HREF', - mock_dumps.return_value) - mock_event.bld.return_value.create.assert_called_once_with() - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif._push_vif_event', autospec=True) - def test_plug(self, mock_event, mock_bld_drv): - """Test the top-level plug method.""" - mock_vif = {'address': 'MAC', 'type': 'pvm_sea'} - slot_mgr = mock.Mock() - - # 1) With slot registration - slot_mgr.build_map.get_vnet_slot.return_value = None - vnet = vif.plug(self.adpt, 'host_uuid', 'instance', mock_vif, slot_mgr) - - mock_bld_drv.assert_called_once_with(self.adpt, 'host_uuid', - 
'instance', mock_vif) - slot_mgr.build_map.get_vnet_slot.assert_called_once_with('MAC') - mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif, None, - new_vif=True) - slot_mgr.register_vnet.assert_called_once_with( - mock_bld_drv.return_value.plug.return_value) - mock_event.assert_called_once_with(self.adpt, 'plug', vnet, mock.ANY, - 'pvm_sea') - self.assertEqual(mock_bld_drv.return_value.plug.return_value, vnet) - - # Clean up - mock_bld_drv.reset_mock() - slot_mgr.build_map.get_vnet_slot.reset_mock() - mock_bld_drv.return_value.plug.reset_mock() - slot_mgr.register_vnet.reset_mock() - mock_event.reset_mock() - - # 2) Without slot registration; and plug returns None (which it should - # IRL whenever new_vif=False). - slot_mgr.build_map.get_vnet_slot.return_value = 123 - mock_bld_drv.return_value.plug.return_value = None - vnet = vif.plug(self.adpt, 'host_uuid', 'instance', mock_vif, slot_mgr, - new_vif=False) - - mock_bld_drv.assert_called_once_with(self.adpt, 'host_uuid', - 'instance', mock_vif) - slot_mgr.build_map.get_vnet_slot.assert_called_once_with('MAC') - mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif, 123, - new_vif=False) - slot_mgr.register_vnet.assert_not_called() - self.assertEqual(0, mock_event.call_count) - self.assertIsNone(vnet) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif._push_vif_event', autospec=True) - def test_unplug(self, mock_event, mock_bld_drv): - """Test the top-level unplug method.""" - mock_vif = {'address': 'MAC', 'type': 'pvm_sea'} - slot_mgr = mock.Mock() - - # 1) With slot deregistration, default cna_w_list - mock_bld_drv.return_value.unplug.return_value = 'vnet_w' - vif.unplug(self.adpt, 'host_uuid', 'instance', mock_vif, slot_mgr) - mock_bld_drv.assert_called_once_with(self.adpt, 'host_uuid', - 'instance', mock_vif) - mock_bld_drv.return_value.unplug.assert_called_once_with( - mock_vif, cna_w_list=None) - slot_mgr.drop_vnet.assert_called_once_with('vnet_w') - mock_event.assert_called_once_with(self.adpt, 'unplug', 'vnet_w', - mock.ANY, 'pvm_sea') - - # Clean up - mock_bld_drv.reset_mock() - mock_bld_drv.return_value.unplug.reset_mock() - slot_mgr.drop_vnet.reset_mock() - mock_event.reset_mock() - - # 2) Without slot deregistration, specified cna_w_list - mock_bld_drv.return_value.unplug.return_value = None - vif.unplug(self.adpt, 'host_uuid', 'instance', mock_vif, slot_mgr, - cna_w_list='cnalist') - mock_bld_drv.assert_called_once_with(self.adpt, 'host_uuid', - 'instance', mock_vif) - mock_bld_drv.return_value.unplug.assert_called_once_with( - mock_vif, cna_w_list='cnalist') - slot_mgr.drop_vnet.assert_not_called() - # When unplug doesn't find a vif, we don't push an event - self.assertEqual(0, mock_event.call_count) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - def test_plug_raises(self, mock_vif_drv): - """HttpError is converted to VirtualInterfacePlugException.""" - vif_drv = mock.Mock(plug=mock.Mock(side_effect=pvm_ex.HttpError( - resp=mock.Mock(status='status', reqmethod='method', reqpath='path', - reason='reason')))) - mock_vif_drv.return_value = vif_drv - mock_slot_mgr = mock.Mock() - mock_vif = {'address': 'vifaddr'} - self.assertRaises(exception.VirtualInterfacePlugException, - vif.plug, 'adap', 'huuid', 'inst', mock_vif, - mock_slot_mgr, new_vif='new_vif') - mock_vif_drv.assert_called_once_with('adap', 'huuid', 'inst', mock_vif) - vif_drv.plug.assert_called_once_with( - mock_vif, 
mock_slot_mgr.build_map.get_vnet_slot.return_value, - new_vif='new_vif') - mock_slot_mgr.build_map.get_vnet_slot.assert_called_once_with( - 'vifaddr') - - @mock.patch('pypowervm.wrappers.network.VSwitch.search') - def test_get_secure_rmc_vswitch(self, mock_search): - # Test that no data coming back returns None - mock_search.return_value = [] - resp = vif.get_secure_rmc_vswitch(self.adpt, 'host_uuid') - self.assertIsNone(resp) - - # Mock that a couple of vswitches get returned, but only the correct - # MGMT Switch gets returned - mock_vs = mock.MagicMock() - mock_vs.name = 'MGMTSWITCH' - mock_search.return_value = [mock_vs] - self.assertEqual(mock_vs, - vif.get_secure_rmc_vswitch(self.adpt, 'host_uuid')) - mock_search.assert_called_with( - self.adpt, parent_type=pvm_ms.System.schema_type, - parent_uuid='host_uuid', name=vif.SECURE_RMC_VSWITCH) - - @mock.patch('pypowervm.tasks.cna.crt_cna', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_plug_secure_rmc_vif(self, mock_pvm_uuid, mock_crt): - # Mock up the data - mock_pvm_uuid.return_value = 'lpar_uuid' - mock_crt.return_value = mock.Mock() - self.slot_mgr.build_map.get_mgmt_vea_slot = mock.Mock( - return_value=(None, None)) - mock_instance = mock.MagicMock(system_metadata={}) - - # Run the method - vif.plug_secure_rmc_vif(self.adpt, mock_instance, 'host_uuid', - self.slot_mgr) - - # Validate responses - mock_crt.assert_called_once_with( - self.adpt, 'host_uuid', 'lpar_uuid', 4094, vswitch='MGMTSWITCH', - crt_vswitch=True, slot_num=None, mac_addr=None) - self.slot_mgr.register_cna.assert_called_once_with( - mock_crt.return_value) - - @mock.patch('pypowervm.tasks.cna.crt_cna', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_plug_secure_rmc_vif_with_slot(self, mock_pvm_uuid, mock_crt): - # Mock up the data - mock_pvm_uuid.return_value = 'lpar_uuid' - mock_crt.return_value = mock.Mock() - self.slot_mgr.build_map.get_mgmt_vea_slot = mock.Mock( - return_value=('mac_addr', 5)) - mock_instance = mock.MagicMock(system_metadata={}) - - # Run the method - vif.plug_secure_rmc_vif(self.adpt, mock_instance, 'host_uuid', - self.slot_mgr) - - # Validate responses - mock_crt.assert_called_once_with( - self.adpt, 'host_uuid', 'lpar_uuid', 4094, vswitch='MGMTSWITCH', - crt_vswitch=True, slot_num=5, mac_addr='mac_addr') - self.assertFalse(self.slot_mgr.called) - - @mock.patch('pypowervm.tasks.cna.crt_cna', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_plug_secure_rmc_vif_for_rebuild(self, mock_pvm_uuid, mock_crt): - # Mock up the data - mock_pvm_uuid.return_value = 'lpar_uuid' - mock_crt.return_value = mock.Mock() - self.slot_mgr.build_map.get_mgmt_vea_slot = mock.Mock( - return_value=(None, None)) - mock_instance = mock.MagicMock( - system_metadata={'mgmt_interface_mac': 'old_mac'}) - - # Run the method - vif.plug_secure_rmc_vif(self.adpt, mock_instance, 'host_uuid', - self.slot_mgr) - - # Validate responses - # Validate that as part of rebuild, pvm_cna.crt_cna is called with - # 'old_mac' stored in the instance's system_metadata. Also, the slot - # number is not passed. This is because as part of rebuild, the - # instance is destroyed and spawned again. When the instance is - # destroyed, the slot data is removed. When the instance is spawned, - # the required volume and network info is obtained from the BDM - # and network info dicts. The only missing information is the mgmt - # interface mac address.
- mock_crt.assert_called_once_with( - self.adpt, 'host_uuid', 'lpar_uuid', 4094, vswitch='MGMTSWITCH', - crt_vswitch=True, slot_num=None, mac_addr='old_mac') - # Validate that register_cna is called. - self.slot_mgr.register_cna.assert_called_once_with( - mock_crt.return_value) - - def test_build_vif_driver(self): - # Test the Shared Ethernet Adapter type VIF - mock_inst = mock.MagicMock() - mock_inst.name = 'instance' - self.assertIsInstance( - vif._build_vif_driver(self.adpt, 'host_uuid', mock_inst, - {'type': 'pvm_sea'}), - vif.PvmSeaVifDriver) - - self.assertIsInstance( - vif._build_vif_driver(self.adpt, 'host_uuid', mock_inst, - {'type': 'pvm_sriov'}), - vif.PvmVnicSriovVifDriver) - - # Test raises exception for no type - self.assertRaises(exception.VirtualInterfacePlugException, - vif._build_vif_driver, self.adpt, 'host_uuid', - mock_inst, {}) - - # Test an invalid vif type - self.assertRaises(exception.VirtualInterfacePlugException, - vif._build_vif_driver, self.adpt, 'host_uuid', - mock_inst, {'type': 'bad'}) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - def test_pre_live_migrate_at_source(self, mock_build_vif_drv): - mock_drv = mock.MagicMock() - mock_build_vif_drv.return_value = mock_drv - mock_vif = mock.MagicMock() - - vif.pre_live_migrate_at_source(self.adpt, 'host_uuid', mock.Mock(), - mock_vif) - - mock_drv.pre_live_migrate_at_source.assert_called_once_with(mock_vif) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - def test_rollback_live_migration_at_destination(self, mock_build_vif_drv): - mock_build_vif_drv.return_value = mock_drv = mock.MagicMock() - mock_vif, mappings = mock.MagicMock(), {} - - vif.rollback_live_migration_at_destination( - self.adpt, 'host_uuid', mock.Mock(), mock_vif, - mappings) - - rb = mock_drv.rollback_live_migration_at_destination - rb.assert_called_once_with(mock_vif, mappings) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - def test_pre_live_migrate_at_destination(self, mock_build_vif_drv): - mock_drv = mock.MagicMock() - mock_build_vif_drv.return_value = mock_drv - mock_vif = mock.MagicMock() - - vif.pre_live_migrate_at_destination(self.adpt, 'host_uuid', - mock.Mock(), mock_vif, {}) - - mock_drv.pre_live_migrate_at_destination.assert_called_once_with( - mock_vif, {}) - - @mock.patch('nova_powervm.virt.powervm.vif._build_vif_driver', - autospec=True) - def test_post_live_migrate_at_source(self, mock_build_vif_drv): - mock_drv = mock.MagicMock() - mock_build_vif_drv.return_value = mock_drv - mock_vif = mock.MagicMock() - - vif.post_live_migrate_at_source(self.adpt, 'host_uuid', mock.Mock(), - mock_vif) - mock_drv.post_live_migrate_at_source.assert_called_once_with(mock_vif) - - def test_get_trunk_dev_name(self): - mock_vif = {'devname': 'tap_test', 'id': '1234567890123456'} - - # Test when the dev name is available - self.assertEqual('tap_test', vif._get_trunk_dev_name(mock_vif)) - - # And when it isn't. 
Should also cut off a few characters from the id - del mock_vif['devname'] - self.assertEqual('nic12345678901', - vif._get_trunk_dev_name(mock_vif)) - - -class TestVifSriovDriver(test.NoDBTestCase): - - def setUp(self): - super(TestVifSriovDriver, self).setUp() - - self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt - self.inst = mock.MagicMock() - self.drv = vif.PvmVnicSriovVifDriver(self.adpt, 'host_uuid', self.inst) - - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_no_pports(self, mock_sysget): - """Raise when plug is called with a network with no physical ports.""" - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='loc1', label='foo'), - mock.Mock(loc_code='loc2', label='')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='loc3', label='bar'), - mock.Mock(loc_code='loc4', label='foo')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - self.assertRaises(exception.VirtualInterfacePlugException, - self.drv.plug, FakeDirectVif('net2'), 1) - - @mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.sriov.set_vnic_back_devs', autospec=True) - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_no_physnet(self, mock_sysget, mock_back_devs, mock_pvm_uuid, - mock_vnic_bld): - slot = 10 - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port11', label='default'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port22', label='default')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - netapi.API = mock.Mock(return_value=FakeNetworkAPI('default')) - self.drv.plug(FakeDirectVif(''), slot) - # Ensure back devs are created with pports from sriov_adaps and - # not with the pports passed into the plug method - mock_back_devs.assert_called_once_with( - mock_vnic_bld.return_value, ['port11', 'port22'], redundancy=3, - capacity=None, max_capacity=None, check_port_status=True, - sys_w=sys) - - @mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.sriov.set_vnic_back_devs', autospec=True) - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_no_matching_pports(self, mock_sysget, mock_back_devs, - mock_pvm_uuid, mock_vnic_bld): - slot = 10 - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port1', label='data1'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port2', label='data2')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - netapi.API = mock.Mock(return_value=FakeNetworkAPI('default')) - # Ensure Plug exception is raised when there are no matching pports - # for the physical network of the corresponding neutron network - self.assertRaises(exception.VirtualInterfacePlugException, - self.drv.plug, - FakeDirectVif('default'), slot) - - @mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.sriov.set_vnic_back_devs', autospec=True) - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_bad_pports(self, mock_sysget, mock_back_devs, mock_pvm_uuid, - mock_vnic_bld): - slot = 10 - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port1', label='default'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port2', label='default')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - netapi.API = mock.Mock(return_value=FakeNetworkAPI('default')) - self.drv.plug(FakeDirectVif(''), slot) - # Ensure back devs are created with the correct pports belonging to the - # same physical network corresponding to the neutron network - mock_back_devs.assert_called_once_with( - mock_vnic_bld.return_value, ['port1', 'port2'], redundancy=3, - capacity=None, max_capacity=None, check_port_status=True, - sys_w=sys) - - @mock.patch('pypowervm.wrappers.managed_system.System.get') - @mock.patch('pypowervm.util.sanitize_mac_for_api', autospec=True) - @mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('pypowervm.tasks.sriov.set_vnic_back_devs', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_plug(self, mock_pvm_uuid, mock_back_devs, mock_vnic_bld, - mock_san_mac, mock_sysget): - slot = 10 - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port1', label='default'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port2', label='default')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - self.drv.plug(FakeDirectVif('default'), - slot) - mock_san_mac.assert_called_once_with('ab:ab:ab:ab:ab:ab') - mock_vnic_bld.assert_called_once_with( - self.drv.adapter, 79, slot_num=slot, - mac_addr=mock_san_mac.return_value, allowed_vlans='NONE', - allowed_macs='NONE') - mock_back_devs.assert_called_once_with( - mock_vnic_bld.return_value, ['port1', 'port2'], redundancy=3, - capacity=None, max_capacity=None, check_port_status=True, - sys_w=sys) - mock_pvm_uuid.assert_called_once_with(self.drv.instance) - mock_vnic_bld.return_value.create.assert_called_once_with( - parent_type=pvm_lpar.LPAR, parent_uuid=mock_pvm_uuid.return_value) - - # Now with redundancy/capacity values from binding:profile - mock_san_mac.reset_mock() - mock_vnic_bld.reset_mock() - mock_back_devs.reset_mock() - mock_pvm_uuid.reset_mock() - self.drv.plug(FakeDirectVif('default', cap=0.08), - slot) - mock_san_mac.assert_called_once_with('ab:ab:ab:ab:ab:ab') - mock_vnic_bld.assert_called_once_with( - self.drv.adapter, 79, slot_num=slot, - mac_addr=mock_san_mac.return_value, allowed_vlans='NONE', - allowed_macs='NONE') - mock_back_devs.assert_called_once_with( - mock_vnic_bld.return_value, ['port1', 'port2'], - redundancy=3, capacity=0.08, check_port_status=True, - sys_w=sys, max_capacity=None) - mock_pvm_uuid.assert_called_once_with(self.drv.instance) - mock_vnic_bld.return_value.create.assert_called_once_with( - parent_type=pvm_lpar.LPAR, parent_uuid=mock_pvm_uuid.return_value) - - # No-op with new_vif=False - mock_san_mac.reset_mock() - mock_vnic_bld.reset_mock() - mock_back_devs.reset_mock() - mock_pvm_uuid.reset_mock() - self.assertIsNone(self.drv.plug( - FakeDirectVif('default'), slot, new_vif=False)) - self.assertEqual(0, mock_san_mac.call_count) - self.assertEqual(0, mock_vnic_bld.call_count) - self.assertEqual(0, mock_back_devs.call_count) - self.assertEqual(0, mock_pvm_uuid.call_count) - -
@mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('pypowervm.tasks.sriov.set_vnic_back_devs') - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_max_capacity(self, mock_sysget, mock_back_devs, - mock_pvm_uuid, mock_vnic_bld): - slot = 10 - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port1', label='default'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port2', label='default')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - netapi.API = mock.Mock(return_value=FakeNetworkAPI('default')) - self.drv.plug(FakeDirectVifWithMaxCapacity('default', - cap=0.03, maxcap=0.75), - slot) - mock_back_devs.assert_called_once_with( - mock_vnic_bld.return_value, ['port1', 'port2'], redundancy=3, - capacity=0.03, max_capacity=0.75, check_port_status=True, - sys_w=sys) - - # Test without max capacity, it is set to None - self.drv.plug(FakeDirectVifWithMaxCapacity('data1', - cap=0.5), slot) - - mock_back_devs.assert_called_with( - mock_vnic_bld.return_value, ['port3'], redundancy=3, - capacity=0.5, max_capacity=None, check_port_status=True, - sys_w=sys) - - @mock.patch('pypowervm.wrappers.iocard.VNIC.bld') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('pypowervm.wrappers.managed_system.System.get') - def test_plug_max_capacity_error(self, mock_sysget, mock_pvm_uuid, - mock_vnic_bld): - sriov_adaps = [ - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port1', label='default'), - mock.Mock(loc_code='port3', label='data1')]), - mock.Mock(phys_ports=[ - mock.Mock(loc_code='port4', label='data2'), - mock.Mock(loc_code='port2', label='default')])] - sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) - mock_sysget.return_value = [sys] - netapi.API = mock.Mock(return_value=FakeNetworkAPI('default')) - - # Ensure VirtualInterfacePlugException is raised if maximum capacity - # is greater than 100 percent - self.assertRaises(exception.VirtualInterfacePlugException, - self.drv.plug, - FakeDirectVifWithMaxCapacity('data1', - cap=0.5, maxcap=1.4), 1) - - # Ensure VirtualInterfacePlugException is raised if maximum capacity - # is less than capacity - self.assertRaises(exception.VirtualInterfacePlugException, - self.drv.plug, - FakeDirectVifWithMaxCapacity('data1', - cap=0.5, maxcap=0.4), 1) - - @mock.patch('pypowervm.wrappers.iocard.VNIC.search') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.util.sanitize_mac_for_api', autospec=True) - def test_unplug(self, mock_san_mac, mock_pvm_uuid, mock_find): - fvif = FakeDirectVif('default') - self.assertEqual(mock_find.return_value, self.drv.unplug(fvif)) - mock_find.assert_called_once_with( - self.drv.adapter, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_pvm_uuid.return_value, - mac=mock_san_mac.return_value, one_result=True) - mock_pvm_uuid.assert_called_once_with(self.inst) - mock_san_mac.assert_called_once_with(fvif['address']) - mock_find.return_value.delete.assert_called_once_with() - - # Not found - mock_find.reset_mock() - mock_pvm_uuid.reset_mock() - mock_san_mac.reset_mock() - mock_find.return_value = None - self.assertIsNone(self.drv.unplug(fvif)) - mock_find.assert_called_once_with( - self.drv.adapter, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_pvm_uuid.return_value, - mac=mock_san_mac.return_value, 
one_result=True) - mock_pvm_uuid.assert_called_once_with(self.inst) - mock_san_mac.assert_called_once_with(fvif['address']) - - -class FakeDirectVif(dict): - - def __init__(self, physnet, pports=None, cap=None): - self._physnet = physnet - super(FakeDirectVif, self).__init__( - network={'id': 'net_id'}, - address='ab:ab:ab:ab:ab:ab', - details={ - 'vlan': '79', - 'physical_ports': [], - 'redundancy': 3, - 'capacity': cap}, - profile={}) - if pports is not None: - self['details']['physical_ports'] = pports - - def get_physical_network(self): - return self._physnet - - -class FakeDirectVifWithMaxCapacity(FakeDirectVif): - - def __init__(self, physnet, pports=None, cap=None, maxcap=None): - super(FakeDirectVifWithMaxCapacity, self).__init__(physnet, - pports=pports, - cap=cap) - self.get('details')['maxcapacity'] = maxcap - - -class TestVifSeaDriver(test.NoDBTestCase): - - def setUp(self): - super(TestVifSeaDriver, self).setUp() - - self.adpt = self.useFixture(pvm_fx.AdapterFx( - traits=pvm_fx.LocalPVMTraits)).adpt - self.inst = mock.MagicMock() - self.drv = vif.PvmSeaVifDriver(self.adpt, 'host_uuid', self.inst) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.cna.crt_cna', autospec=True) - def test_plug(self, mock_crt_cna, mock_pvm_uuid): - """Tests that a VIF can be created.""" - - # Set up the mocks - fake_vif = {'network': {'meta': {'vlan': 5}}, - 'address': 'aabbccddeeff'} - fake_slot_num = 5 - - def validate_crt(adpt, host_uuid, lpar_uuid, vlan, mac_addr=None, - slot_num=None): - self.assertEqual('host_uuid', host_uuid) - self.assertEqual(5, vlan) - self.assertEqual('aabbccddeeff', mac_addr) - self.assertEqual(5, slot_num) - return pvm_net.CNA.bld(self.adpt, 5, host_uuid, slot_num=slot_num, - mac_addr=mac_addr) - mock_crt_cna.side_effect = validate_crt - - # Invoke - resp = self.drv.plug(fake_vif, fake_slot_num) - - # Validate (along with validate method above) - self.assertEqual(1, mock_crt_cna.call_count) - self.assertIsNotNone(resp) - self.assertIsInstance(resp, pvm_net.CNA) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.cna.crt_cna', autospec=True) - def test_plug_from_neutron(self, mock_crt_cna, mock_pvm_uuid): - """Tests that a VIF can be created. Mocks Neutron net""" - - # Set up the mocks. 
Look like a Neutron-provided network - fake_vif = {'details': {'vlan': 5}, 'network': {'meta': {}}, - 'address': 'aabbccddeeff'} - fake_slot_num = 5 - - def validate_crt(adpt, host_uuid, lpar_uuid, vlan, mac_addr=None, - slot_num=None): - self.assertEqual('host_uuid', host_uuid) - self.assertEqual(5, vlan) - self.assertEqual('aabbccddeeff', mac_addr) - self.assertEqual(5, slot_num) - return pvm_net.CNA.bld(self.adpt, 5, host_uuid, slot_num=slot_num, - mac_addr=mac_addr) - mock_crt_cna.side_effect = validate_crt - - # Invoke - resp = self.drv.plug(fake_vif, fake_slot_num) - - # Validate (along with validate method above) - self.assertEqual(1, mock_crt_cna.call_count) - self.assertIsNotNone(resp) - self.assertIsInstance(resp, pvm_net.CNA) - - def test_plug_existing_vif(self): - """Tests that a VIF need not be created.""" - - # Set up the mocks - fake_vif = {'network': {'meta': {'vlan': 5}}, - 'address': 'aabbccddeeff'} - fake_slot_num = 5 - - # Invoke - resp = self.drv.plug(fake_vif, fake_slot_num, new_vif=False) - - self.assertIsNone(resp) - - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True) - def test_unplug_vifs(self, mock_vm_get): - """Tests that a delete of the vif can be done.""" - # Mock up the CNA response. Two should already exist, the other - # should not. - cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')] - mock_vm_get.return_value = cnas - - # Run the method. The AABBCCDDEE11 CNA won't be unplugged (it wasn't - # invoked below) and the last unplug will also just no-op because it's - # not on the VM. - self.drv.unplug({'address': 'aa:bb:cc:dd:ee:ff'}) - self.drv.unplug({'address': 'aa:bb:cc:dd:ee:22'}) - self.drv.unplug({'address': 'aa:bb:cc:dd:ee:33'}) - - # The delete should have been called exactly once per matching CNA. - # The second CNA didn't have a matching mac, so it should be skipped. - self.assertEqual(1, cnas[0].delete.call_count) - self.assertEqual(0, cnas[1].delete.call_count) - self.assertEqual(1, cnas[2].delete.call_count) - - -class TestVifOvsDriver(test.NoDBTestCase): - - def setUp(self): - super(TestVifOvsDriver, self).setUp() - - self.adpt = self.useFixture(pvm_fx.AdapterFx( - traits=pvm_fx.LocalPVMTraits)).adpt - self.inst = mock.MagicMock(uuid='inst_uuid') - self.drv = vif.PvmOvsVifDriver(self.adpt, 'host_uuid', self.inst) - - @mock.patch('nova_powervm.virt.powervm.vif._get_trunk_dev_name', - autospec=True) - @mock.patch('pypowervm.tasks.cna.crt_p2p_cna', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_plug(self, mock_pvm_uuid, mock_mgmt_lpar, mock_p2p_cna, - mock_trunk_dev_name): - # Mock the data - mock_pvm_uuid.return_value = 'lpar_uuid' - mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid') - mock_trunk_dev_name.return_value = 'device' - - cna_w, trunk_wraps = mock.MagicMock(), [mock.MagicMock()] - mock_p2p_cna.return_value = cna_w, trunk_wraps - - # Run the plug - net_model = model.Model({'bridge': 'br-int', 'meta': {'mtu': 1450}}) - vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id', - devname='tap-dev', network=net_model) - slot_num = 5 - self.drv.plug(vif, slot_num) - - # Validate the calls - ovs_ext_ids = ('iface-id=vif_id,iface-status=active,' - 'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid') - mock_p2p_cna.assert_called_once_with( - self.adpt, 'host_uuid', 'lpar_uuid', ['mgmt_uuid'], - 'NovaLinkVEABridge', crt_vswitch=True, - mac_addr='aa:bb:cc:dd:ee:ff', slot_num=slot_num, dev_name='device', - ovs_bridge='br-int', ovs_ext_ids=ovs_ext_ids, configured_mtu=1450) - - @mock.patch('nova_powervm.virt.powervm.vif._get_trunk_dev_name') - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('nova_powervm.virt.powervm.vif.PvmOvsVifDriver.' - '_find_cna_for_vif') - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas') - @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True) - def test_plug_existing_vif(self, mock_find_trunks, mock_get_cnas, - mock_find_cna, mock_pvm_uuid, mock_mgmt_lpar, - mock_trunk_dev_name): - # Mock the data - t1, t2 = mock.MagicMock(), mock.MagicMock() - mock_find_trunks.return_value = [t1, t2] - - mock_cna = mock.Mock() - mock_get_cnas.return_value = [mock_cna, mock.Mock()] - - mock_find_cna.return_value = mock_cna - - mock_pvm_uuid.return_value = 'lpar_uuid' - - mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid') - - mock_trunk_dev_name.return_value = 'device' - - self.inst = mock.MagicMock(uuid='c2e7ff9f-b9b6-46fa-8716-93bbb795b8b4') - self.drv = vif.PvmOvsVifDriver(self.adpt, 'host_uuid', self.inst) - - # Run the plug - network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1500}}) - mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id', - network=network_model) - slot_num = 5 - resp = self.drv.plug(mock_vif, slot_num, new_vif=False) - - self.assertIsNone(resp) - - # Validate that trunk.update got invoked for all trunks of the vif's - # CNA - self.assertTrue(t1.update.called) - self.assertTrue(t2.update.called) - - @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vif._get_trunk_dev_name') - @mock.patch('nova_powervm.virt.powervm.vif.PvmOvsVifDriver.'
- '_find_cna_for_vif') - @mock.patch('nova_powervm.virt.powervm.vm.get_cnas') - def test_unplug(self, mock_get_cnas, mock_find_cna, mock_trunk_dev_name, - mock_find_trunks): - # Set up the mocks - mock_cna = mock.Mock() - mock_get_cnas.return_value = [mock_cna, mock.Mock()] - mock_find_cna.return_value = mock_cna - - t1, t2 = mock.MagicMock(), mock.MagicMock() - mock_find_trunks.return_value = [t1, t2] - - mock_trunk_dev_name.return_value = 'fake_dev' - - # Call the unplug - mock_vif = {'address': 'aa:bb:cc:dd:ee:ff', - 'network': {'bridge': 'br-int'}} - self.drv.unplug(mock_vif) - - # The trunks and the cna should have been deleted - self.assertTrue(t1.delete.called) - self.assertTrue(t2.delete.called) - self.assertTrue(mock_cna.delete.called) - - @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True) - @mock.patch('pypowervm.wrappers.network.CNA', autospec=True) - @mock.patch('pypowervm.util.sanitize_mac_for_api', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - def test_pre_live_migrate_at_source(self, mock_pvm_uuid, mock_sanitize, - mock_cna, mock_trunk_find): - # Set up the mocks - vif = {'address': 'aa:bb:cc:dd:ee:ff'} - mock_sanitize.return_value = 'AABBCCDDEEFF' - mock_trunk_find.return_value = 'trunk' - mock_pvm_uuid.return_value = 'pvm_uuid' - - resp = self.drv.pre_live_migrate_at_source(vif) - self.assertEqual(resp, 'trunk') - - # Make sure the APIs were called correctly - mock_sanitize.assert_called_once_with(vif['address']) - mock_cna.search.assert_called_once_with( - self.adpt, parent_type=pvm_lpar.LPAR.schema_type, - parent_uuid='pvm_uuid', one_result=True, mac='AABBCCDDEEFF') - mock_trunk_find.assert_called_once_with(self.adpt, mock.ANY) - - @mock.patch('pypowervm.tasks.cna.crt_trunk_with_free_vlan', autospec=True) - @mock.patch('pypowervm.tasks.cna.find_orphaned_trunks', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True) - def test_pre_live_migrate_at_destination( - self, mock_part_get, mock_find_trunks, mock_trunk_crt): - # Mock the vif - net_model = model.Model({'bridge': 'br-int', 'meta': {'mtu': 1450}}) - vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id', - devname='tap-dev', network=net_model) - - # Mock out the management partition - mock_mgmt_wrap = mock.MagicMock() - mock_mgmt_wrap.uuid = 'mgmt_uuid' - mock_part_get.return_value = mock_mgmt_wrap - - mock_trunk_crt.return_value = [mock.Mock(pvid=2)] - - mock_orphan_wrap = mock.MagicMock(mac='aabbccddeeff') - mock_find_trunks.return_value = [mock_orphan_wrap] - - # Invoke and test the basic response - vea_vlan_mappings = {} - self.drv.pre_live_migrate_at_destination(vif, vea_vlan_mappings) - self.assertEqual(vea_vlan_mappings['aa:bb:cc:dd:ee:ff'], 2) - - # Now validate it called the things it needed to - ovs_ext_ids = ('iface-id=vif_id,iface-status=active,' - 'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid') - mock_trunk_crt.assert_called_once_with( - self.adpt, 'host_uuid', ['mgmt_uuid'], - CONF.powervm.pvm_vswitch_for_novalink_io, dev_name='tap-dev', - ovs_bridge='br-int', ovs_ext_ids=ovs_ext_ids, - configured_mtu=1450) - mock_find_trunks.assert_called_once_with( - self.adpt, CONF.powervm.pvm_vswitch_for_novalink_io) - mock_orphan_wrap.delete.assert_called_once_with() - - @mock.patch('pypowervm.wrappers.network.CNA', autospec=True) - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True) - @mock.patch('pypowervm.wrappers.network.VSwitch', autospec=True) - def 
test_rollback_live_migration_at_destination( - self, mock_vs, mock_get_part, mock_cna): - # All the fun mocking - mock_vs.search.return_value = mock.MagicMock(switch_id=5) - - # Since this gets passed through the conductor, the VLANs switch to - # string format. - vea_vlan_mappings = {'aa:bb:cc:dd:ee:ff': '3', - 'aa:bb:cc:dd:ee:ee': '4'} - vif = {'devname': 'tap-dev', 'address': 'aa:bb:cc:dd:ee:ee', - 'network': {'bridge': 'br-int'}, 'id': 'vif_id'} - - mock_vio = mock.MagicMock(schema_type='VIO', uuid='uuid') - mock_get_part.return_value = mock_vio - - trunk1 = mock.Mock(pvid=2, vswitch_id=3, trunk_pri=1) - trunk2 = mock.Mock(pvid=3, vswitch_id=5, trunk_pri=1) - trunk3 = mock.Mock(pvid=4, vswitch_id=5, trunk_pri=None) - trunk4 = mock.Mock(pvid=4, vswitch_id=5, trunk_pri=1) - mock_cna.get.return_value = [trunk1, trunk2, trunk3, trunk4] - - # Invoke - self.drv.rollback_live_migration_at_destination(vif, vea_vlan_mappings) - - # Make sure the trunk was deleted - trunk4.delete.assert_called_once() - - # Now make sure the calls were done correctly to actually produce a - # trunk adapter - mock_vs.search.assert_called_once_with( - self.drv.adapter, parent_type=pvm_ms.System, one_result=True, - name=CONF.powervm.pvm_vswitch_for_novalink_io) - mock_get_part.assert_called_once_with(self.drv.adapter) - mock_cna.get.assert_called_once_with( - self.drv.adapter, parent=mock_vio) - - @mock.patch('nova_powervm.virt.powervm.vif.PvmOvsVifDriver.' - '_cleanup_orphan_adapters') - def test_post_live_migrate_at_source(self, mock_orphan_cleanup): - # Mock the vif - vif = {'devname': 'tap-dev', 'address': 'aa:bb:cc:dd:ee:ff', - 'network': {'bridge': 'br-int'}, 'id': 'vif_id'} - # Invoke and test - self.drv.post_live_migrate_at_source(vif) - # Validate that the cleanup is called - mock_orphan_cleanup.assert_called_once_with( - vif, CONF.powervm.pvm_vswitch_for_novalink_io) diff --git a/nova_powervm/tests/virt/powervm/test_vm.py b/nova_powervm/tests/virt/powervm/test_vm.py deleted file mode 100644 index 22af0af0..00000000 --- a/nova_powervm/tests/virt/powervm/test_vm.py +++ /dev/null @@ -1,913 +0,0 @@ -# Copyright 2014, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from __future__ import absolute_import - -import fixtures -import logging -import mock - -from nova.compute import power_state -from nova.compute import task_states -from nova import exception -from nova import objects -from nova import test -from nova.virt import event -from pypowervm import exceptions as pvm_exc -from pypowervm.helpers import log_helper as pvm_log -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.tests.test_utils import pvmhttp -from pypowervm.utils import lpar_builder as lpar_bld -from pypowervm.wrappers import base_partition as pvm_bp -from pypowervm.wrappers import logical_partition as pvm_lpar - -from nova_powervm.tests.virt import powervm -from nova_powervm.virt.powervm import exception as nvex -from nova_powervm.virt.powervm import vm - - -LPAR_HTTPRESP_FILE = "lpar.txt" -LPAR_MAPPING = ( - { - 'z3-9-5-126-127-00000001': '089ffb20-5d19-4a8c-bb80-13650627d985', - 'z3-9-5-126-208-000001f0': '668b0882-c24a-4ae9-91c8-297e95e3fe29' - }) - -LOG = logging.getLogger(__name__) -logging.basicConfig() - - -class FakeAdapterResponse(object): - def __init__(self, status): - self.status = status - - -class TestVMBuilder(test.NoDBTestCase): - - def setUp(self): - super(TestVMBuilder, self).setUp() - - self.adpt = mock.MagicMock() - self.host_w = mock.MagicMock() - self.lpar_b = vm.VMBuilder(self.host_w, self.adpt) - - self.san_lpar_name = self.useFixture(fixtures.MockPatch( - 'pypowervm.util.sanitize_partition_name_for_api')).mock - self.san_lpar_name.side_effect = lambda name: name - - def test_resize_attributes_maintained(self): - lpar_w = mock.MagicMock() - lpar_w.io_config.max_virtual_slots = 200 - lpar_w.proc_config.shared_proc_cfg.pool_id = 56 - lpar_w.avail_priority = 129 - lpar_w.srr_enabled = False - lpar_w.proc_compat_mode = 'POWER7' - lpar_w.allow_perf_data_collection = True - vm_bldr = vm.VMBuilder(self.host_w, self.adpt, cur_lpar_w=lpar_w) - self.assertEqual(200, vm_bldr.stdz.max_slots) - self.assertEqual(56, vm_bldr.stdz.spp) - self.assertEqual(129, vm_bldr.stdz.avail_priority) - self.assertFalse(vm_bldr.stdz.srr) - self.assertEqual('POWER7', vm_bldr.stdz.proc_compat) - self.assertTrue(vm_bldr.stdz.enable_lpar_metric) - - def test_max_vslots_is_the_greater(self): - lpar_w = mock.MagicMock() - lpar_w.io_config.max_virtual_slots = 64 - lpar_w.proc_config.shared_proc_cfg.pool_id = 56 - lpar_w.avail_priority = 129 - lpar_w.srr_enabled = False - lpar_w.proc_compat_mode = 'POWER7' - lpar_w.allow_perf_data_collection = True - slot_mgr = mock.MagicMock() - slot_mgr.build_map.get_max_vslots.return_value = 128 - vm_bldr = vm.VMBuilder( - self.host_w, self.adpt, slot_mgr=slot_mgr, cur_lpar_w=lpar_w) - self.assertEqual(128, vm_bldr.stdz.max_slots) - - def test_conf_values(self): - # Test driver CONF values are passed to the standardizer - self.flags(uncapped_proc_weight=75, proc_units_factor=.25, - group='powervm') - lpar_bldr = vm.VMBuilder(self.host_w, self.adpt) - self.assertEqual(75, lpar_bldr.stdz.uncapped_weight) - self.assertEqual(.25, lpar_bldr.stdz.proc_units_factor) - - def test_format_flavor(self): - """Perform tests against _format_flavor.""" - instance = objects.Instance(**powervm.TEST_INSTANCE) - flavor = instance.get_flavor() - # LP 1561128, simplified remote restart is enabled by default - lpar_attrs = {'memory': 2048, - 'name': 'instance-00000001', - 'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5', - 'vcpu': 1, 'srr_capability': True} - - # Test dedicated procs - flavor.extra_specs = {'powervm:dedicated_proc': 'true'} - test_attrs = 
dict(lpar_attrs, dedicated_proc='true') - - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test dedicated procs, min/max vcpu and sharing mode - flavor.extra_specs = {'powervm:dedicated_proc': 'true', - 'powervm:dedicated_sharing_mode': - 'share_idle_procs_active', - 'powervm:min_vcpu': '1', - 'powervm:max_vcpu': '3'} - test_attrs = dict(lpar_attrs, - dedicated_proc='true', - sharing_mode='sre idle procs active', - min_vcpu='1', max_vcpu='3') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test shared proc sharing mode - flavor.extra_specs = {'powervm:uncapped': 'true'} - test_attrs = dict(lpar_attrs, sharing_mode='uncapped') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test availability priority - flavor.extra_specs = {'powervm:availability_priority': '150'} - test_attrs = dict(lpar_attrs, avail_priority='150') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test the Enable LPAR Metrics for true value - flavor.extra_specs = {'powervm:enable_lpar_metric': 'true'} - test_attrs = dict(lpar_attrs, enable_lpar_metric=True) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test the Enable LPAR Metrics for false value - flavor.extra_specs = {'powervm:enable_lpar_metric': 'false'} - test_attrs = dict(lpar_attrs, enable_lpar_metric=False) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test processor compatibility - flavor.extra_specs = {'powervm:processor_compatibility': 'POWER8'} - test_attrs = dict(lpar_attrs, processor_compatibility='POWER8') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - flavor.extra_specs = {'powervm:processor_compatibility': 'POWER6+'} - test_attrs = dict( - lpar_attrs, - processor_compatibility=pvm_bp.LPARCompat.POWER6_PLUS) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - flavor.extra_specs = {'powervm:processor_compatibility': - 'POWER6+_Enhanced'} - test_attrs = dict( - lpar_attrs, - processor_compatibility=pvm_bp.LPARCompat.POWER6_PLUS_ENHANCED) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test min, max proc units - flavor.extra_specs = {'powervm:min_proc_units': '0.5', - 'powervm:max_proc_units': '2.0'} - test_attrs = dict(lpar_attrs, min_proc_units='0.5', - max_proc_units='2.0') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test min, max mem - flavor.extra_specs = {'powervm:min_mem': '1024', - 'powervm:max_mem': '4096'} - test_attrs = dict(lpar_attrs, min_mem='1024', max_mem='4096') - 
self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.san_lpar_name.assert_called_with(instance.name) - self.san_lpar_name.reset_mock() - - # Test remote restart set to false - flavor.extra_specs = {'powervm:srr_capability': 'false'} - test_attrs = dict(lpar_attrs, srr_capability=False) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - - # Test PPT set - flavor.extra_specs = {'powervm:ppt_ratio': '1:64'} - test_attrs = dict(lpar_attrs, ppt_ratio='1:64') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - - # Test enforce affinity check set to true - flavor.extra_specs = {'powervm:enforce_affinity_check': 'true'} - test_attrs = dict(lpar_attrs, enforce_affinity_check=True) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - - # Test enforce affinity check set to false - flavor.extra_specs = {'powervm:enforce_affinity_check': 'false'} - test_attrs = dict(lpar_attrs, enforce_affinity_check=False) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - - # Test enforce affinity check set to invalid value - flavor.extra_specs = {'powervm:enforce_affinity_check': 'invalid'} - self.assertRaises(exception.ValidationError, - self.lpar_b._format_flavor, instance) - - # Test secure boot set - flavor.extra_specs = {'powervm:secure_boot': '2'} - test_attrs = dict(lpar_attrs, secure_boot='2') - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - - # Prep for unsupported host tests - self.lpar_b.host_w.get_capability.return_value = False - - # Test PPT ratio not set when rebuilding to non-supported host - flavor.extra_specs = {'powervm:ppt_ratio': '1:4096'} - instance.task_state = task_states.REBUILD_SPAWNING - test_attrs = dict(lpar_attrs) - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.lpar_b.host_w.get_capability.assert_called_once_with( - 'physical_page_table_ratio_capable') - - # Test affinity check not set when rebuilding to non-supported host - self.lpar_b.host_w.get_capability.reset_mock() - flavor.extra_specs = {'powervm:enforce_affinity_check': 'true'} - self.assertEqual(self.lpar_b._format_flavor(instance), test_attrs) - self.lpar_b.host_w.get_capability.assert_called_once_with( - 'affinity_check_capable') - - @mock.patch('pypowervm.wrappers.shared_proc_pool.SharedProcPool.search') - def test_spp_pool_id(self, mock_search): - # The default pool is always zero. Validate the path. - self.assertEqual(0, self.lpar_b._spp_pool_id('DefaultPool')) - self.assertEqual(0, self.lpar_b._spp_pool_id(None)) - - # Further invocations require calls to the adapter. Build a minimal - # mocked SPP wrapper - spp = mock.MagicMock() - spp.id = 1 - - # Three invocations. First has too many elems. Second has none. - # Third is just right. 
:-) - mock_search.side_effect = [[spp, spp], [], [spp]] - - self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id, - 'fake_name') - self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id, - 'fake_name') - - self.assertEqual(1, self.lpar_b._spp_pool_id('fake_name')) - - def test_flavor_bool(self): - true_iterations = ['true', 't', 'yes', 'y', 'TrUe', 'YeS', 'Y', 'T'] - for t in true_iterations: - self.assertTrue(self.lpar_b._flavor_bool(t, 'key')) - - false_iterations = ['false', 'f', 'no', 'n', 'FaLSe', 'nO', 'F', 'N'] - for f in false_iterations: - self.assertFalse(self.lpar_b._flavor_bool(f, 'key')) - - raise_iterations = ['NotGood', '', 'invalid'] - for r in raise_iterations: - self.assertRaises(exception.ValidationError, - self.lpar_b._flavor_bool, r, 'key') - - -class TestVM(test.NoDBTestCase): - def setUp(self): - super(TestVM, self).setUp() - self.apt = self.useFixture(pvm_fx.AdapterFx( - traits=pvm_fx.LocalPVMTraits)).adpt - self.apt.helpers = [pvm_log.log_helper] - - self.san_lpar_name = self.useFixture(fixtures.MockPatch( - 'pypowervm.util.sanitize_partition_name_for_api')).mock - self.san_lpar_name.side_effect = lambda name: name - - lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt) - self.assertNotEqual(lpar_http, None, - "Could not load %s " % - LPAR_HTTPRESP_FILE) - - self.resp = lpar_http.response - - def test_translate_event(self): - # (expected event, pvm state, power_state) - tests = [ - (event.EVENT_LIFECYCLE_STARTED, "running", power_state.SHUTDOWN), - (None, "running", power_state.RUNNING) - ] - for t in tests: - self.assertEqual(t[0], vm.translate_event(t[1], t[2])) - - @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_get_instance(self, mock_get_uuid): - mock_get_uuid.return_value = '1111' - self.assertEqual('1111', vm.get_instance('ctx', 'ABC')) - - mock_get_uuid.side_effect = [ - exception.InstanceNotFound({'instance_id': 'fake_instance'}), - '222' - ] - self.assertEqual('222', vm.get_instance('ctx', 'ABC')) - - def test_uuid_set_high_bit(self): - self.assertEqual( - vm._uuid_set_high_bit('65e7a5f0-ceb2-427d-a6d1-e47f0eb38708'), - 'e5e7a5f0-ceb2-427d-a6d1-e47f0eb38708') - self.assertEqual( - vm._uuid_set_high_bit('f6f79d3f-eef1-4009-bfd4-172ab7e6fff4'), - 'f6f79d3f-eef1-4009-bfd4-172ab7e6fff4') - - def test_translate_vm_state(self): - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('running')) - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('migrating running')) - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('starting')) - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('open firmware')) - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('shutting down')) - self.assertEqual(power_state.RUNNING, - vm._translate_vm_state('suspending')) - - self.assertEqual(power_state.SHUTDOWN, - vm._translate_vm_state('migrating not active')) - self.assertEqual(power_state.SHUTDOWN, - vm._translate_vm_state('not activated')) - - self.assertEqual(power_state.NOSTATE, - vm._translate_vm_state('unknown')) - self.assertEqual(power_state.NOSTATE, - vm._translate_vm_state('hardware discovery')) - self.assertEqual(power_state.NOSTATE, - vm._translate_vm_state('not available')) - - self.assertEqual(power_state.SUSPENDED, - vm._translate_vm_state('resuming')) - self.assertEqual(power_state.SUSPENDED, - vm._translate_vm_state('suspended')) - - self.assertEqual(power_state.CRASHED, - vm._translate_vm_state('error')) - - def test_get_lpars(self): - 
self.apt.read.return_value = self.resp - lpars = vm.get_lpars(self.apt) - # One of the LPARs is a management partition, so one less than the - # total length should be returned. - self.assertEqual(len(self.resp.feed.entries) - 1, len(lpars)) - - exc = pvm_exc.Error('Not found', response=FakeAdapterResponse(404)) - self.apt.read.side_effect = exc - self.assertRaises(pvm_exc.Error, vm.get_lpars, self.apt) - - def test_get_lpar_names(self): - self.apt.read.return_value = self.resp - lpar_list = vm.get_lpar_names(self.apt) - # Check the first one in the feed and the length of the feed - self.assertEqual(lpar_list[0], 'z3-9-5-126-208-000001f0') - self.assertEqual(len(lpar_list), 20) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True) - def test_dlt_lpar(self, mock_vterm, mock_pvm_uuid): - """Performs a delete LPAR test.""" - mock_pvm_uuid.return_value = 'pvm_uuid' - - vm.delete_lpar(self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_called_once_with('LogicalPartition', - root_id='pvm_uuid') - - # Test Failure Path - # build a mock response body with the expected HSCL msg - resp = mock.Mock(body='error msg: HSCL151B more text') - self.apt.delete.side_effect = pvm_exc.Error( - 'Mock Error Message', response=resp) - - # Reset counters - mock_pvm_uuid.reset_mock() - self.apt.reset_mock() - mock_vterm.reset_mock() - - self.assertRaises(pvm_exc.Error, - vm.delete_lpar, self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_called_once_with('LogicalPartition', - root_id='pvm_uuid') - - # Test HttpNotFound - exception not raised - mock_pvm_uuid.reset_mock() - self.apt.reset_mock() - mock_vterm.reset_mock() - - resp.status = 404 - self.apt.delete.side_effect = pvm_exc.HttpNotFound(resp=resp) - vm.delete_lpar(self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_called_once_with('LogicalPartition', - root_id='pvm_uuid') - - # Test Other HttpError - mock_pvm_uuid.reset_mock() - self.apt.reset_mock() - mock_vterm.reset_mock() - - resp.status = 111 - self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp) - self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_called_once_with('LogicalPartition', - root_id='pvm_uuid') - - # Test HttpNotFound closing vterm - mock_pvm_uuid.reset_mock() - self.apt.reset_mock() - mock_vterm.reset_mock() - - resp.status = 404 - mock_vterm.side_effect = pvm_exc.HttpNotFound(resp=resp) - vm.delete_lpar(self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_not_called() - - # Test Other HttpError closing vterm - mock_pvm_uuid.reset_mock() - self.apt.reset_mock() - mock_vterm.reset_mock() - - resp.status = 111 - mock_vterm.side_effect = pvm_exc.HttpError(resp=resp) - self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst') - mock_pvm_uuid.assert_called_once_with('inst') - mock_vterm.assert_called_once_with(self.apt, 'pvm_uuid') - self.apt.delete.assert_not_called() - - @mock.patch('nova_powervm.virt.powervm.vm.VMBuilder._add_IBMi_attrs', - autospec=True) 
- @mock.patch('pypowervm.utils.lpar_builder.DefaultStandardize', - autospec=True) - @mock.patch('pypowervm.utils.lpar_builder.LPARBuilder.build', - autospec=True) - @mock.patch('pypowervm.utils.validation.LPARWrapperValidator.validate_all', - autospec=True) - def test_crt_lpar(self, mock_vld_all, mock_bld, mock_stdz, mock_ibmi): - instance = objects.Instance(**powervm.TEST_INSTANCE) - flavor = instance.get_flavor() - flavor.extra_specs = {'powervm:dedicated_proc': 'true'} - - host_wrapper = mock.Mock() - lparw = pvm_lpar.LPAR.wrap(self.resp.feed.entries[0]) - mock_bld.return_value = lparw - self.apt.create.return_value = lparw.entry - vm.create_lpar(self.apt, host_wrapper, instance, nvram='data') - self.apt.create.assert_called_once_with( - lparw, host_wrapper.schema_type, child_type='LogicalPartition', - root_id=host_wrapper.uuid, service='uom', timeout=-1) - mock_stdz.assert_called_once_with(host_wrapper, uncapped_weight=64, - proc_units_factor=0.1) - self.assertEqual(lparw.nvram, 'data') - self.assertTrue(mock_vld_all.called) - - # Test srr and slot_mgr - self.apt.reset_mock() - mock_vld_all.reset_mock() - mock_stdz.reset_mock() - flavor.extra_specs = {'powervm:srr_capability': 'true'} - self.apt.create.return_value = lparw.entry - mock_slot_mgr = mock.Mock(build_map=mock.Mock( - get_max_vslots=mock.Mock(return_value=123))) - vm.create_lpar(self.apt, host_wrapper, instance, - slot_mgr=mock_slot_mgr) - self.assertTrue(self.apt.create.called) - self.assertTrue(mock_vld_all.called) - self.assertTrue(lparw.srr_enabled) - mock_stdz.assert_called_once_with(host_wrapper, uncapped_weight=64, - proc_units_factor=0.1, max_slots=123) - # The save is called with the LPAR's actual value, which in this mock - # setup comes from lparw - mock_slot_mgr.register_max_vslots.assert_called_with( - lparw.io_config.max_virtual_slots) - - # Test to verify the LPAR Creation with invalid name specification - mock_bld.side_effect = lpar_bld.LPARBuilderException("Invalid Name") - host_wrapper = mock.Mock() - self.assertRaises(exception.BuildAbortException, vm.create_lpar, - self.apt, host_wrapper, instance) - - resp = mock.Mock(status=202, method='fake', path='/dev/', - reason='Failure') - mock_bld.side_effect = pvm_exc.HttpError(resp) - try: - vm.create_lpar(self.apt, host_wrapper, instance) - except nvex.PowerVMAPIFailed as e: - self.assertEqual(e.kwargs['inst_name'], instance.name) - self.assertEqual(e.kwargs['reason'], mock_bld.side_effect) - flavor.extra_specs = {'powervm:BADATTR': 'true'} - host_wrapper = mock.Mock() - self.assertRaises(exception.InvalidAttribute, vm.create_lpar, - self.apt, host_wrapper, instance) - - @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get') - def test_get_instance_wrapper(self, mock_get): - mock_get.side_effect = pvm_exc.HttpNotFound(resp=mock.Mock(status=404)) - instance = objects.Instance(**powervm.TEST_INSTANCE) - # vm.get_instance_wrapper(self.apt, instance, 'lpar_uuid') - self.assertRaises(exception.InstanceNotFound, vm.get_instance_wrapper, - self.apt, instance, 'lpar_uuid') - - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.VMBuilder', autospec=True) - def test_update(self, mock_vmb, mock_get_inst): - instance = objects.Instance(**powervm.TEST_INSTANCE) - entry = mock.Mock() - name = "new_name" - entry.update.return_value = 'NewEntry' - bldr = mock_vmb.return_value - lpar_bldr = bldr.lpar_builder.return_value - new_entry = vm.update(self.apt, 'mock_host_wrap', instance, - entry=entry, 
name=name) - # Ensure the lpar was rebuilt - lpar_bldr.rebuild.assert_called_once_with(entry) - entry.update.assert_called_once_with() - self.assertEqual(name, entry.name) - self.assertEqual('NewEntry', new_entry) - self.san_lpar_name.assert_called_with(name) - - @mock.patch('pypowervm.utils.transaction.entry_transaction', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - def test_rename(self, mock_get_inst, mock_entry_transaction): - instance = objects.Instance(**powervm.TEST_INSTANCE) - - mock_entry_transaction.side_effect = lambda x: x - - entry = mock.Mock() - entry.update.return_value = 'NewEntry' - new_entry = vm.rename(self.apt, instance, 'new_name', entry=entry) - self.assertEqual('new_name', entry.name) - entry.update.assert_called_once_with() - mock_entry_transaction.assert_called_once_with(mock.ANY) - self.assertEqual('NewEntry', new_entry) - self.san_lpar_name.assert_called_with('new_name') - self.san_lpar_name.reset_mock() - - # Test optional entry parameter - entry.reset_mock() - mock_get_inst.return_value = entry - new_entry = vm.rename(self.apt, instance, 'new_name') - mock_get_inst.assert_called_once_with(self.apt, instance) - self.assertEqual('new_name', entry.name) - entry.update.assert_called_once_with() - self.assertEqual('NewEntry', new_entry) - self.san_lpar_name.assert_called_with('new_name') - - def test_add_IBMi_attrs(self): - inst = mock.Mock() - # Non-ibmi distro - attrs = {} - inst.system_metadata = {'image_os_distro': 'rhel'} - bldr = vm.VMBuilder(mock.Mock(), mock.Mock()) - bldr._add_IBMi_attrs(inst, attrs) - self.assertDictEqual(attrs, {}) - - inst.system_metadata = {} - bldr._add_IBMi_attrs(inst, attrs) - self.assertDictEqual(attrs, {}) - - # ibmi distro - inst.system_metadata = {'image_os_distro': 'ibmi'} - bldr._add_IBMi_attrs(inst, attrs) - self.assertDictEqual(attrs, {'env': 'OS400'}) - - @mock.patch('pypowervm.tasks.power.power_on', autospec=True) - @mock.patch('oslo_concurrency.lockutils.lock', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - def test_power_on(self, mock_wrap, mock_lock, mock_power_on): - instance = objects.Instance(**powervm.TEST_INSTANCE) - entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED) - mock_wrap.return_value = entry - - self.assertTrue(vm.power_on(None, instance, opts='opts')) - mock_power_on.assert_called_once_with(entry, None, add_parms='opts') - mock_lock.assert_called_once_with('power_%s' % instance.uuid) - - mock_power_on.reset_mock() - mock_lock.reset_mock() - - stop_states = [ - pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING, - pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN, - pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING, - pvm_bp.LPARState.SUSPENDING] - - for stop_state in stop_states: - entry.state = stop_state - self.assertFalse(vm.power_on(None, instance)) - self.assertEqual(0, mock_power_on.call_count) - mock_lock.assert_called_once_with('power_%s' % instance.uuid) - mock_lock.reset_mock() - - @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True) - @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True) - @mock.patch('oslo_concurrency.lockutils.lock', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - def test_power_off(self, mock_wrap, mock_lock, mock_power_off, mock_pop): - instance = objects.Instance(**powervm.TEST_INSTANCE) - entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED) - mock_wrap.return_value 
= entry - - self.assertFalse(vm.power_off(None, instance)) - self.assertEqual(0, mock_power_off.call_count) - self.assertEqual(0, mock_pop.stop.call_count) - mock_lock.assert_called_once_with('power_%s' % instance.uuid) - - stop_states = [ - pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING, - pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN, - pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING, - pvm_bp.LPARState.SUSPENDING] - for stop_state in stop_states: - entry.state = stop_state - mock_power_off.reset_mock() - mock_pop.stop.reset_mock() - mock_lock.reset_mock() - self.assertTrue(vm.power_off(None, instance)) - mock_power_off.assert_called_once_with(entry) - self.assertEqual(0, mock_pop.stop.call_count) - mock_lock.assert_called_once_with('power_%s' % instance.uuid) - mock_power_off.reset_mock() - mock_lock.reset_mock() - self.assertTrue(vm.power_off( - None, instance, force_immediate=True, timeout=5)) - self.assertEqual(0, mock_power_off.call_count) - mock_pop.stop.assert_called_once_with( - entry, opts=mock.ANY, timeout=5) - self.assertEqual('PowerOff(immediate=true, operation=shutdown)', - str(mock_pop.stop.call_args[1]['opts'])) - mock_lock.assert_called_once_with('power_%s' % instance.uuid) - - @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - def test_power_off_negative(self, mock_wrap, mock_power_off): - """Negative tests.""" - instance = objects.Instance(**powervm.TEST_INSTANCE) - mock_wrap.return_value = mock.Mock(state=pvm_bp.LPARState.RUNNING) - - # Raise the expected pypowervm exception - mock_power_off.side_effect = pvm_exc.VMPowerOffFailure( - reason='Something bad.', lpar_nm='TheLPAR') - # We should get a valid Nova exception that the compute manager expects - self.assertRaises(exception.InstancePowerOffFailure, - vm.power_off, None, instance) - - @mock.patch('oslo_concurrency.lockutils.lock', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - @mock.patch('pypowervm.tasks.power.power_on', autospec=True) - @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True) - @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True) - def test_reboot(self, mock_pop, mock_pwroff, mock_pwron, mock_giw, - mock_lock): - entry = mock.Mock() - inst = mock.Mock(uuid='uuid') - mock_giw.return_value = entry - - # VM is in 'not activated' state - entry.state = pvm_bp.LPARState.NOT_ACTIVATED - vm.reboot('adapter', inst, True) - mock_pwron.assert_called_once_with(entry, None) - self.assertEqual(0, mock_pwroff.call_count) - self.assertEqual(0, mock_pop.stop.call_count) - mock_lock.assert_called_once_with('power_uuid') - - mock_pwron.reset_mock() - mock_lock.reset_mock() - - # VM is in an active state - entry.state = pvm_bp.LPARState.RUNNING - vm.reboot('adapter', inst, True) - self.assertEqual(0, mock_pwron.call_count) - self.assertEqual(0, mock_pwroff.call_count) - mock_pop.stop.assert_called_once_with(entry, opts=mock.ANY) - self.assertEqual( - 'PowerOff(immediate=true, operation=shutdown, restart=true)', - str(mock_pop.stop.call_args[1]['opts'])) - mock_lock.assert_called_once_with('power_uuid') - - mock_pop.stop.reset_mock() - mock_lock.reset_mock() - - # Same, but soft - vm.reboot('adapter', inst, False) - self.assertEqual(0, mock_pwron.call_count) - mock_pwroff.assert_called_once_with(entry, restart=True) - self.assertEqual(0, mock_pop.stop.call_count) - 
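        # The three dispatch paths this test walks: a powered-off LPAR is
        # simply started via power.power_on; a hard reboot issues a single
        # immediate PowerOp.stop with restart=true; a soft reboot goes
        # through power_off_progressive(restart=True), which attempts a
        # clean OS shutdown before escalating.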
mock_lock.assert_called_once_with('power_uuid') - - mock_pwroff.reset_mock() - mock_lock.reset_mock() - - # Exception path - mock_pwroff.side_effect = Exception() - self.assertRaises(exception.InstanceRebootFailure, vm.reboot, - 'adapter', inst, False) - self.assertEqual(0, mock_pwron.call_count) - mock_pwroff.assert_called_once_with(entry, restart=True) - self.assertEqual(0, mock_pop.stop.call_count) - mock_lock.assert_called_once_with('power_uuid') - - def test_get_pvm_uuid(self): - - nova_uuid = "dbbb48f1-2406-4019-98af-1c16d3df0204" - # Test with uuid string - self.assertEqual('5BBB48F1-2406-4019-98AF-1C16D3DF0204', - vm.get_pvm_uuid(nova_uuid)) - - mock_inst = mock.Mock(uuid=nova_uuid) - # Test with instance object - self.assertEqual('5BBB48F1-2406-4019-98AF-1C16D3DF0204', - vm.get_pvm_uuid(mock_inst)) - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp', autospec=True) - def test_instance_exists(self, mock_getvmqp, mock_getuuid): - # Try the good case where it exists - mock_getvmqp.side_effect = 'fake_state' - mock_parms = (mock.Mock(), mock.Mock()) - self.assertTrue(vm.instance_exists(*mock_parms)) - - # Test the scenario where it does not exist. - mock_getvmqp.side_effect = exception.InstanceNotFound(instance_id=123) - self.assertFalse(vm.instance_exists(*mock_parms)) - - def test_get_vm_qp(self): - def adapter_read(root_type, root_id=None, suffix_type=None, - suffix_parm=None, helpers=None): - json_str = (u'{"IsVirtualServiceAttentionLEDOn":"false","Migration' - u'State":"Not_Migrating","CurrentProcessingUnits":0.1,' - u'"ProgressState":null,"PartitionType":"AIX/Linux","Pa' - u'rtitionID":1,"AllocatedVirtualProcessors":1,"Partiti' - u'onState":"not activated","RemoteRestartState":"Inval' - u'id","OperatingSystemVersion":"Unknown","AssociatedMa' - u'nagedSystem":"https://9.1.2.3:12443/rest/api/uom/Man' - u'agedSystem/98498bed-c78a-3a4f-b90a-4b715418fcb6","RM' - u'CState":"inactive","PowerManagementMode":null,"Parti' - u'tionName":"lpar-1-06674231-lpar","HasDedicatedProces' - u'sors":"false","ResourceMonitoringIPAddress":null,"Re' - u'ferenceCode":"00000000","CurrentProcessors":null,"Cu' - u'rrentMemory":512,"SharingMode":"uncapped"}') - self.assertEqual('LogicalPartition', root_type) - self.assertEqual('lpar_uuid', root_id) - self.assertEqual('quick', suffix_type) - resp = mock.MagicMock() - if suffix_parm is None: - resp.body = json_str - elif suffix_parm == 'PartitionID': - resp.body = '1' - elif suffix_parm == 'CurrentProcessingUnits': - resp.body = '0.1' - elif suffix_parm == 'AssociatedManagedSystem': - # The double quotes are important - resp.body = ('"https://9.1.2.3:12443/rest/api/uom/ManagedSyste' - 'm/98498bed-c78a-3a4f-b90a-4b715418fcb6"') - else: - self.fail('Unhandled quick property key %s' % suffix_parm) - return resp - - def adpt_read_no_log(*args, **kwds): - helpers = kwds['helpers'] - try: - helpers.index(pvm_log.log_helper) - except ValueError: - # Successful path since the logger shouldn't be there - return adapter_read(*args, **kwds) - - self.fail('Log helper was found when it should not be') - - ms_href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/98498bed-' - 'c78a-3a4f-b90a-4b715418fcb6') - self.apt.read.side_effect = adapter_read - self.assertEqual(1, vm.get_vm_id(self.apt, 'lpar_uuid')) - self.assertEqual(ms_href, vm.get_vm_qp(self.apt, 'lpar_uuid', - 'AssociatedManagedSystem')) - self.apt.read.side_effect = adpt_read_no_log - self.assertEqual(0.1, vm.get_vm_qp(self.apt, 
'lpar_uuid', - 'CurrentProcessingUnits', - log_errors=False)) - qp_dict = vm.get_vm_qp(self.apt, 'lpar_uuid', log_errors=False) - self.assertEqual(ms_href, qp_dict['AssociatedManagedSystem']) - self.assertEqual(1, qp_dict['PartitionID']) - self.assertEqual(0.1, qp_dict['CurrentProcessingUnits']) - - resp = mock.MagicMock() - resp.status = 404 - self.apt.read.side_effect = pvm_exc.HttpNotFound(resp) - self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt, - 'lpar_uuid', log_errors=False) - - self.apt.read.side_effect = pvm_exc.Error("message", response=None) - self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt, - 'lpar_uuid', log_errors=False) - - resp.status = 500 - self.apt.read.side_effect = pvm_exc.Error("message", response=resp) - self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt, - 'lpar_uuid', log_errors=False) - - def test_norm_mac(self): - EXPECTED = "12:34:56:78:90:ab" - self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:ab")) - self.assertEqual(EXPECTED, vm.norm_mac("1234567890ab")) - self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:AB")) - self.assertEqual(EXPECTED, vm.norm_mac("1234567890AB")) - - @mock.patch('pypowervm.tasks.ibmi.update_ibmi_settings', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper', - autospec=True) - def test_update_ibmi_settings(self, mock_lparw, mock_ibmi): - instance = mock.MagicMock() - - # Test update load source with vscsi boot - boot_type = 'vscsi' - vm.update_ibmi_settings(self.apt, instance, boot_type) - mock_ibmi.assert_called_once_with(self.apt, mock.ANY, 'vscsi') - mock_ibmi.reset_mock() - - # Test update load source with npiv boot - boot_type = 'npiv' - vm.update_ibmi_settings(self.apt, instance, boot_type) - mock_ibmi.assert_called_once_with(self.apt, mock.ANY, 'npiv') - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('pypowervm.wrappers.network.CNA.search') - @mock.patch('pypowervm.wrappers.network.CNA.get') - def test_get_cnas(self, mock_get, mock_search, mock_uuid): - # No kwargs: get - self.assertEqual(mock_get.return_value, vm.get_cnas(self.apt, 'inst')) - mock_uuid.assert_called_once_with('inst') - mock_get.assert_called_once_with(self.apt, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_uuid.return_value) - mock_search.assert_not_called() - # With kwargs: search - mock_get.reset_mock() - mock_uuid.reset_mock() - self.assertEqual(mock_search.return_value, vm.get_cnas( - self.apt, 'inst', one=2, three=4)) - mock_uuid.assert_called_once_with('inst') - mock_search.assert_called_once_with( - self.apt, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_uuid.return_value, one=2, three=4) - mock_get.assert_not_called() - - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('pypowervm.wrappers.iocard.VNIC.search') - @mock.patch('pypowervm.wrappers.iocard.VNIC.get') - def test_get_vnics(self, mock_get, mock_search, mock_uuid): - # No kwargs: get - self.assertEqual(mock_get.return_value, vm.get_vnics(self.apt, 'inst')) - mock_uuid.assert_called_once_with('inst') - mock_get.assert_called_once_with(self.apt, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_uuid.return_value) - mock_search.assert_not_called() - # With kwargs: search - mock_get.reset_mock() - mock_uuid.reset_mock() - self.assertEqual(mock_search.return_value, vm.get_vnics( - self.apt, 'inst', one=2, three=4)) - mock_uuid.assert_called_once_with('inst') - mock_search.assert_called_once_with( - self.apt, parent_type=pvm_lpar.LPAR, - parent_uuid=mock_uuid.return_value, one=2, three=4) - 
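        # Same dispatch convention as get_cnas above: a bare call issues a
        # plain GET of the feed, while any keyword arguments are delegated
        # to the wrapper's search API as filter criteria.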
mock_get.assert_not_called() diff --git a/nova_powervm/tests/virt/powervm/volume/__init__.py b/nova_powervm/tests/virt/powervm/volume/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/tests/virt/powervm/volume/test_driver.py b/nova_powervm/tests/virt/powervm/volume/test_driver.py deleted file mode 100644 index 48295bba..00000000 --- a/nova_powervm/tests/virt/powervm/volume/test_driver.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import six - -from nova import test - -from nova_powervm.virt.powervm import volume -from nova_powervm.virt.powervm.volume import gpfs -from nova_powervm.virt.powervm.volume import iscsi -from nova_powervm.virt.powervm.volume import local -from nova_powervm.virt.powervm.volume import nfs -from nova_powervm.virt.powervm.volume import npiv -from nova_powervm.virt.powervm.volume import vscsi - - -class TestVolumeAdapter(test.NoDBTestCase): - - def setUp(self): - super(TestVolumeAdapter, self).setUp() - - # Enable passing through the can attach/detach checks - self.mock_get_inst_wrap_p = mock.patch('nova_powervm.virt.powervm.vm.' - 'get_instance_wrapper') - self.mock_get_inst_wrap = self.mock_get_inst_wrap_p.start() - self.addCleanup(self.mock_get_inst_wrap_p.stop) - self.mock_inst_wrap = mock.MagicMock() - self.mock_inst_wrap.can_modify_io.return_value = (True, None) - self.mock_get_inst_wrap.return_value = self.mock_inst_wrap - - -class TestInitMethods(test.NoDBTestCase): - - # Volume driver types to classes - volume_drivers = { - 'iscsi': iscsi.IscsiVolumeAdapter, - 'local': local.LocalVolumeAdapter, - 'nfs': nfs.NFSVolumeAdapter, - 'gpfs': gpfs.GPFSVolumeAdapter, - } - - def test_get_volume_class(self): - for vol_type, class_type in six.iteritems(self.volume_drivers): - self.assertEqual(class_type, volume.get_volume_class(vol_type)) - - # Try the fibre as vscsi - self.flags(fc_attach_strategy='vscsi', group='powervm') - self.assertEqual(vscsi.PVVscsiFCVolumeAdapter, - volume.get_volume_class('fibre_channel')) - - # Try the fibre as npiv - self.flags(fc_attach_strategy='npiv', group='powervm') - self.assertEqual(npiv.NPIVVolumeAdapter, - volume.get_volume_class('fibre_channel')) - - def test_build_volume_driver(self): - for vol_type, class_type in six.iteritems(self.volume_drivers): - vdrv = volume.build_volume_driver( - mock.Mock(), "abc123", mock.Mock(uuid='abc1'), - {'driver_volume_type': vol_type}) - self.assertIsInstance(vdrv, class_type) - - # Try the fibre as vscsi - self.flags(fc_attach_strategy='vscsi', group='powervm') - vdrv = volume.build_volume_driver( - mock.Mock(), "abc123", mock.Mock(uuid='abc1'), - {'driver_volume_type': 'fibre_channel'}) - self.assertIsInstance(vdrv, vscsi.PVVscsiFCVolumeAdapter) - - # Try the fibre as npiv - self.flags(fc_attach_strategy='npiv', group='powervm') - vdrv = volume.build_volume_driver( - mock.Mock(), "abc123", mock.Mock(uuid='abc1'), - {'driver_volume_type': 
'fibre_channel'}) - self.assertIsInstance(vdrv, npiv.NPIVVolumeAdapter) - - def test_hostname_for_volume(self): - self.flags(host='test_host') - mock_instance = mock.Mock() - mock_instance.name = 'instance' - - # Try the fibre as vscsi - self.flags(fc_attach_strategy='vscsi', group='powervm') - self.assertEqual("test_host", - volume.get_hostname_for_volume(mock_instance)) - - # Try the fibre as npiv - self.flags(fc_attach_strategy='npiv', group='powervm') - self.assertEqual("test_host_instance", - volume.get_hostname_for_volume(mock_instance)) - - # NPIV with long host name - self.flags(host='really_long_host_name_too_long') - self.assertEqual("really_long_host_nam_instance", - volume.get_hostname_for_volume(mock_instance)) diff --git a/nova_powervm/tests/virt/powervm/volume/test_fileio.py b/nova_powervm/tests/virt/powervm/volume/test_fileio.py deleted file mode 100644 index 115bad3f..00000000 --- a/nova_powervm/tests/virt/powervm/volume/test_fileio.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm.volume import fileio as v_drv -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.wrappers import base_partition as pvm_bp -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - - -class FakeFileIOVolAdapter(v_drv.FileIOVolumeAdapter): - """Subclass for FileIOVolumeAdapter, since it is abstract.""" - - def __init__(self, adapter, host_uuid, instance, connection_info, - stg_ftsk=None): - super(FakeFileIOVolAdapter, self).__init__( - adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk) - - def _get_path(self): - return "fake_path" - - -class TestFileIOVolumeAdapter(test_vol.TestVolumeAdapter): - """Tests the FileIOVolumeAdapter. 
-    NovaLink is an I/O host."""
-
-    def setUp(self):
-        super(TestFileIOVolumeAdapter, self).setUp()
-
-        # Needed for the volume adapter
-        self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
-        mock_inst = mock.MagicMock(uuid='2BC123')
-
-        self.vol_drv = FakeFileIOVolAdapter(
-            self.adpt, 'host_uuid', mock_inst,
-            {'data': {'volume_id': 'a_vol_id'},
-             'serial': 'volid1'})
-
-        self.fake_vios = pvm_vios.VIOS.bld(
-            self.adpt, 'vios1',
-            pvm_bp.PartitionMemoryConfiguration.bld(self.adpt, 1024),
-            pvm_bp.PartitionMemoryConfiguration.bld(self.adpt, 0.1, 1))
-        self.feed = [pvm_vios.VIOS.wrap(self.fake_vios.entry)]
-        ftskfx = pvm_fx.FeedTaskFx(self.feed)
-        self.useFixture(ftskfx)
-
-    def test_min_xags(self):
-        """Ensures min_xags returns only the SCSI mapping XAG."""
-        self.assertEqual([pvm_const.XAG.VIO_SMAP], self.vol_drv.min_xags())
-
-    @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
-    @mock.patch('pypowervm.entities.Entry.uuid',
-                new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.register_vscsi_mapping')
-    @mock.patch('pypowervm.tasks.client_storage.udid_to_scsi_mapping')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
-    @mock.patch('pypowervm.wrappers.storage.FileIO.bld')
-    def test_connect_volume(self, mock_file_bld, mock_get_mgmt_partition,
-                            mock_get_vm_id, mock_udid_to_map, mock_reg_map,
-                            mock_get_vios_uuid, mock_build_map, mock_add_map):
-        # Mockups
-        mock_file = mock.Mock()
-        mock_file_bld.return_value = mock_file
-        mock_slot_mgr = mock.MagicMock()
-        mock_slot_mgr.build_map.get_vscsi_slot.return_value = 4, 'fake_path'
-
-        mock_vios = mock.Mock(uuid='uuid1')
-        mock_get_mgmt_partition.return_value = mock_vios
-        mock_get_vios_uuid.return_value = 'uuid1'
-        mock_get_vm_id.return_value = 'partition_id'
-
-        mock_udid_to_map.return_value = mock.Mock()
-        mock_add_map.return_value = None
-
-        # Invoke
-        self.vol_drv.connect_volume(mock_slot_mgr)
-
-        # Validate
-        mock_file_bld.assert_called_once_with(
-            self.adpt, 'fake_path',
-            backstore_type=pvm_stg.BackStoreType.LOOP, tag='a_vol_id')
-        self.assertEqual(1, mock_build_map.call_count)
-        self.assertEqual(1, mock_udid_to_map.call_count)
-
-    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
-    @mock.patch('pypowervm.entities.Entry.uuid',
-                new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.register_vscsi_mapping')
-    @mock.patch('pypowervm.tasks.client_storage.udid_to_scsi_mapping')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
-    @mock.patch('pypowervm.wrappers.storage.FileIO.bld')
-    def test_connect_volume_rebuild_no_slot(
-            self, mock_file_bld, mock_get_mgmt_partition, mock_get_vm_id,
-            mock_udid_to_map, mock_reg_map, mock_get_vios_uuid,
-            mock_build_map):
-        # Mockups
-        mock_file = mock.Mock()
-        mock_file_bld.return_value = mock_file
-        mock_slot_mgr = mock.MagicMock()
-        mock_slot_mgr.is_rebuild = True
-        mock_slot_mgr.build_map.get_vscsi_slot.return_value = None, None
-
-        mock_vios = mock.Mock(uuid='uuid1')
-        mock_get_mgmt_partition.return_value = mock_vios
-        mock_get_vios_uuid.return_value = 'uuid1'
-
-        # Invoke
-        self.vol_drv.connect_volume(mock_slot_mgr)
-
-        # Validate
-        mock_file_bld.assert_called_once_with(
-            self.adpt, 'fake_path',
-            backstore_type=pvm_stg.BackStoreType.LOOP, tag='a_vol_id')
-        self.assertEqual(0, mock_build_map.call_count)
-
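The connect path pinned down by the two tests above is compact enough to
sketch. Roughly, and assuming a pypowervm Adapter (adpt), a VIOS wrapper
(vios_w) and the host/client LPAR UUIDs are already in hand (the helper name
is illustrative, not part of the driver):

    from pypowervm.tasks import scsi_mapper
    from pypowervm.wrappers import storage as pvm_stg

    def attach_file_volume(adpt, vios_w, host_uuid, lpar_uuid, path, vol_id):
        # Wrap the backing file as a loop-backed FileIO storage element.
        stg = pvm_stg.FileIO.bld(
            adpt, path, backstore_type=pvm_stg.BackStoreType.LOOP,
            tag=vol_id)
        # Build the vSCSI mapping from the VIOS to the client LPAR and
        # attach it. On a rebuild with no stored slot, the tests above
        # expect this mapping step to be skipped entirely.
        mapping = scsi_mapper.build_vscsi_mapping(
            host_uuid, vios_w, lpar_uuid, stg)
        return scsi_mapper.add_map(vios_w, mapping)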
@mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True) - @mock.patch('pypowervm.tasks.storage.rescan_vstor', autospec=True) - def test_extend_volume(self, mock_rescan, mock_get_mgmt_partition): - # FileIO driver can only have 1 uuid in vol_drv.vios_uuids - mock_vios = mock.Mock(uuid='uuid1') - mock_get_mgmt_partition.return_value = mock_vios - self.vol_drv.extend_volume() - mock_rescan.assert_called_once_with(self.vol_drv.vios_uuids[0], - "fake_path", adapter=self.adpt) - mock_rescan.side_effect = pvm_exc.JobRequestFailed( - operation_name='RescanVirtualDisk', error='fake_err') - self.assertRaises(p_exc.VolumeExtendFailed, self.vol_drv.extend_volume) - mock_rescan.side_effect = pvm_exc.VstorNotFound( - stor_udid='stor_udid', vios_uuid='uuid') - self.assertRaises(p_exc.VolumeExtendFailed, self.vol_drv.extend_volume) - - @mock.patch('pypowervm.entities.Entry.uuid', - new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition') - @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func') - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps') - def test_disconnect_volume(self, mock_find_maps, mock_gen_match_func, - mock_get_mgmt_partition, mock_entry_uuid): - # Mockups - mock_slot_mgr = mock.MagicMock() - - mock_vios = mock.Mock(uuid='uuid1') - mock_get_mgmt_partition.return_value = mock_vios - - mock_match_func = mock.Mock() - mock_gen_match_func.return_value = mock_match_func - mock_entry_uuid.return_value = 'uuid1' - # Invoke - self.vol_drv._disconnect_volume(mock_slot_mgr) - - # Validate - mock_gen_match_func.assert_called_once_with( - pvm_stg.VDisk, names=['fake_path']) - mock_find_maps.assert_called_once_with( - mock.ANY, - client_lpar_id='2BC123', match_func=mock_match_func) - - @mock.patch('os.path.exists') - def test_pre_live_migration_on_destination(self, mock_path_exists): - mock_path_exists.return_value = False - self.assertRaises( - p_exc.VolumePreMigrationFailed, - self.vol_drv.pre_live_migration_on_destination, mock.ANY) - - @mock.patch('nova_powervm.virt.powervm.volume.fileio.FileIOVolumeAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - def test_is_volume_on_vios(self, mock_vios_uuids): - mock_vios_uuids.return_value = ['uuid1'] - vol_found, vol_path = self.vol_drv.is_volume_on_vios( - mock.Mock(uuid='uuid2')) - self.assertFalse(vol_found) - self.assertIsNone(vol_path) diff --git a/nova_powervm/tests/virt/powervm/volume/test_gpfs.py b/nova_powervm/tests/virt/powervm/volume/test_gpfs.py deleted file mode 100644 index 3348f28f..00000000 --- a/nova_powervm/tests/virt/powervm/volume/test_gpfs.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol -from nova_powervm.virt.powervm.volume import gpfs as v_drv - - -class TestGPFSVolumeAdapter(test_vol.TestVolumeAdapter): - """Tests the GPFSVolumeAdapter. 
-    NovaLink is an I/O host."""
-
-    def setUp(self):
-        super(TestGPFSVolumeAdapter, self).setUp()
-
-        # Needed for the volume adapter
-        self.adpt = mock.Mock()
-        mock_inst = mock.MagicMock(uuid='2BC123')
-
-        # Connection Info
-        mock_conn_info = {'data': {'device_path': '/gpfs/path'}}
-
-        self.vol_drv = v_drv.GPFSVolumeAdapter(
-            self.adpt, 'host_uuid', mock_inst, mock_conn_info)
-
-    def test_get_path(self):
-        self.assertEqual('/gpfs/path', self.vol_drv._get_path())
diff --git a/nova_powervm/tests/virt/powervm/volume/test_iscsi.py b/nova_powervm/tests/virt/powervm/volume/test_iscsi.py
deleted file mode 100644
index 45561e81..00000000
--- a/nova_powervm/tests/virt/powervm/volume/test_iscsi.py
+++ /dev/null
@@ -1,669 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import exception as nova_exc
-
-from nova_powervm import conf as cfg
-from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
-from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.volume import iscsi
-
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import hdisk
-from pypowervm.tests.tasks.util import load_file
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-CONF = cfg.CONF
-
-VIOS_FEED = 'fake_vios_feed.txt'
-
-
-class TestISCSIAdapter(test_vol.TestVolumeAdapter):
-    """Tests the iSCSI Volume Connector Adapter.
Single VIOS tests""" - - def setUp(self): - super(TestISCSIAdapter, self).setUp() - self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt - - self.vios_feed_resp = load_file(VIOS_FEED) - - self.feed = pvm_vios.VIOS.wrap(self.vios_feed_resp) - self.ft_fx = pvm_fx.FeedTaskFx(self.feed) - self.useFixture(self.ft_fx) - - self.adpt.read.return_value = self.vios_feed_resp - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', - autospec=True) - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi_initiator', - autospec=True) - def init_vol_adpt(mock_initiator, mock_mgmt_part, mock_pvm_uuid, - mock_vios): - self.iqn = 'iqn.2016-08.com.foo:bar' - self.lun = 1 - self.host_ip = '10.0.0.1' - self.user = 'user' - self.password = 'password' - self.serial = 'f042c68a-c5a5-476a-ba34-2f6d43f4226c' - con_info = { - 'serial': self.serial, - 'driver_volume_type': 'iscsi', - 'connector': {}, - 'data': { - 'target_iqn': self.iqn, - 'target_lun': self.lun, - 'target_portal': self.host_ip, - 'auth_username': self.user, - 'auth_password': self.password, - 'volume_id': 'a_volume_id', - 'auth_method': 'CHAP' - }, - } - self.auth_method = 'CHAP' - multi_con_info = { - 'serial': self.serial, - 'driver_volume_type': 'iser', - 'connector': {'multipath': True}, - 'data': { - 'target_iqn': self.iqn, - 'target_lun': self.lun, - 'target_portal': self.host_ip, - 'auth_method': self.auth_method, - 'auth_username': self.user, - 'auth_password': self.password, - 'discovery_auth_method': self.auth_method, - 'discovery_auth_username': self.user, - 'discovery_auth_password': self.password, - 'target_iqns': [self.iqn], - 'target_luns': [self.lun], - 'target_portals': [self.host_ip], - 'volume_id': 'b_volume_id', - }, - } - mock_inst = mock.MagicMock() - mock_pvm_uuid.return_value = '1234' - mock_initiator.return_value = 'initiator iqn' - # The getter can just return the VIOS values (to remove a read - # that would otherwise need to be mocked). 
- mock_vios.getter.return_value = self.feed - single_path = iscsi.IscsiVolumeAdapter(self.adpt, 'host_uuid', - mock_inst, con_info) - multi_path = iscsi.IscsiVolumeAdapter(self.adpt, 'host_uuid', - mock_inst, multi_con_info) - return single_path, multi_path - self.vol_drv, self.multi_vol_drv = init_vol_adpt() - - # setup system_metadata tests - self.devname = "/dev/fake" - self.slot_mgr = mock.Mock() - self.slot_mgr.build_map.get_vscsi_slot.return_value = 62, 'the_lua' - - @mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter' - '._validate_vios_on_connection', new=mock.Mock()) - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('pypowervm.tasks.hdisk.lua_recovery', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume(self, mock_get_vm_id, mock_lua_recovery, - mock_build_map, mock_add_map, mock_discover): - # The mock return values - mock_lua_recovery.return_value = ( - hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') - mock_get_vm_id.return_value = '2' - mock_discover.return_value = '/dev/fake', 'fake_udid' - - def build_map_func(host_uuid, vios_w, lpar_uuid, pv, - lpar_slot_num=None, lua=None, target_name=None): - self.assertEqual('host_uuid', host_uuid) - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('1234', lpar_uuid) - self.assertIsInstance(pv, pvm_stor.PV) - self.assertEqual('_volume_id', pv.tag[1:]) - self.assertEqual(62, lpar_slot_num) - self.assertEqual('the_lua', lua) - return 'fake_map' - - mock_build_map.side_effect = build_map_func - # Run the method - self.vol_drv.connect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. 
- self.assertEqual(2, mock_add_map.call_count) - self.assertEqual(2, self.ft_fx.patchers['update'].mock.call_count) - self.assertEqual(2, mock_build_map.call_count) - - calls = [mock.call(self.adpt, self.host_ip, self.user, self.password, - self.iqn, self.feed[0].uuid, lunid=self.lun, - multipath=False, iface_name='default', - discovery_auth=None, discovery_username=None, - auth='CHAP', discovery_password=None), - mock.call(self.adpt, self.host_ip, self.user, self.password, - self.iqn, self.feed[1].uuid, lunid=self.lun, - multipath=False, iface_name='default', - discovery_auth=None, discovery_username=None, - auth='CHAP', discovery_password=None)] - multi_calls = [ - mock.call(self.adpt, [self.host_ip], self.user, self.password, - [self.iqn], self.feed[0].uuid, lunid=[self.lun], - iface_name='iser', multipath=True, - auth=self.auth_method, discovery_auth=self.auth_method, - discovery_username=self.user, - discovery_password=self.password), - mock.call(self.adpt, [self.host_ip], self.user, self.password, - [self.iqn], self.feed[1].uuid, lunid=[self.lun], - iface_name='iser', multipath=True, - auth=self.auth_method, discovery_auth=self.auth_method, - discovery_username=self.user, - discovery_password=self.password)] - mock_discover.assert_has_calls(calls, any_order=True) - self.multi_vol_drv.connect_volume(self.slot_mgr) - mock_discover.assert_has_calls(multi_calls, any_order=True) - - @mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter' - '._validate_vios_on_connection', new=mock.Mock()) - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('pypowervm.tasks.hdisk.lua_recovery', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_noauth(self, mock_get_vm_id, mock_lua_recovery, - mock_build_map, - mock_add_map, mock_discover): - # The mock return values - mock_lua_recovery.return_value = ( - hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') - mock_get_vm_id.return_value = '2' - mock_discover.return_value = '/dev/fake', 'fake_udid' - - def build_map_func(host_uuid, vios_w, lpar_uuid, pv, - lpar_slot_num=None, lua=None, target_name=None): - self.assertEqual('host_uuid', host_uuid) - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('1234', lpar_uuid) - self.assertIsInstance(pv, pvm_stor.PV) - self.assertEqual('_volume_id', pv.tag[1:]) - self.assertEqual(62, lpar_slot_num) - self.assertEqual('the_lua', lua) - return 'fake_map' - - mock_build_map.side_effect = build_map_func - # connect without using CHAP authentication. 
- self.vol_drv.connection_info['data'].pop('auth_method') - mock_discover.return_value = '/dev/fake', 'fake_udid2' - mock_add_map.reset_mock() - mock_discover.reset_mock() - self.vol_drv.connect_volume(self.slot_mgr) - calls = [mock.call(self.adpt, self.host_ip, None, None, - self.iqn, self.feed[0].uuid, lunid=self.lun, - multipath=False, iface_name='default', - discovery_auth=None, discovery_username=None, - auth=None, discovery_password=None), - mock.call(self.adpt, self.host_ip, None, None, - self.iqn, self.feed[1].uuid, lunid=self.lun, - multipath=False, iface_name='default', - discovery_auth=None, discovery_username=None, - auth=None, discovery_password=None)] - mock_discover.assert_has_calls(calls, any_order=True) - - @mock.patch('nova_powervm.virt.powervm.volume.volume.VscsiVolumeAdapter' - '._validate_vios_on_connection') - @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping', - autospec=True) - @mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_active_vios(self, mock_get_vm_id, mock_discover, - mock_vios_uuids, mock_build_map, - mock_add_map, mock_validate_vios): - # Mockups - mock_build_map.return_value = 'fake_map' - mock_get_vm_id.return_value = '2' - mock_add_map.return_value = None - mock_get_vm_id.return_value = 'partition_id' - mock_discover.return_value = '/dev/fake', 'fake_udid' - vios_ids = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA', - '7DBBE705-E4C4-4458-8223-3EBE07015CA9'] - mock_vios_uuids.return_value = vios_ids - - self.multi_vol_drv.connect_volume(self.slot_mgr) - self.assertEqual(2, mock_discover.call_count) - - # If the vios entries exists in the list - mock_discover.reset_mock() - mock_discover.return_value = '/dev/fake2', 'fake_udid2' - mock_vios_uuids.return_value = [vios_ids[0]] - self.multi_vol_drv.connect_volume(self.slot_mgr) - # Check if discover iscsi is called - self.assertEqual(1, mock_discover.call_count) - - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_discover_fail(self, mock_get_vm_id, mock_discover): - mock_get_vm_id.return_value = '2' - mock_discover.side_effect = pvm_exc.ISCSIDiscoveryFailed( - vios_uuid='fake_vios', status='fake_status') - - # Run the method - self.assertRaises(p_exc.VolumeAttachFailed, - self.vol_drv.connect_volume, self.slot_mgr) - - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_job_fail(self, mock_get_vm_id, mock_discover): - mock_get_vm_id.return_value = '2' - mock_discover.side_effect = pvm_exc.JobRequestFailed( - operation_name='ISCSIDiscovery', error='fake_err') - - # Run the method - self.assertRaises(p_exc.VolumeAttachFailed, - self.multi_vol_drv.connect_volume, self.slot_mgr) - - @mock.patch('pypowervm.tasks.partition.get_active_vioses', autospec=True) - @mock.patch('pypowervm.tasks.storage.rescan_vstor', autospec=True) - def test_extend_volume(self, mock_rescan, mock_active_vioses): - self.vol_drv._set_udid("vstor_uuid") - mock_vios = mock.Mock(uuid='fake_uuid') - # Test single vios - mock_active_vioses.return_value = [mock_vios] - self.vol_drv.extend_volume() - 
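        # The assertions below pin down the extend error semantics: a failed
        # rescan (JobRequestFailed) on any VIOS surfaces as
        # VolumeExtendFailed, while VstorNotFound after another VIOS has
        # already rescanned successfully is tolerated, since the volume is
        # simply not mapped through that VIOS.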
mock_rescan.assert_called_once_with(self.vol_drv.vios_uuids[0], - "vstor_uuid", adapter=self.adpt) - mock_rescan.side_effect = pvm_exc.JobRequestFailed( - operation_name='RescanVirtualDisk', error='fake_err') - self.assertRaises(p_exc.VolumeExtendFailed, self.vol_drv.extend_volume) - mock_rescan.side_effect = pvm_exc.VstorNotFound( - stor_udid='stor_udid', vios_uuid='uuid') - self.assertRaises(p_exc.VolumeExtendFailed, self.vol_drv.extend_volume) - - # Test multiple vios - mock_active_vioses.return_value = [mock_vios, mock_vios] - mock_rescan.reset_mock() - mock_rescan.side_effect = [pvm_exc.JobRequestFailed( - operation_name='RescanVirtualDisk', error='fake_err'), None] - self.assertRaises(p_exc.VolumeExtendFailed, self.vol_drv.extend_volume) - self.assertEqual(2, mock_rescan.call_count) - mock_rescan.reset_mock() - mock_rescan.side_effect = [None, pvm_exc.VstorNotFound( - stor_udid='stor_udid', vios_uuid='uuid')] - self.vol_drv.extend_volume() - self.assertEqual(2, mock_rescan.call_count) - - self.vol_drv._set_udid(None) - self.assertRaises(nova_exc.InvalidBDM, self.vol_drv.extend_volume) - - @mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.' - 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.hdisk.remove_iscsi', autospec=True) - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid', - autospec=True) - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True) - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_disconnect_on_active_vioses(self, mock_get_vm_id, - mock_remove_maps, - mock_hdisk_from_uuid, - mock_remove_iscsi, - mock_vios_uuids): - # The mock return values - mock_hdisk_from_uuid.return_value = 'device_name' - mock_get_vm_id.return_value = '2' - self.multi_vol_drv._set_udid('vstor_uuid') - mock_remove_maps.return_value = 'removed' - vios_ids = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA', - '7DBBE705-E4C4-4458-8223-3EBE07015CA9'] - mock_vios_uuids.return_value = vios_ids - - # Run the method - self.multi_vol_drv.disconnect_volume(self.slot_mgr) - self.assertEqual(2, mock_remove_iscsi.call_count) - self.assertEqual(2, mock_remove_maps.call_count) - - @mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.' 
-                'vios_uuids', new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.hdisk.remove_iscsi', autospec=True)
-    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid',
-                autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    def test_disconnect_on_single_vios(self, mock_get_vm_id,
-                                       mock_remove_maps,
-                                       mock_hdisk_from_uuid,
-                                       mock_remove_iscsi,
-                                       mock_vios_uuids):
-        # The mock return values
-        mock_hdisk_from_uuid.return_value = 'device_name'
-        mock_get_vm_id.return_value = '2'
-        self.multi_vol_drv._set_udid('vstor_uuid')
-        mock_remove_maps.return_value = 'removed'
-        mock_vios_uuids.return_value = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA']
-
-        # Run the method
-        self.multi_vol_drv.disconnect_volume(self.slot_mgr)
-        self.assertEqual(1, mock_remove_iscsi.call_count)
-        self.assertEqual(1, mock_remove_maps.call_count)
-
-    @mock.patch('pypowervm.tasks.hdisk.remove_iscsi', autospec=True)
-    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid',
-                autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    def test_disconnect_volume(self, mock_get_vm_id, mock_remove_maps,
-                               mock_hdisk_from_uuid, mock_remove_iscsi):
-        # The mock return values
-        mock_hdisk_from_uuid.return_value = 'device_name'
-        mock_get_vm_id.return_value = '2'
-        self.vol_drv._set_udid('vstor_uuid')
-
-        def validate_remove_maps(vios_w, vm_uuid, match_func):
-            self.assertIsInstance(vios_w, pvm_vios.VIOS)
-            self.assertEqual('2', vm_uuid)
-            return 'removed'
-        mock_remove_maps.side_effect = validate_remove_maps
-
-        # Run the method
-        self.vol_drv.disconnect_volume(self.slot_mgr)
-
-        # As initialized above, remove_maps returns a truthy value to
-        # trigger the update.
-        self.assertEqual(2, mock_remove_maps.call_count)
-        self.assertEqual(2, self.ft_fx.patchers['update'].mock.call_count)
-        calls = [mock.call(self.adpt, self.iqn, self.feed[0].uuid,
-                           lun=self.lun, iface_name='default',
-                           portal=self.host_ip, multipath=False),
-                 mock.call(self.adpt, self.iqn, self.feed[1].uuid,
-                           lun=self.lun, iface_name='default',
-                           portal=self.host_ip, multipath=False)]
-        multi_calls = [mock.call(self.adpt, [self.iqn], self.feed[0].uuid,
-                                 lun=[self.lun], iface_name='iser',
-                                 portal=[self.host_ip], multipath=True),
-                       mock.call(self.adpt, [self.iqn], self.feed[1].uuid,
-                                 lun=[self.lun], iface_name='iser',
-                                 portal=[self.host_ip], multipath=True)]
-        mock_remove_iscsi.assert_has_calls(calls, any_order=True)
-        mock_remove_iscsi.reset_mock()
-        self.multi_vol_drv._set_udid('vstor_uuid')
-        self.multi_vol_drv.disconnect_volume(self.slot_mgr)
-        mock_remove_iscsi.assert_has_calls(multi_calls, any_order=True)
-
-    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id', autospec=True)
-    @mock.patch('pypowervm.tasks.hdisk.remove_iscsi', autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
-    def test_disconnect_volume_no_devname(
-            self, mock_remove_maps, mock_remove_iscsi, mock_get_vm_id,
-            mock_hdisk_from_uuid):
-
-        # Ensures that if the device name is not found, the mappings are not
-        # removed and disconnect returns False.
-        self.vol_drv._set_udid("vstor_uuid")
-        mock_hdisk_from_uuid.return_value = None
-        mock_get_vm_id.return_value = '2'
-
-        # Run the method
-        status = self.vol_drv.disconnect_volume(self.slot_mgr)
-
-        # In this case no disconnect should happen
-        self.assertEqual(0, mock_remove_maps.call_count)
-        self.assertEqual(0, mock_remove_iscsi.call_count)
-        mock_hdisk_from_uuid.assert_called_with('vstor_uuid')
-        self.assertFalse(status)
-
-        # Ensures that if the UDID is not found, the mappings are not
-        # removed and disconnect returns False.
-        self.vol_drv._set_udid(None)
-        mock_hdisk_from_uuid.reset_mock()
-
-        # Run the method
-        status = self.vol_drv.disconnect_volume(self.slot_mgr)
-
-        # In this case no disconnect should happen
-        self.assertEqual(0, mock_remove_maps.call_count)
-        self.assertEqual(0, mock_remove_iscsi.call_count)
-        self.assertEqual(0, mock_hdisk_from_uuid.call_count)
-        self.assertFalse(status)
-
-    def test_min_xags(self):
-        xags = self.vol_drv.min_xags()
-        self.assertEqual(1, len(xags))
-        self.assertIn(pvm_const.XAG.VIO_SMAP, xags)
-
-    def test_vol_type(self):
-        self.assertEqual('iscsi', self.vol_drv.vol_type())
-
-    @mock.patch('pypowervm.tasks.partition.get_active_vioses')
-    @mock.patch('pypowervm.tasks.hdisk.discover_iscsi_initiator')
-    def test_get_iscsi_initiators(self, mock_iscsi_init, mock_active_vioses):
-        # Set up mocks and clear out data that may have been set by other
-        # tests
-        iscsi._ISCSI_INITIATORS = dict()
-        mock_iscsi_init.return_value = 'test_initiator'
-
-        vios_ids = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA',
-                    '7DBBE705-E4C4-4458-8223-3EBE07015CA9']
-        vios0 = mock.Mock(uuid=vios_ids[0])
-        vios1 = mock.Mock(uuid=vios_ids[1])
-        mock_active_vioses.return_value = [vios0, vios1]
-
-        expected_output = {
-            '1300C76F-9814-4A4D-B1F0-5B69352A7DEA': 'test_initiator',
-            '7DBBE705-E4C4-4458-8223-3EBE07015CA9': 'test_initiator'
-        }
-
-        self.assertEqual(expected_output,
-                         iscsi.get_iscsi_initiators(self.adpt, vios_ids))
-
-        # Make sure it gets set properly in the backend
-        self.assertEqual(expected_output, iscsi._ISCSI_INITIATORS)
-        self.assertEqual(mock_active_vioses.call_count, 0)
-        self.assertEqual(mock_iscsi_init.call_count, 2)
-
-        # Invoke again; make sure it doesn't call down to the mgmt part again
-        mock_iscsi_init.reset_mock()
-        self.assertEqual(expected_output,
-                         iscsi.get_iscsi_initiators(self.adpt, vios_ids))
-        self.assertEqual(mock_active_vioses.call_count, 0)
-        self.assertEqual(mock_iscsi_init.call_count, 0)
-
-        # Invoke iscsi.get_iscsi_initiators with vios_id=None
-        iscsi._ISCSI_INITIATORS = dict()
-        mock_iscsi_init.reset_mock()
-        self.assertEqual(expected_output,
-                         iscsi.get_iscsi_initiators(self.adpt, None))
-        self.assertEqual(expected_output, iscsi._ISCSI_INITIATORS)
-        self.assertEqual(mock_active_vioses.call_count, 1)
-        self.assertEqual(mock_iscsi_init.call_count, 2)
-
-        # Invoke again with vios_id=None to ensure neither get_active_vioses
-        # nor discover_iscsi_initiator is called
-        mock_iscsi_init.reset_mock()
-        mock_active_vioses.reset_mock()
-        self.assertEqual(expected_output,
-                         iscsi.get_iscsi_initiators(self.adpt, None))
-        self.assertEqual(mock_active_vioses.call_count, 0)
-        self.assertEqual(mock_iscsi_init.call_count, 0)
-
-        # Invoke iscsi.get_iscsi_initiators where discover_iscsi_initiator()
-        # raises an ISCSIDiscoveryFailed exception
-        iscsi._ISCSI_INITIATORS = dict()
-        mock_iscsi_init.reset_mock()
-        mock_iscsi_init.side_effect = pvm_exc.ISCSIDiscoveryFailed(
-            vios_uuid='fake_vios_uid', status="fake_status")
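        # Discovery failures are swallowed rather than propagated: a VIOS
        # whose initiator cannot be read is left out of the result, so both
        # exception paths below expect an empty dict and a module-level
        # cache that stays empty.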
self.assertEqual(dict(), - iscsi.get_iscsi_initiators(self.adpt, vios_ids)) - self.assertEqual(dict(), iscsi._ISCSI_INITIATORS) - - # Invoke iscsi.get_iscsi_initiators with discover_iscsi_initiator() - # raises JobRequestFailed exception - iscsi._ISCSI_INITIATORS = dict() - mock_iscsi_init.reset_mock() - mock_iscsi_init.side_effect = pvm_exc.JobRequestFailed( - operation_name='fake_operation_name', error="fake_error") - self.assertEqual(dict(), - iscsi.get_iscsi_initiators(self.adpt, vios_ids)) - self.assertEqual(dict(), iscsi._ISCSI_INITIATORS) - - def test_get_iscsi_conn_props(self): - # Get the conn props with auth enabled - vios_w = mock.MagicMock() - props = self.vol_drv._get_iscsi_conn_props(vios_w, auth=True) - expected_props = { - 'target_iqn': self.iqn, - 'target_lun': self.lun, - 'target_portal': self.host_ip, - 'auth_username': self.user, - 'auth_password': self.password, - 'auth_method': 'CHAP' - } - self.assertItemsEqual(expected_props, props) - - # Check with multipath enabled - mprops = self.multi_vol_drv._get_iscsi_conn_props(vios_w, auth=True) - multi_props = { - 'discovery_auth_method': self.auth_method, - 'discovery_auth_username': self.user, - 'discovery_auth_password': self.password, - 'target_iqns': [self.iqn], - 'target_luns': [self.lun], - 'target_portals': [self.host_ip] - } - multi_props.update(expected_props) - self.assertItemsEqual(multi_props, mprops) - - # Call without auth props - props = self.vol_drv._get_iscsi_conn_props(vios_w, auth=False) - expected_props.pop('auth_username') - expected_props.pop('auth_password') - expected_props.pop('auth_method') - self.assertItemsEqual(expected_props, props) - - # KeyError - self.vol_drv.connection_info['data'].pop('target_iqn') - props = self.vol_drv._get_iscsi_conn_props(vios_w, auth=False) - self.assertIsNone(props) - - @mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.' 
- 'vios_uuids', new_callable=mock.PropertyMock) - @mock.patch('pypowervm.tasks.hdisk.discover_iscsi') - @mock.patch('pypowervm.tasks.storage.find_stale_lpars') - def test_pre_live_migration(self, mock_fsl, mock_discover, - mock_vios_uuids): - # The mock return values - vios_ids = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA', - '7DBBE705-E4C4-4458-8223-3EBE07015CA9'] - mock_vios_uuids.return_value = vios_ids - - mock_fsl.return_value = [] - mock_discover.return_value = ( - 'devname', 'udid') - - # Run the method - migrate_data = {} - self.vol_drv.pre_live_migration_on_destination(migrate_data) - volume_key = 'vscsi-' + self.serial - self.assertEqual(migrate_data, {volume_key: 'udid'}) - - # Test exception path - mock_discover.return_value = ( - 'devname', None) - - # Run the method - self.assertRaises(p_exc.VolumePreMigrationFailed, - self.vol_drv.pre_live_migration_on_destination, {}) - - # Test when volume discover on a single vios - mock_discover.reset_mock() - mock_discover.side_effect = [('devname', 'udid'), ('devname', None)] - self.vol_drv.pre_live_migration_on_destination(migrate_data) - self.assertEqual(migrate_data, {volume_key: 'udid'}) - self.assertEqual(2, mock_discover.call_count) - - # Test with bad vios_uuid - mock_discover.reset_mock() - mock_vios_uuids.return_value = ['fake_vios'] - self.assertRaises(p_exc.VolumePreMigrationFailed, - self.vol_drv.pre_live_migration_on_destination, {}) - mock_discover.assert_not_called() - - @mock.patch('nova_powervm.virt.powervm.volume.volume.VscsiVolumeAdapter' - '._set_udid', autospec=True) - def test_post_live_migration_at_destination(self, mock_set_udid): - volume_key = 'vscsi-' + self.serial - mig_vol_stor = {volume_key: 'udid'} - self.vol_drv.post_live_migration_at_destination(mig_vol_stor) - mock_set_udid.assert_called_with('udid') - - def test_post_live_migr_source(self): - - # Bad path. volume id not found - bad_data = {'vscsi-BAD': 'udid1'} - # good path. - good_data = {'vscsi-' + self.serial: 'udid1'} - - with mock.patch.object(self.vol_drv, '_cleanup_volume') as mock_cln: - self.vol_drv.post_live_migration_at_source(bad_data) - mock_cln.assert_called_once_with(None) - - mock_cln.reset_mock() - self.vol_drv.post_live_migration_at_source(good_data) - mock_cln.assert_called_once_with('udid1') - - @mock.patch('nova_powervm.virt.powervm.volume.driver.PowerVMVolumeAdapter.' 
- 'vios_uuids', new_callable=mock.PropertyMock) - def test_is_volume_on_vios(self, mock_vios_uuids): - # The mock return values - mock_vios_uuids.return_value = ['fake_vios1', 'fake_vios2'] - - with mock.patch.object(self.vol_drv, - '_discover_volume_on_vios') as mock_discover: - found, udid = self.vol_drv.is_volume_on_vios(self.feed[0]) - mock_discover.assert_not_called() - self.assertFalse(found) - self.assertIsNone(udid) - - mock_discover.reset_mock() - mock_discover.return_value = 'device1', 'udid1' - vios_ids = ['1300C76F-9814-4A4D-B1F0-5B69352A7DEA', - '7DBBE705-E4C4-4458-8223-3EBE07015CA9'] - mock_vios_uuids.return_value = vios_ids - - found, udid = self.vol_drv.is_volume_on_vios(self.feed[0]) - mock_discover.assert_called_once_with(self.feed[0]) - self.assertTrue(found) - self.assertEqual(udid, 'udid1') - - mock_discover.reset_mock() - mock_discover.return_value = None, 'udid1' - found, udid = self.vol_drv.is_volume_on_vios(self.feed[0]) - self.assertFalse(found) - self.assertEqual(udid, 'udid1') - - mock_discover.reset_mock() - mock_discover.return_value = 'device1', None - found, udid = self.vol_drv.is_volume_on_vios(self.feed[0]) - self.assertFalse(found) - self.assertIsNone(udid) diff --git a/nova_powervm/tests/virt/powervm/volume/test_local.py b/nova_powervm/tests/virt/powervm/volume/test_local.py deleted file mode 100644 index 5b2eada5..00000000 --- a/nova_powervm/tests/virt/powervm/volume/test_local.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol -from nova_powervm.virt.powervm.volume import local as v_drv - - -class TestLocalVolumeAdapter(test_vol.TestVolumeAdapter): - """Tests the LocalVolumeAdapter. NovaLink is a I/O host.""" - - def setUp(self): - super(TestLocalVolumeAdapter, self).setUp() - - # Needed for the volume adapter - self.adpt = mock.Mock() - mock_inst = mock.MagicMock(uuid='2BC123') - - # Connection Info - mock_conn_info = {'data': {'device_path': '/local/path'}} - - self.vol_drv = v_drv.LocalVolumeAdapter( - self.adpt, 'host_uuid', mock_inst, mock_conn_info) - - def test_get_path(self): - self.assertEqual('/local/path', self.vol_drv._get_path()) diff --git a/nova_powervm/tests/virt/powervm/volume/test_nfs.py b/nova_powervm/tests/virt/powervm/volume/test_nfs.py deleted file mode 100644 index 38ec69cd..00000000 --- a/nova_powervm/tests/virt/powervm/volume/test_nfs.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
diff --git a/nova_powervm/tests/virt/powervm/volume/test_nfs.py b/nova_powervm/tests/virt/powervm/volume/test_nfs.py
deleted file mode 100644
index 38ec69cd..00000000
--- a/nova_powervm/tests/virt/powervm/volume/test_nfs.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
-from nova_powervm.virt.powervm.volume import nfs as v_drv
-
-
-class TestNFSVolumeAdapter(test_vol.TestVolumeAdapter):
-    """Tests the NFSVolumeAdapter. NovaLink is an I/O host."""
-
-    def setUp(self):
-        super(TestNFSVolumeAdapter, self).setUp()
-
-        # Needed for the volume adapter
-        self.adpt = mock.Mock()
-        mock_inst = mock.MagicMock(uuid='2BC123')
-
-        # Connection Info
-        mock_conn_info = {'data': {'export': '/nfs', 'name': 'path'}}
-
-        self.vol_drv = v_drv.NFSVolumeAdapter(
-            self.adpt, 'host_uuid', mock_inst, mock_conn_info)
-
-    def test_get_path(self):
-        self.assertEqual('/nfs/path', self.vol_drv._get_path())
diff --git a/nova_powervm/tests/virt/powervm/volume/test_npiv.py b/nova_powervm/tests/virt/powervm/volume/test_npiv.py
deleted file mode 100644
index 46788eb6..00000000
--- a/nova_powervm/tests/virt/powervm/volume/test_npiv.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.compute import task_states
-from oslo_serialization import jsonutils
-from pypowervm import const as pvm_const
-from pypowervm.tests.tasks import util as tju
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.tests.test_utils import pvmhttp
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova_powervm import conf as cfg
-from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
-from nova_powervm.virt.powervm import exception as exc
-from nova_powervm.virt.powervm.volume import npiv
-
-VIOS_FEED = 'fake_vios_feed2.txt'
-VIOS_FEED_2 = 'fake_vios_feed.txt'
-
-CONF = cfg.CONF
-
-
-class TestNPIVAdapter(test_vol.TestVolumeAdapter):
-    """Tests the NPIV Volume Connector Adapter."""
-
-    def setUp(self):
-        super(TestNPIVAdapter, self).setUp()
-
-        self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
-        def resp(file_name):
-            return pvmhttp.load_pvm_resp(
-                file_name, adapter=self.adpt).get_response()
-        self.vios_feed_resp = resp(VIOS_FEED)
-        self.wwpn1 = '21000024FF649104'
-        self.wwpn2 = '21000024FF649107'
-        self.vios_uuid = '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089'
-        self.slot_mgr = mock.Mock()
-
-        # Set up the transaction manager
-        feed = pvm_vios.VIOS.wrap(self.vios_feed_resp)
-        self.ft_fx = pvm_fx.FeedTaskFx(feed)
-        self.useFixture(self.ft_fx)
-
-        # Set up the mocks for the internal volume driver
-        name = 'nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.'
- self.mock_port_count_p = mock.patch(name + '_ports_per_fabric') - self.mock_port_count = self.mock_port_count_p.start() - self.mock_port_count.return_value = 1 - - self.mock_fabric_names_p = mock.patch(name + '_fabric_names') - self.mock_fabric_names = self.mock_fabric_names_p.start() - self.mock_fabric_names.return_value = ['A'] - - self.mock_fabric_ports_p = mock.patch(name + '_fabric_ports') - self.mock_fabric_ports = self.mock_fabric_ports_p.start() - self.mock_fabric_ports.return_value = [self.wwpn1, self.wwpn2] - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.getter') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - def init_vol_adpt(mock_pvm_uuid, mock_getter): - con_info = {'serial': 'id', - 'data': {'initiator_target_map': {'i1': ['t1'], - 'i2': ['t2', 't3']}, - 'target_lun': '1'}} - mock_inst = mock.MagicMock() - mock_pvm_uuid.return_value = '1234' - - # The getter can just return the VIOS values (to remove a read - # that would otherwise need to be mocked). - mock_getter.return_value = feed - - return npiv.NPIVVolumeAdapter(self.adpt, 'host_uuid', mock_inst, - con_info) - self.vol_drv = init_vol_adpt() - - def tearDown(self): - super(TestNPIVAdapter, self).tearDown() - - self.mock_port_count_p.stop() - self.mock_fabric_names_p.stop() - self.mock_fabric_ports_p.stop() - - @mock.patch('pypowervm.tasks.vfc_mapper.add_map') - def test_connect_volume(self, mock_add_map): - # Mock - self._basic_system_metadata(npiv.FS_UNMAPPED) - self.slot_mgr.build_map.get_vfc_slots = mock.Mock( - return_value=['62']) - - def add_map(vios_w, host_uuid, vm_uuid, port_map, **kwargs): - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('host_uuid', host_uuid) - self.assertEqual('1234', vm_uuid) - self.assertEqual(('21000024FF649104', 'AA BB'), port_map) - return 'good' - mock_add_map.side_effect = add_map - - # Test connect volume - self.vol_drv.connect_volume(self.slot_mgr) - - # Verify that the appropriate connections were made. 
- self.assertEqual(1, mock_add_map.call_count) - mock_add_map.assert_called_once_with( - mock.ANY, 'host_uuid', '1234', ('21000024FF649104', 'AA BB'), - lpar_slot_num='62') - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - self.assertEqual(npiv.FS_INST_MAPPED, - self.vol_drv._get_fabric_state('A')) - - # Verify the correct post execute methods were added to the feed task - self.assertEqual('fab_slot_A_id', - self.vol_drv.stg_ftsk._post_exec[0].name) - self.assertEqual('fab_A_id', - self.vol_drv.stg_ftsk._post_exec[1].name) - - def test_connect_volume_not_valid(self): - """Validates that a connect will fail if in a bad state.""" - self.mock_inst_wrap.can_modify_io.return_value = False, 'Invalid I/O' - self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.connect_volume, - self.slot_mgr) - - @mock.patch('pypowervm.tasks.vfc_mapper.add_map') - def test_connect_volume_inst_mapped(self, mock_add_map): - """Test if already connected to an instance, don't do anything""" - self._basic_system_metadata(npiv.FS_INST_MAPPED) - mock_add_map.return_value = None - self.slot_mgr.build_map.get_vfc_slots = mock.Mock( - return_value=['62']) - - # Test subsequent connect volume calls when the fabric is mapped with - # inst partition - self.vol_drv.connect_volume(self.slot_mgr) - - # Verify - self.assertEqual(1, mock_add_map.call_count) - self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count) - - # Check the fabric state remains mapped to instance - self.assertEqual(npiv.FS_INST_MAPPED, - self.vol_drv._get_fabric_state('A')) - - @mock.patch('pypowervm.tasks.vfc_mapper.find_vios_for_wwpn') - @mock.patch('pypowervm.tasks.vfc_mapper.derive_npiv_map') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_set_fabric_meta') - def test_ensure_phys_ports(self, set_fabric_meta, derive_npiv_map, - find_vios_for_wwpn): - """Test that an npiv mapping gets rebuilt for stale wwpns""" - - self._basic_system_metadata(npiv.FS_INST_MAPPED, "stale-wwpn") - port_maps = [('stale-wwpn', 'AA BB')] - vios_wraps = [mock.MagicMock()] - fab = 'A' - - # First the 'good' case where output should equal input - port_maps = [(self.wwpn1, 'AA BB')] - find_vios_for_wwpn.return_value = (vios_wraps[0], mock.MagicMock()) - maps = self.vol_drv._ensure_phys_ports_for_system(port_maps, - vios_wraps, fab) - self.assertEqual(port_maps, maps) - find_vios_for_wwpn.assert_called_once() - derive_npiv_map.assert_not_called() - - # Now test the stale case - rebuild - port_maps = [('stale-wwpn', 'AA BB')] - expected_map = [(self.wwpn1, port_maps[0][1])] - derive_npiv_map.return_value = expected_map - find_vios_for_wwpn.return_value = (None, None) - maps = self.vol_drv._ensure_phys_ports_for_system(port_maps, - vios_wraps, fab) - self.assertEqual(expected_map, maps) - derive_npiv_map.assert_called_once() - - def _basic_system_metadata(self, fabric_state, p_wwpn='21000024FF649104'): - meta_fb_key = self.vol_drv._sys_meta_fabric_key('A') - meta_fb_map = '%s,AA,BB' % p_wwpn - meta_st_key = self.vol_drv._sys_fabric_state_key('A') - self.vol_drv.instance.system_metadata = {meta_st_key: fabric_state, - meta_fb_key: meta_fb_map} - - def test_extend_volume(self): - # Ensure the method is implemented - self.vol_drv.extend_volume() - - @mock.patch('pypowervm.tasks.vfc_mapper.remove_maps') - @mock.patch('pypowervm.tasks.vfc_mapper.find_vios_for_vfc_wwpns') - def test_disconnect_volume(self, mock_find_vios, mock_remove_maps): - # Mock Data - self.vol_drv.instance.task_state = 'deleting' - - meta_key = 
self.vol_drv._sys_meta_fabric_key('A') - meta_map = '21000024FF649104,AA,BB,21000024FF649105,CC,DD' - self.vol_drv.instance.system_metadata = {meta_key: meta_map} - mock_find_vios.return_value = (mock.Mock(uuid=self.vios_uuid),) - - # Invoke - self.vol_drv.disconnect_volume(self.slot_mgr) - - # Two maps removed on one VIOS - self.assertEqual(2, mock_remove_maps.call_count) - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - - @mock.patch('pypowervm.tasks.vfc_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_meta') - def test_disconnect_volume_no_fabric_meta(self, mock_get_fabric_meta, - mock_remove_maps): - # Mock Data. The fabric_names is set to A by setUp. - # Force a None return - self.vol_drv.instance.task_state = 'deleting' - mock_get_fabric_meta.return_value = [] - - # Invoke - self.vol_drv.disconnect_volume(self.slot_mgr) - - # No mappings should have been removed - self.assertFalse(mock_remove_maps.called) - - def test_disconnect_volume_not_valid(self): - """Validates that a disconnect will fail if in a bad state.""" - self.mock_inst_wrap.can_modify_io.return_value = False, 'Bleh' - self.assertRaises(exc.VolumeDetachFailed, - self.vol_drv.disconnect_volume, self.slot_mgr) - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_disconnect_volume_not_on_same_host(self, mock_names): - """Validates a disconnect still removes mapping when host's differ.""" - self.vol_drv.instance.task_state = None - self.vol_drv.instance.host = 'not_host_in_conf' - - self.vol_drv.disconnect_volume(self.slot_mgr) - mock_names.assert_called_once() - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_disconnect_volume_spawning_not_on_same_host(self, mock_names): - """Validates disconnect when instance is spawning on another host.""" - self.vol_drv.instance.task_state = 'spawning' - self.vol_drv.instance.host = 'not_host_in_conf' - - self.vol_drv.disconnect_volume(self.slot_mgr) - mock_names.assert_called_once() - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' 
- '_remove_maps_for_fabric') - def test_disconnect_volume_no_op(self, mock_remove_maps): - """Tests that when the task state is not set, connections are left.""" - # Invoke - self.vol_drv.instance.task_state = None - self.vol_drv.instance.host = None - - self.vol_drv.disconnect_volume(self.slot_mgr) - - # Verify - self.assertEqual(0, mock_remove_maps.call_count) - - def test_disconnect_volume_no_op_other_state(self): - """Tests that the deletion doesn't go through on certain states.""" - self.vol_drv.instance.task_state = task_states.RESUMING - self.vol_drv.instance.host = CONF.host - - # Invoke - self.vol_drv.disconnect_volume(self.slot_mgr) - self.assertEqual(0, self.adpt.read.call_count) - - def test_connect_volume_no_map(self): - """Tests that if the VFC Mapping exists, another is not added.""" - # Mock Data - self.vol_drv._fabric_names.return_value = {} - self.vol_drv.connection_info = {'data': {'initiator_target_map': - {'a': None, 'b': None}, - 'volume_id': 'vid'}} - - mock_mapping = mock.MagicMock() - mock_mapping.client_adapter.wwpns = {'a', 'b'} - - mock_vios = mock.MagicMock() - mock_vios.vfc_mappings = [mock_mapping] - - # Invoke - self.vol_drv.connect_volume(self.slot_mgr) - - def test_min_xags(self): - xags = self.vol_drv.min_xags() - self.assertEqual(2, len(xags)) - self.assertIn(pvm_const.XAG.VIO_STOR, xags) - self.assertIn(pvm_const.XAG.VIO_FMAP, xags) - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_meta') - def test_is_initial_wwpn(self, mock_fabric_meta): - # The deleting state is for roll back on spawn. Migrating is a - # scenario where you can't be creating new wwpns - mock_fabric_meta.return_value = [('21000024FF649104', 'virt1 virt2')] - bad_states = [task_states.DELETING, task_states.MIGRATING] - for state in bad_states: - self.vol_drv.instance.task_state = state - self.assertFalse(self.vol_drv._is_initial_wwpn( - npiv.FS_UNMAPPED, 'a')) - - # Task state should still be bad. - self.assertFalse(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a')) - - # Set a good task state, but fails due to the WWPNs already being - # hosted - self.vol_drv.instance.task_state = task_states.NETWORKING - self.assertFalse(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a')) - - # Validate that having no fabric metadata returns that this is an - # initial wwpn - mock_fabric_meta.return_value = [] - self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a')) - - # Validate that has fabric metadata of a different host, and therefore - # is still a valid initial wwpn. It is initial because it simulates - # a reschedule on a new host. - mock_fabric_meta.return_value = [('BAD_WWPN', 'virt1 virt2')] - self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a')) - - # And now no task state. 
- self.vol_drv.instance.task_state = None - self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a')) - - def test_is_migration_wwpn(self): - inst = self.vol_drv.instance - - # Migrating on different host - inst.task_state = task_states.MIGRATING - inst.host = 'Not Correct Host' - self.assertTrue(self.vol_drv._is_migration_wwpn(npiv.FS_INST_MAPPED)) - - # Try if the instance isn't mapped - self.assertFalse(self.vol_drv._is_migration_wwpn(npiv.FS_UNMAPPED)) - - # Simulate a rollback on the target host from a live migration failure - inst.task_state = None - self.assertTrue(self.vol_drv._is_migration_wwpn(npiv.FS_INST_MAPPED)) - - # Mapped but on same host - inst.task_state = task_states.MIGRATING - inst.host = CONF.host - self.assertFalse(self.vol_drv._is_migration_wwpn(npiv.FS_INST_MAPPED)) - - @mock.patch('pypowervm.tasks.vfc_mapper.derive_npiv_map') - def test_configure_wwpns_for_migration(self, mock_derive): - # Mock out the fabric - meta_fb_key = self.vol_drv._sys_meta_fabric_key('A') - meta_fb_map = '21000024FF649104,AA,BB,21000024FF649105,CC,DD' - self.vol_drv.instance.system_metadata = {meta_fb_key: meta_fb_map} - - # Mock out what the derive returns - expected_map = [('21000024FF649104', 'BB AA'), - ('21000024FF649105', 'DD CC')] - mock_derive.return_value = expected_map - - # Invoke - resp_maps = self.vol_drv._configure_wwpns_for_migration('A') - - # Make sure the updated maps are returned - expected = [('21000024FF649104', 'BB AA'), - ('21000024FF649105', 'DD CC')] - self.assertEqual(expected, resp_maps) - mock_derive.assert_called_with( - mock.ANY, ['21000024FF649104', '21000024FF649107'], - ['BB', 'AA', 'DD', 'CC']) - - @mock.patch('pypowervm.tasks.vfc_mapper.derive_npiv_map') - def test_configure_wwpns_for_migration_existing(self, mock_derive): - """Validates nothing is done if WWPNs are already flipped.""" - # Mock out the fabric - meta_fb_key = self.vol_drv._sys_meta_fabric_key('A') - meta_fb_map = '21000024FF649104,C05076079CFF0FA0,C05076079CFF0FA1' - meta_fb_st_key = self.vol_drv._sys_fabric_state_key('A') - meta_fb_st_val = npiv.FS_MIGRATING - self.vol_drv.instance.system_metadata = { - meta_fb_key: meta_fb_map, meta_fb_st_key: meta_fb_st_val} - - # Invoke - resp_maps = self.vol_drv._configure_wwpns_for_migration('A') - - # Make sure that the order of the client WWPNs is not changed. - expected = [('21000024FF649104', 'C05076079CFF0FA0 C05076079CFF0FA1')] - self.assertEqual(expected, resp_maps) - self.assertFalse(mock_derive.called) - - @mock.patch('pypowervm.tasks.vfc_mapper.build_wwpn_pair') - @mock.patch('pypowervm.tasks.vfc_mapper.derive_npiv_map') - def test_wwpns(self, mock_derive, mock_build_pair): - """Tests that new WWPNs get generated properly.""" - # Mock Data - mock_derive.return_value = [('21000024FF649104', 'AA BB'), - ('21000024FF649105', 'CC DD')] - self.adpt.read.return_value = self.vios_feed_resp - - meta_key = self.vol_drv._sys_meta_fabric_key('A') - self.vol_drv.instance.system_metadata = {meta_key: None} - - # Invoke - wwpns = self.vol_drv.wwpns() - - # Check - self.assertListEqual(['AA', 'CC'], wwpns) - self.assertEqual('21000024FF649104,AA,BB,21000024FF649105,CC,DD', - self.vol_drv.instance.system_metadata[meta_key]) - self.assertEqual(1, mock_derive.call_count) - self.assertTrue(self.vol_drv.instance.save.called) - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' 
- '_get_fabric_state') - def test_wwpns_on_sys_meta(self, mock_fabric_state): - """Tests that previously stored WWPNs are returned.""" - # Mock - mock_fabric_state.return_value = npiv.FS_INST_MAPPED - self.vol_drv.instance.host = CONF.host - self.vol_drv.instance.system_metadata = { - self.vol_drv._sys_meta_fabric_key('A'): 'phys1,a,b,phys2,c,d'} - - # Invoke and Verify - self.assertListEqual(['a', 'c'], self.vol_drv.wwpns()) - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_configure_wwpns_for_migration') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_is_migration_wwpn') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_is_initial_wwpn') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_state') - def test_wwpns_for_migration(self, mock_fabric_state, mock_initial, - mock_migration, mock_configure): - """Tests that wwpns for migration are generated properly.""" - # Mock - mock_fabric_state.return_value = npiv.FS_INST_MAPPED - mock_initial.return_value = False - mock_migration.return_value = True - mock_configure.return_value = [('phys1', 'a b'), ('phys2', 'c d')] - self.vol_drv.stg_ftsk = mock.MagicMock() - - # Invoke and Verify - self.assertListEqual(['a', 'c'], self.vol_drv.wwpns()) - - # Verify that on migration, the WWPNs are reversed. - self.assertEqual(1, self.vol_drv.stg_ftsk.feed.reverse.call_count) - - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_state') - def test_wwpns_bad_task_state(self, mock_fabric_state): - """Tests behavior with a bad task state.""" - # Mock - mock_fabric_state.return_value = npiv.FS_UNMAPPED - self.vol_drv.instance.system_metadata = { - self.vol_drv._sys_meta_fabric_key('A'): 'phys1,a,b,phys2,c,d'} - - # Invoke and Verify - for state in [task_states.DELETING, task_states.MIGRATING]: - self.vol_drv.instance.task_state = state - self.assertListEqual(['a', 'c'], self.vol_drv.wwpns()) - - @mock.patch('pypowervm.tasks.vfc_mapper.find_vios_for_vfc_wwpns') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_set_fabric_meta') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_meta') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_post_live_migration_at_destination( - self, mock_fabric_names, mock_get_fabric_meta, - mock_set_fabric_meta, mock_find_wwpns): - mock_fabric_names.return_value = ['A', 'B'] - mock_get_fabric_meta.side_effect = [ - [('S1', 'AA BB'), ('S2', 'CC DD')], - [('S3', 'EE FF')]] - - # This represents the new physical WWPNs on the target server side. - mock_find_wwpns.side_effect = [ - (None, mock.Mock(backing_port=mock.Mock(wwpn='T1'))), - (None, mock.Mock(backing_port=mock.Mock(wwpn='T2'))), - (None, mock.Mock(backing_port=mock.Mock(wwpn='T3')))] - - # Execute the test - mig_vol_stor = {} - self.vol_drv.post_live_migration_at_destination(mig_vol_stor) - - # Client WWPNs should be flipped and the new physical WWPNs should be - # associated with them. - mock_set_fabric_meta.assert_any_call( - 'A', [('T1', 'BB AA'), ('T2', 'DD CC')]) - mock_set_fabric_meta.assert_any_call( - 'B', [('T3', 'FF EE')]) - - # Invoke a second time. Should not 're-flip' or even call set. 
- mock_set_fabric_meta.reset_mock() - self.vol_drv.post_live_migration_at_destination(mig_vol_stor) - self.assertFalse(mock_set_fabric_meta.called) - - @mock.patch('pypowervm.tasks.vfc_mapper.find_vios_for_vfc_wwpns') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_meta') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_pre_live_migration_on_source( - self, mock_fabric_names, mock_get_fabric_meta, - mock_find_vios_for_vfc_wwpns): - mock_fabric_names.return_value = ['A', 'B'] - mock_get_fabric_meta.side_effect = [ - [('11', 'AA BB'), ('22', 'CC DD')], - [('33', 'EE FF')]] - - vios_wraps = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FEED)) - vios1_w = vios_wraps[0] - - def mock_client_adpt(slot): - return mock.Mock(client_adapter=mock.Mock(lpar_slot_num=slot)) - - mock_find_vios_for_vfc_wwpns.side_effect = [ - (vios1_w, mock_client_adpt(1)), (vios1_w, mock_client_adpt(2)), - (vios1_w, mock_client_adpt(3))] - - # Execute the test - mig_data = {} - self.vol_drv.pre_live_migration_on_source(mig_data) - - self.assertEqual('[1, 2]', mig_data.get('src_npiv_fabric_slots_A')) - self.assertEqual('[3]', mig_data.get('src_npiv_fabric_slots_B')) - vios_peer_slots = jsonutils.loads(mig_data.get('src_vios_peer_slots')) - self.assertItemsEqual([[1, 2, 3]], vios_peer_slots) - # Ensure only string data is placed in the dict. - for key in mig_data: - self.assertEqual(str, type(mig_data[key])) - - @mock.patch('pypowervm.tasks.vfc_mapper.find_vios_for_vfc_wwpns') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_get_fabric_meta') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_pre_live_migration_on_source_dual_vios( - self, mock_fabric_names, mock_get_fabric_meta, - mock_find_vios_for_vfc_wwpns): - mock_fabric_names.return_value = ['A', 'B'] - mock_get_fabric_meta.side_effect = [ - [('11', 'AA BB'), ('22', 'CC DD')], - [('33', 'EE FF'), ('44', 'GG HH')]] - - vios_wraps = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FEED_2)) - vios1_w = vios_wraps[0] - vios2_w = vios_wraps[1] - - def mock_client_adpt(slot): - return mock.Mock(client_adapter=mock.Mock(lpar_slot_num=slot)) - - mock_find_vios_for_vfc_wwpns.side_effect = [ - (vios1_w, mock_client_adpt(1)), (vios1_w, mock_client_adpt(2)), - (vios2_w, mock_client_adpt(3)), (vios2_w, mock_client_adpt(4))] - - # Execute the test - mig_data = {} - self.vol_drv.pre_live_migration_on_source(mig_data) - - self.assertEqual('[1, 2]', mig_data.get('src_npiv_fabric_slots_A')) - self.assertEqual('[3, 4]', mig_data.get('src_npiv_fabric_slots_B')) - vios_peer_slots = jsonutils.loads(mig_data.get('src_vios_peer_slots')) - self.assertItemsEqual([[1, 2], [3, 4]], vios_peer_slots) - # Ensure only string data is placed in the dict. 
- for key in mig_data: - self.assertEqual(str, type(mig_data[key])) - - # test unequal mapping across 2 VIOS - mock_get_fabric_meta.side_effect = [ - [('11', 'AA BB'), ('22', 'CC DD')], - [('33', 'EE FF')]] - - mock_find_vios_for_vfc_wwpns.side_effect = [ - (vios1_w, mock_client_adpt(1)), (vios2_w, mock_client_adpt(2)), - (vios1_w, mock_client_adpt(3))] - - # Execute the test - mig_data = {} - self.vol_drv.pre_live_migration_on_source(mig_data) - - self.assertEqual('[1, 2]', mig_data.get('src_npiv_fabric_slots_A')) - self.assertEqual('[3]', mig_data.get('src_npiv_fabric_slots_B')) - vios_peer_slots = jsonutils.loads(mig_data.get('src_vios_peer_slots')) - self.assertItemsEqual([[1, 3], [2]], vios_peer_slots) - # Ensure only string data is placed in the dict. - for key in mig_data: - self.assertEqual(str, type(mig_data[key])) - - @mock.patch('pypowervm.tasks.vfc_mapper.' - 'build_migration_mappings_for_fabric') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_pre_live_migration_on_destination_legacy( - self, mock_fabric_names, mock_build_mig_map): - mock_fabric_names.return_value = ['A', 'B'] - - mig_data = {'src_npiv_fabric_slots_A': jsonutils.dumps([1, 2]), - 'src_npiv_fabric_slots_B': jsonutils.dumps([3])} - - mock_build_mig_map.side_effect = [['a'], ['b']] - self.vol_drv.stg_ftsk = mock.MagicMock() - - # Execute the test - self.vol_drv.pre_live_migration_on_destination(mig_data) - - self.assertEqual('["a"]', mig_data.get('dest_npiv_fabric_mapping_A')) - self.assertEqual('["b"]', mig_data.get('dest_npiv_fabric_mapping_B')) - # Ensure only string data is placed in the dict. - for key in mig_data: - self.assertEqual(str, type(mig_data[key])) - - # Order of the mappings is not important. - self.assertEqual( - {'b', 'a'}, - set(jsonutils.loads(mig_data.get('vfc_lpm_mappings')))) - - # Verify that on migration, the WWPNs are reversed. - self.assertEqual(2, self.vol_drv.stg_ftsk.feed.reverse.call_count) - - @mock.patch('pypowervm.tasks.vfc_mapper.build_migration_mappings') - @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.' - '_fabric_names') - def test_pre_live_migration_on_destination( - self, mock_fabric_names, mock_build_mig_map): - mock_fabric_names.return_value = ['A', 'B'] - - mig_data = {'src_npiv_fabric_slots_A': jsonutils.dumps([1, 2]), - 'src_npiv_fabric_slots_B': jsonutils.dumps([3]), - 'src_vios_peer_slots': jsonutils.dumps([[1, 2, 3]])} - - mock_build_mig_map.return_value = {'a', 'b'} - self.vol_drv.stg_ftsk = mock.MagicMock() - - # Execute the test - self.vol_drv.pre_live_migration_on_destination(mig_data) - - # Order of the mappings is not important. - self.assertEqual( - {'b', 'a'}, set(jsonutils.loads(mig_data.get('vfc_lpm_mappings')))) - - def test_set_fabric_meta(self): - port_map = [('1', 'aa AA'), ('2', 'bb BB'), - ('3', 'cc CC'), ('4', 'dd DD'), - ('5', 'ee EE'), ('6', 'ff FF'), - ('7', 'gg GG'), ('8', 'hh HH'), - ('9', 'ii II'), ('10', 'jj JJ')] - expected = {'npiv_adpt_wwpns_A': - '1,aa,AA,2,bb,BB,3,cc,CC,4,dd,DD', - 'npiv_adpt_wwpns_A_2': - '5,ee,EE,6,ff,FF,7,gg,GG,8,hh,HH', - 'npiv_adpt_wwpns_A_3': - '9,ii,II,10,jj,JJ'} - self.vol_drv.instance.system_metadata = dict() - self.vol_drv._set_fabric_meta('A', port_map) - self.assertEqual(self.vol_drv.instance.system_metadata, expected) - - # Clear out the metadata and make sure it sticks. 
-        self.vol_drv._set_fabric_meta('A', [])
-        self.assertEqual(self.vol_drv.instance.system_metadata, {})
-
-    def test_get_fabric_meta(self):
-        system_meta = {'npiv_adpt_wwpns_A':
-                       '1,aa,AA,2,bb,BB,3,cc,CC,4,dd,DD',
-                       'npiv_adpt_wwpns_A_2':
-                       '5,ee,EE,6,ff,FF,7,gg,GG,8,hh,HH',
-                       'npiv_adpt_wwpns_A_3':
-                       '9,ii,II,10,jj,JJ'}
-        expected = [('1', 'aa AA'), ('2', 'bb BB'),
-                    ('3', 'cc CC'), ('4', 'dd DD'),
-                    ('5', 'ee EE'), ('6', 'ff FF'),
-                    ('7', 'gg GG'), ('8', 'hh HH'),
-                    ('9', 'ii II'), ('10', 'jj JJ')]
-        self.vol_drv.instance.system_metadata = system_meta
-        fabric_meta = self.vol_drv._get_fabric_meta('A')
-        self.assertEqual(fabric_meta, expected)
-
-    def test_vol_type(self):
-        self.assertEqual('npiv', self.vol_drv.vol_type())
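test_set_fabric_meta and test_get_fabric_meta above pin down how NPIV port maps were flattened into Nova system_metadata: each (physical WWPN, 'virt1 virt2') pair is comma-joined, and the fabric key is chunked four mappings at a time because metadata values are length-limited. A rough re-implementation of the serialization side, for illustration only; the function name and chunk size are inferred from the tests, not taken from the deleted driver:

def to_fabric_meta(fabric, port_map, per_key=4):
    """Flatten [(phys_wwpn, 'virt1 virt2'), ...] into metadata keys.

    Keys are numbered npiv_adpt_wwpns_A, npiv_adpt_wwpns_A_2, ... so no
    single system_metadata value grows past the size limit.
    """
    flat = []
    for phys, virt_pair in port_map:
        flat.extend([phys] + virt_pair.split())
    meta = {}
    for i in range(0, len(port_map), per_key):
        key = 'npiv_adpt_wwpns_%s' % fabric
        if i:
            key += '_%d' % (i // per_key + 1)
        meta[key] = ','.join(flat[i * 3:(i + per_key) * 3])
    return meta

# Five mappings spill into a second key, mirroring the test data above:
print(to_fabric_meta('A', [('1', 'aa AA'), ('2', 'bb BB'), ('3', 'cc CC'),
                           ('4', 'dd DD'), ('5', 'ee EE')]))
# {'npiv_adpt_wwpns_A': '1,aa,AA,2,bb,BB,3,cc,CC,4,dd,DD',
#  'npiv_adpt_wwpns_A_2': '5,ee,EE'}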
diff --git a/nova_powervm/tests/virt/powervm/volume/test_rbd.py b/nova_powervm/tests/virt/powervm/volume/test_rbd.py
deleted file mode 100644
index be620fe7..00000000
--- a/nova_powervm/tests/virt/powervm/volume/test_rbd.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
-from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.volume import rbd as v_drv
-from pypowervm import const as pvm_const
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-
-class FakeRBDVolAdapter(v_drv.RBDVolumeAdapter):
-    """Subclass for RBDVolumeAdapter, since it is abstract."""
-
-    def __init__(self, adapter, host_uuid, instance, connection_info,
-                 stg_ftsk=None):
-        super(FakeRBDVolAdapter, self).__init__(
-            adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk)
-
-
-class TestRBDVolumeAdapter(test_vol.TestVolumeAdapter):
-    """Tests the RBDVolumeAdapter. NovaLink is an I/O host."""
-
-    def setUp(self):
-        super(TestRBDVolumeAdapter, self).setUp()
-
-        # Needed for the volume adapter
-        self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
-        mock_inst = mock.MagicMock(uuid='2BC123')
-
-        self.vol_drv = FakeRBDVolAdapter(
-            self.adpt, 'host_uuid', mock_inst,
-            {'data': {'name': 'pool/image', 'volume_id': 'a_vol_id'},
-             'serial': 'volid1'})
-
-        self.fake_vios = pvm_vios.VIOS.bld(
-            self.adpt, 'vios1',
-            pvm_bp.PartitionMemoryConfiguration.bld(self.adpt, 1024),
-            pvm_bp.PartitionMemoryConfiguration.bld(self.adpt, 0.1, 1))
-        self.feed = [pvm_vios.VIOS.wrap(self.fake_vios.entry)]
-        ftskfx = pvm_fx.FeedTaskFx(self.feed)
-        self.useFixture(ftskfx)
-
-    def test_min_xags(self):
-        """Ensures xag's only returns SCSI Mappings."""
-        self.assertEqual([pvm_const.XAG.VIO_SMAP], self.vol_drv.min_xags())
-
-    @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
-                autospec=True)
-    @mock.patch('pypowervm.entities.Entry.uuid',
-                new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.register_vscsi_mapping',
-                autospec=True)
-    @mock.patch('pypowervm.tasks.client_storage.udid_to_scsi_mapping',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True)
-    @mock.patch('pypowervm.wrappers.storage.RBD.bld_ref')
-    def test_connect_volume(self, mock_rbd_bld_ref, mock_get_mgmt_partition,
-                            mock_get_vm_id, mock_udid_to_map, mock_reg_map,
-                            mock_get_vios_uuid, mock_build_map, mock_add_map):
-        # Mockups
-        mock_rbd = mock.Mock()
-        mock_rbd_bld_ref.return_value = mock_rbd
-        mock_slot_mgr = mock.MagicMock()
-        mock_slot_mgr.build_map.get_vscsi_slot.return_value = 4, 'fake_path'
-
-        mock_vios = mock.Mock(uuid='uuid1')
-        mock_get_mgmt_partition.return_value = mock_vios
-        mock_get_vios_uuid.return_value = 'uuid1'
-        mock_get_vm_id.return_value = 'partition_id'
-
-        mock_udid_to_map.return_value = mock.Mock()
-        mock_add_map.return_value = None
-
-        # Set user
-        v_drv.CONF.powervm.rbd_user = 'tester'
-        # Invoke
-        self.vol_drv.connect_volume(mock_slot_mgr)
-        # Validate
-        mock_rbd_bld_ref.assert_called_once_with(
-            self.adpt, 'pool/image', tag='a_vol_id', user='tester')
-        self.assertEqual(1, mock_build_map.call_count)
-        self.assertEqual(1, mock_udid_to_map.call_count)
-
-    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
-                autospec=True)
-    @mock.patch('pypowervm.entities.Entry.uuid',
-                new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.register_vscsi_mapping',
-                autospec=True)
-    @mock.patch('pypowervm.tasks.client_storage.udid_to_scsi_mapping',
-                autospec=True)
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True)
-    @mock.patch('pypowervm.wrappers.storage.RBD.bld_ref')
-    def test_connect_volume_rebuild_no_slot(
-            self, mock_rbd_bld_ref, mock_get_mgmt_partition, mock_get_vm_id,
-            mock_udid_to_map, mock_reg_map, mock_get_vios_uuid,
-            mock_build_map):
-        # Mockups
-        mock_rbd = mock.Mock()
-        mock_rbd_bld_ref.return_value = mock_rbd
-        mock_slot_mgr = mock.MagicMock()
-        mock_slot_mgr.is_rebuild = True
-        mock_slot_mgr.build_map.get_vscsi_slot.return_value = None, None
-
-        mock_vios = mock.Mock(uuid='uuid1')
-        mock_get_mgmt_partition.return_value = mock_vios
-        mock_get_vios_uuid.return_value = 'uuid1'
-        # Invoke
-        self.vol_drv.connect_volume(mock_slot_mgr)
-
-        # Validate
-        mock_rbd_bld_ref.assert_called_once_with(
-            self.adpt, 'pool/image', tag='a_vol_id', user='')
-        self.assertEqual(0, mock_build_map.call_count)
-
-    @mock.patch('pypowervm.entities.Entry.uuid',
-                new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
-    @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
-    def test_disconnect_volume(self, mock_find_maps, mock_gen_match_func,
-                               mock_get_mgmt_partition, mock_entry_uuid):
-        # Mockups
-        mock_slot_mgr = mock.MagicMock()
-
-        mock_vios = mock.Mock(uuid='uuid1')
-        mock_get_mgmt_partition.return_value = mock_vios
-
-        mock_match_func = mock.Mock()
-        mock_gen_match_func.return_value = mock_match_func
-        mock_entry_uuid.return_value = 'uuid1'
-        # Invoke
-        self.vol_drv._disconnect_volume(mock_slot_mgr)
-
-        # Validate
-        mock_gen_match_func.assert_called_once_with(
-            pvm_stg.VDisk, names=['pool/image'])
-        mock_find_maps.assert_called_once_with(
-            mock.ANY,
-            client_lpar_id='2BC123', match_func=mock_match_func)
-
-    @mock.patch('nova_powervm.virt.powervm.volume.rbd.RBDVolumeAdapter.'
-                'vios_uuids', new_callable=mock.PropertyMock)
-    @mock.patch('nova_powervm.virt.powervm.volume.rbd.RBDVolumeAdapter.'
-                'is_volume_on_vios')
-    def test_pre_live_migration_on_destination(self, mock_on_vios, mock_uuids):
-        mock_uuids.return_value = ['uuid1']
-        mock_on_vios.return_value = (True, 'pool/image')
-        self.vol_drv.pre_live_migration_on_destination(mock.ANY)
-        mock_on_vios.return_value = (False, None)
-        self.assertRaises(
-            p_exc.VolumePreMigrationFailed,
-            self.vol_drv.pre_live_migration_on_destination, mock.ANY)
-
-    @mock.patch('nova_powervm.virt.powervm.volume.rbd.RBDVolumeAdapter.'
-                'vios_uuids', new_callable=mock.PropertyMock)
-    @mock.patch('pypowervm.tasks.hdisk.rbd_exists', autospec=True)
-    def test_is_volume_on_vios(self, mock_exists, mock_vios_uuids):
-        mock_exists.return_value = True
-        mock_vios_uuids.return_value = ['uuid1']
-        vol_found, vol_name = self.vol_drv.is_volume_on_vios(
-            mock.Mock(uuid='uuid2'))
-        self.assertFalse(vol_found)
-        self.assertIsNone(vol_name)
-        vol_found, vol_name = self.vol_drv.is_volume_on_vios(
-            mock.Mock(uuid='uuid1'))
-        self.assertTrue(vol_found)
-        self.assertEqual('pool/image', vol_name)
-        mock_exists.return_value = False
-        vol_found, vol_name = self.vol_drv.is_volume_on_vios(
-            mock.Mock(uuid='uuid1'))
-        self.assertFalse(vol_found)
-        self.assertIsNone(vol_name)
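The vSCSI tests deleted below all drive one attach sequence: discover the hdisk on a VIOS from the initiator-target-LUN triples, then build and add a vSCSI mapping into the client slot. Pieced together from the call shapes these tests mock, the flow looks roughly like this; it is a sketch, not the deleted driver verbatim, and the error handling, FeedTask batching, and PV.bld usage are assumptions:

from pypowervm.tasks import hdisk
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.wrappers import storage as pvm_stg


def attach_volume_on_vios(adapter, host_uuid, vios_w, lpar_uuid, itls,
                          slot, lua):
    # 1) Have the VIOS run LUA discovery over the ITLs for this volume.
    status, device_name, udid = hdisk.discover_hdisk(
        adapter, vios_w.uuid, itls)
    if status != hdisk.LUAStatus.DEVICE_AVAILABLE:
        raise RuntimeError('hdisk not available on VIOS %s' % vios_w.name)

    # 2) Wrap the discovered hdisk as a physical volume and map it to the
    #    client LPAR at the slot/LUA dictated by the slot manager.
    pv = pvm_stg.PV.bld(adapter, device_name, udid=udid)
    scsi_map = tsk_map.build_vscsi_mapping(
        host_uuid, vios_w, lpar_uuid, pv, lpar_slot_num=slot, lua=lua)
    return tsk_map.add_map(vios_w, scsi_map)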
diff --git a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py b/nova_powervm/tests/virt/powervm/volume/test_vscsi.py
deleted file mode 100644
index e9a1f142..00000000
--- a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py
+++ /dev/null
@@ -1,601 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -import mock - -from nova_powervm import conf as cfg -from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm.volume import volume -from nova_powervm.virt.powervm.volume import vscsi - -from pypowervm import const as pvm_const -from pypowervm.tasks import hdisk -from pypowervm.tests import test_fixtures as pvm_fx -from pypowervm.tests.test_utils import pvmhttp -from pypowervm.wrappers import storage as pvm_stor -from pypowervm.wrappers import virtual_io_server as pvm_vios - -CONF = cfg.CONF - -VIOS_FEED = 'fake_vios_feed2.txt' -VIOS_FEED_MULTI = 'fake_vios_feed_multi.txt' - -I_WWPN_1 = '21000024FF649104' -I_WWPN_2 = '21000024FF649105' - -I2_WWPN_1 = '10000090FA5371F2' -I2_WWPN_2 = '10000090FA53720A' - - -class BaseVSCSITest(test_vol.TestVolumeAdapter): - """Basic test case for the VSCSI Volume Connector.""" - - def setUp(self, vios_feed_file, p_wwpn1, p_wwpn2): - super(BaseVSCSITest, self).setUp() - self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt - - def resp(file_name): - return pvmhttp.load_pvm_resp( - file_name, adapter=self.adpt).get_response() - self.vios_feed_resp = resp(vios_feed_file) - - self.feed = pvm_vios.VIOS.wrap(self.vios_feed_resp) - self.ft_fx = pvm_fx.FeedTaskFx(self.feed) - self.useFixture(self.ft_fx) - - self.adpt.read.return_value = self.vios_feed_resp - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.getter') - @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid') - def init_vol_adpt(mock_pvm_uuid, mock_getter): - con_info = { - 'serial': 'id', - 'data': { - 'initiator_target_map': { - p_wwpn1: ['t1'], - p_wwpn2: ['t2', 't3'] - }, - 'target_lun': '1', - 'volume_id': 'a_volume_identifier', - 'pg83NAA': '4567' - }, - } - mock_inst = mock.MagicMock() - mock_pvm_uuid.return_value = '1234' - - # The getter can just return the VIOS values (to remove a read - # that would otherwise need to be mocked). - mock_getter.return_value = self.feed - - return vscsi.PVVscsiFCVolumeAdapter(self.adpt, 'host_uuid', - mock_inst, con_info) - self.vol_drv = init_vol_adpt() - - -class TestVSCSIAdapter(BaseVSCSITest): - """Tests the vSCSI Volume Connector Adapter. 
Single VIOS tests""" - - def setUp(self): - super(TestVSCSIAdapter, self).setUp( - VIOS_FEED, I_WWPN_1, I_WWPN_2) - - # setup system_metadata tests - self.volume_id = 'f042c68a-c5a5-476a-ba34-2f6d43f4226c' - self.vios_uuid = '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089' - self.udid = ( - '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDA1Rg==') - self.slot_mgr = mock.Mock() - self.slot_mgr.build_map.get_vscsi_slot.return_value = 62, 'the_lua' - - @mock.patch('pypowervm.tasks.hdisk.discover_hdisk') - @mock.patch('pypowervm.tasks.storage.find_stale_lpars') - def test_pre_live_migration(self, mock_fsl, mock_discover): - # The mock return values - mock_fsl.return_value = [] - mock_discover.return_value = ( - hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') - - # Run the method - self.vol_drv.pre_live_migration_on_destination({}) - - # Test exception path - mock_discover.return_value = ( - hdisk.LUAStatus.ITL_NOT_RELIABLE, 'devname', 'udid') - - # Run the method - self.assertRaises(p_exc.VolumePreMigrationFailed, - self.vol_drv.pre_live_migration_on_destination, {}) - - @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - def test_cleanup_volume(self, mock_hdisk_from_uuid, mock_remove_hdisk): - mock_hdisk_from_uuid.return_value = 'device_name' - - # Bad path. udid not found - # Run the method - this should produce a warning - with self.assertLogs(volume.__name__, 'WARNING'): - self.vol_drv._cleanup_volume(None) - - # Good path - self.vol_drv._cleanup_volume('udid1') - # We don't update the feed, we run remove hdisk instead - self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count) - mock_remove_hdisk.assert_called_once_with( - self.adpt, mock.ANY, 'device_name', self.vios_uuid) - - def test_post_live_migr_source(self): - - # Bad path. volume id not found - bad_data = {'vscsi-BAD': 'udid1'} - # good path. - good_data = {'vscsi-id': 'udid1'} - - with mock.patch.object(self.vol_drv, '_cleanup_volume') as mock_cln: - self.vol_drv.post_live_migration_at_source(bad_data) - mock_cln.assert_called_once_with(None) - - mock_cln.reset_mock() - self.vol_drv.post_live_migration_at_source(good_data) - mock_cln.assert_called_once_with('udid1') - - def test_cleanup_at_dest(self): - - # Bad path. volume id not found - bad_data = {'vscsi-BAD': 'udid1'} - # good path. - good_data = {'vscsi-id': 'udid1'} - - with mock.patch.object(self.vol_drv, '_cleanup_volume') as mock_cln: - self.vol_drv.cleanup_volume_at_destination(bad_data) - mock_cln.assert_called_once_with(None) - - mock_cln.reset_mock() - self.vol_drv.cleanup_volume_at_destination(good_data) - mock_cln.assert_called_once_with('udid1') - - @mock.patch('pypowervm.tasks.scsi_mapper.add_map') - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.hdisk.discover_hdisk') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_rebuild_new_vio(self, mock_get_vm_id, mock_disc, - mock_build_map, mock_add_map): - """Check if bad slot map. - - If the slot map on a rebuild returns None, we should NOT connect it. - """ - # Make sure the rebuild is set to True - self.slot_mgr.build_map.get_vscsi_slot.return_value = None, None - self.slot_mgr.is_rebuild = True - - # Set up that the device is on the VIOS - mock_get_vm_id.return_value = 'partition_id' - mock_build_map.side_effect = Exception - mock_disc.return_value = ( - hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') - - # Run the method. 
It will fail only because there isn't a second - # VIOS that does support the connect. - self.assertRaises(p_exc.VolumeAttachFailed, - self.vol_drv.connect_volume, self.slot_mgr) - - # Make sure an add map is not invoked - self.assertEqual(0, mock_add_map.call_count) - - @mock.patch('pypowervm.tasks.scsi_mapper.add_map') - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.hdisk.discover_hdisk') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume(self, mock_get_vm_id, mock_disc_hdisk, - mock_build_map, mock_add_map): - # The mock return values - mock_disc_hdisk.return_value = ( - hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') - mock_get_vm_id.return_value = 'partition_id' - - def build_map_func(host_uuid, vios_w, lpar_uuid, pv, - lpar_slot_num=None, lua=None, target_name=None): - self.assertEqual('host_uuid', host_uuid) - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('1234', lpar_uuid) - self.assertIsInstance(pv, pvm_stor.PV) - self.assertEqual('a_volume_identifier', pv.tag) - self.assertEqual(62, lpar_slot_num) - self.assertEqual('the_lua', lua) - return 'fake_map' - - mock_build_map.side_effect = build_map_func - - # Run the method - self.vol_drv.connect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. - self.assertEqual(1, mock_add_map.call_count) - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - self.assertEqual(1, mock_build_map.call_count) - - @mock.patch('pypowervm.tasks.scsi_mapper.add_map') - @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping') - @mock.patch('pypowervm.tasks.hdisk.discover_hdisk') - @mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter' - '._validate_vios_on_connection') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_no_update( - self, mock_get_vm_id, mock_validate_vioses, mock_disc_hdisk, - mock_build_map, mock_add_map): - """Make sure we don't do an actual update of the VIOS if not needed.""" - # The mock return values - mock_build_map.return_value = 'fake_map' - mock_add_map.return_value = None - mock_get_vm_id.return_value = 'partition_id' - mock_disc_hdisk.return_value = (hdisk.LUAStatus.DEVICE_AVAILABLE, - 'devname', 'udid') - - # Run the method - self.vol_drv.connect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. 
- mock_validate_vioses.assert_called_with(1) - self.assertEqual(1, mock_add_map.call_count) - self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count) - self.assertEqual(1, mock_disc_hdisk.call_count) - - @mock.patch('pypowervm.tasks.hdisk.build_itls') - @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping') - @mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter' - '._validate_vios_on_connection') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_connect_volume_to_initiators( - self, mock_get_vm_id, mock_validate_vioses, - mock_add_vscsi_mapping, mock_build_itls): - """Tests that the connect w/out initiators throws errors.""" - mock_get_vm_id.return_value = 'partition_id' - - mock_instance = mock.Mock() - mock_instance.system_metadata = {} - - mock_validate_vioses.side_effect = p_exc.VolumeAttachFailed( - volume_id='1', reason='message', instance_name='inst') - - mock_build_itls.return_value = [] - self.assertRaises(p_exc.VolumeAttachFailed, - self.vol_drv.connect_volume, self.slot_mgr) - - # Validate that the validate was called with no vioses. - mock_validate_vioses.assert_called_with(0) - - def test_validate_vios_on_connection(self): - # Happy path! - self.vol_drv._validate_vios_on_connection(1) - - # Raise if no VIOSes are found - self.assertRaises(p_exc.VolumeAttachFailed, - self.vol_drv._validate_vios_on_connection, 0) - - # Multi VIOS required happy path. - self.flags(vscsi_vios_connections_required=2, group='powervm') - self.vol_drv._validate_vios_on_connection(2) - - # Raise if multiple VIOSes required - self.assertRaises(p_exc.VolumeAttachFailed, - self.vol_drv._validate_vios_on_connection, 1) - - def test_extend_volume(self): - # Ensure the method is implemented - self.vol_drv.extend_volume() - - @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_disconnect_volume(self, mock_get_vm_id, mock_remove_maps, - mock_hdisk_from_uuid, mock_remove_hdisk): - # The mock return values - mock_hdisk_from_uuid.return_value = 'device_name' - mock_get_vm_id.return_value = 'partition_id' - self.vol_drv._set_udid('UDIDIT!') - - def validate_remove_maps(vios_w, vm_uuid, match_func): - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('partition_id', vm_uuid) - return 'removed' - mock_remove_maps.side_effect = validate_remove_maps - - # Run the method - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. 
- self.assertEqual(1, mock_remove_maps.call_count) - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - mock_remove_hdisk.assert_called_once_with( - self.adpt, mock.ANY, 'device_name', self.vios_uuid) - - @mock.patch('pypowervm.tasks.scsi_mapper.find_maps') - @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_disconnect_volume_shared(self, mock_get_vm_id, mock_remove_maps, - mock_hdisk_from_uuid, mock_remove_hdisk, - mock_find_maps): - # The mock return values - mock_hdisk_from_uuid.return_value = 'device_name' - mock_get_vm_id.return_value = 'partition_id' - # Consider there are multiple attachments - mock_find_maps.return_value = [mock.MagicMock(), mock.MagicMock()] - self.vol_drv._set_udid('UDIDIT!') - - def validate_remove_maps(vios_w, vm_uuid, match_func): - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('partition_id', vm_uuid) - return 'removed' - mock_remove_maps.side_effect = validate_remove_maps - - # Run the method - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. - self.assertEqual(1, mock_remove_maps.call_count) - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - # Since device has multiple mappings remove disk should not get called - self.assertEqual(0, mock_remove_hdisk.call_count) - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_disconnect_volume_no_update( - self, mock_get_vm_id, mock_remove_maps, mock_hdisk_from_uuid): - """Validate that if no maps removed, the VIOS update is not called.""" - # The mock return values - mock_remove_maps.return_value = [] - mock_hdisk_from_uuid.return_value = 'device_name' - mock_get_vm_id.return_value = 'partition_id' - self.vol_drv._set_udid('UDIDIT!') - - # Run the method - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. - self.assertEqual(1, mock_remove_maps.call_count) - self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count) - - @mock.patch('pypowervm.tasks.hdisk.good_discovery') - @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id') - def test_disconnect_volume_no_udid( - self, mock_get_vm_id, mock_remove_maps, mock_hdisk_from_uuid, - mock_remove_hdisk, mock_good_discover): - - # The mock return values - mock_hdisk_from_uuid.return_value = 'device_name' - mock_get_vm_id.return_value = 'partition_id' - mock_good_discover.return_value = True - - def validate_remove_maps(vios_w, vm_uuid, match_func): - self.assertIsInstance(vios_w, pvm_vios.VIOS) - self.assertEqual('partition_id', vm_uuid) - return 'removed' - mock_remove_maps.side_effect = validate_remove_maps - - with mock.patch.object( - self.vol_drv, '_discover_volume_on_vios', - return_value=('status', 'dev_name', 'udidit')): - - # Run the method - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. 
- self.assertEqual(1, mock_remove_maps.call_count) - self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count) - mock_remove_hdisk.assert_called_once_with( - self.adpt, mock.ANY, 'dev_name', self.vios_uuid) - - @mock.patch('pypowervm.tasks.hdisk.good_discovery') - @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - def test_disconnect_volume_no_udid_on_discover( - self, mock_remove_maps, mock_remove_hdisk, mock_good_discover): - """Ensures that if the UDID can not be found, no disconnect.""" - mock_good_discover.return_value = False - with mock.patch.object( - self.vol_drv, '_discover_volume_on_vios', - return_value=('status', 'dev_name', None)): - - # Run the method - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. - self.assertEqual(0, mock_remove_maps.call_count) - self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count) - self.assertEqual(0, mock_remove_hdisk.call_count) - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - def test_disconnect_volume_udid_no_name(self, mock_hdisk_from_uuid): - """The VIO has no Storage XAG; should trigger discovery.""" - self.vol_drv._set_udid(self.udid) - mock_hdisk_from_uuid.return_value = None - - with mock.patch.object(self.vol_drv, - '_discover_volume_on_vios') as mock_dvov: - self.vol_drv.disconnect_volume(self.slot_mgr) - mock_dvov.assert_called() - - mock_hdisk_from_uuid.assert_called() - - @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') - @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps') - def test_disconnect_volume_no_valid_vio(self, mock_remove_maps, - mock_hdisk_from_uuid): - """Validate that if all VIOSes are invalid, the vio updates are 0.""" - # The mock return values - mock_remove_maps.return_value = None - mock_hdisk_from_uuid.return_value = None - - # Run the method. No disconnects should yield a LOG.warning. - with self.assertLogs(vscsi.__name__, 'WARNING'): - self.vol_drv.disconnect_volume(self.slot_mgr) - - # As initialized above, remove_maps returns True to trigger update. 
-        self.assertEqual(0, mock_remove_maps.call_count)
-        self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count)
-
-    @mock.patch('pypowervm.tasks.partition.get_physical_wwpns')
-    def test_wwpns(self, mock_vio_wwpns):
-        mock_vio_wwpns.return_value = ['aa', 'bb']
-
-        wwpns = self.vol_drv.wwpns()
-
-        self.assertListEqual(['aa', 'bb'], wwpns)
-
-    def test_min_xags(self):
-        xags = self.vol_drv.min_xags()
-        self.assertEqual(1, len(xags))
-        self.assertIn(pvm_const.XAG.VIO_SMAP, xags)
-
-    def test_vol_type(self):
-        self.assertEqual('vscsi', self.vol_drv.vol_type())
-
-    def test_set_udid(self):
-
-        # Mock connection info
-        self.vol_drv.connection_info['data'][vscsi.UDID_KEY] = None
-
-        # Set the UDID
-        self.vol_drv._set_udid(self.udid)
-
-        # Verify
-        self.assertEqual(self.udid,
-                         self.vol_drv.connection_info['data'][vscsi.UDID_KEY])
-
-    def test_get_udid(self):
-
-        # Set the value to retrieve
-        self.vol_drv.connection_info['data'][vscsi.UDID_KEY] = self.udid
-        retrieved_udid = self.vol_drv._get_udid()
-        # Check key found
-        self.assertEqual(self.udid, retrieved_udid)
-
-        # Check key not found
-        self.vol_drv.connection_info['data'].pop(vscsi.UDID_KEY)
-        retrieved_udid = self.vol_drv._get_udid()
-        # Check key not found
-        self.assertIsNone(retrieved_udid)
-
-    def test_get_hdisk_itls(self):
-        """Validates the _get_hdisk_itls method."""
-
-        mock_vios = mock.MagicMock()
-        mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_1]
-
-        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
-        self.assertListEqual([I_WWPN_1], i_wwpn)
-        self.assertListEqual(['t1'], t_wwpns)
-        self.assertEqual('1', lun)
-
-        mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_2]
-        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
-        self.assertListEqual([I_WWPN_2], i_wwpn)
-        self.assertListEqual(['t2', 't3'], t_wwpns)
-
-        mock_vios.get_active_pfc_wwpns.return_value = ['12345']
-        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
-        self.assertListEqual([], i_wwpn)
-
-    @mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
-    def test_check_host_mappings(self, mock_find):
-        mock_vios = mock.MagicMock()
-        mock_vios.uuid = self.vios_uuid
-        # Test when multiple matching entries found
-        mock_find.return_value = [mock.MagicMock(), mock.MagicMock()]
-        mapping = self.vol_drv._check_host_mappings(mock_vios, self.volume_id)
-        self.assertTrue(mapping)
-        # Test when single entry found check host mapping should return False
-        mock_find.return_value = [mock.MagicMock()]
-        mapping = self.vol_drv._check_host_mappings(mock_vios, self.volume_id)
-        self.assertFalse(mapping)
-        # Test when no entry found check host mapping should return False
-        mock_find.return_value = []
-        mapping = self.vol_drv._check_host_mappings(mock_vios, self.volume_id)
-        self.assertFalse(mapping)
-
-
-class TestVSCSIAdapterMultiVIOS(BaseVSCSITest):
-    """Tests the vSCSI Volume Connector Adapter against multiple VIOSes."""
-
-    def setUp(self):
-        super(TestVSCSIAdapterMultiVIOS, self).setUp(
-            VIOS_FEED_MULTI, I2_WWPN_1, I2_WWPN_2)
-        self.slot_mgr = mock.Mock()
-        self.slot_mgr.build_map.get_vscsi_slot.return_value = 62, 'the_lua'
-
-    @mock.patch('pypowervm.tasks.hdisk.build_itls')
-    @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
-    @mock.patch('pypowervm.tasks.hdisk.discover_hdisk')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
-    def test_connect_volume_multi_vio(self, mock_vm_id, mock_discover_hdisk,
-                                      mock_build_map, mock_add_map, mock_itls):
-        # The mock return values
-        mock_discover_hdisk.return_value = (
-            hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
-        mock_vm_id.return_value = 'partition_id'
-        mock_itls.return_value = 'fake_itls'
-
-        def build_map_func(host_uuid, vios_w, lpar_uuid, pv,
-                           lpar_slot_num=None, lua=None, target_name=None):
-            self.assertEqual('host_uuid', host_uuid)
-            self.assertIsInstance(vios_w, pvm_vios.VIOS)
-            self.assertEqual('1234', lpar_uuid)
-            self.assertIsInstance(pv, pvm_stor.PV)
-            self.assertEqual(62, lpar_slot_num)
-            self.assertEqual('the_lua', lua)
-            return 'fake_map'
-
-        mock_build_map.side_effect = build_map_func
-
-        # Run the method
-        self.vol_drv.connect_volume(self.slot_mgr)
-
-        # Assert the discover_hdisk call parameters
-        mock_discover_hdisk.assert_has_calls([
-            mock.call(
-                self.adpt, vio.uuid, 'fake_itls',
-                device_id='NDU2Nw=='.encode())
-            for vio in self.feed], any_order=True)
-
-        # As initialized above, remove_maps returns True to trigger update.
-        self.assertEqual(2, mock_add_map.call_count)
-        self.assertEqual(2, mock_build_map.call_count)
-
-        # Two of the calls are for the slots, two are for the add mappings
-        self.assertEqual(2, self.ft_fx.patchers['update'].mock.call_count)
-
-    @mock.patch('pypowervm.tasks.hdisk.discover_hdisk')
-    @mock.patch('pypowervm.tasks.storage.find_stale_lpars')
-    def test_pre_live_migration_multi_vios(self, mock_fsl, mock_discover):
-        # The mock return values
-        mock_fsl.return_value = []
-
-        mock_discover.side_effect = [(
-            hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid'),
-            (None, None, None)]
-
-        # Run the method
-        mig_data = {}
-        self.vol_drv.pre_live_migration_on_destination(mig_data)
-        self.assertIsNotNone(mig_data.get('vscsi-' + self.vol_drv.volume_id))
diff --git a/nova_powervm/version.py b/nova_powervm/version.py
deleted file mode 100644
index 11315abf..00000000
--- a/nova_powervm/version.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2015, 2016 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pbr.version
-
-version_info = pbr.version.VersionInfo(__package__)
diff --git a/nova_powervm/virt/__init__.py b/nova_powervm/virt/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/nova_powervm/virt/powervm/__init__.py b/nova_powervm/virt/powervm/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/nova_powervm/virt/powervm/disk/__init__.py b/nova_powervm/virt/powervm/disk/__init__.py
deleted file mode 100644
index e69de29b..00000000
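version.py, deleted above, was the standard pbr shim. For reference, this is how such a module is typically consumed; a generic sketch assuming pbr and an installed package, with the 'nova_powervm' literal standing in for __package__:

import pbr.version

# pbr derives the version from package metadata (or from git metadata
# when running from a development checkout).
version_info = pbr.version.VersionInfo('nova_powervm')
print(version_info.version_string())  # e.g. '7.0.0'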
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import oslo_log.log as logging -from oslo_utils import excutils -from oslo_utils import units -import random -import six -import time - -import pypowervm.const as pvm_const -import pypowervm.tasks.scsi_mapper as tsk_map -import pypowervm.util as pvm_util -import pypowervm.wrappers.virtual_io_server as pvm_vios - -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import mgmt -from nova_powervm.virt.powervm import vm - -LOG = logging.getLogger(__name__) - - -class DiskType(object): - BOOT = 'boot' - RESCUE = 'rescue' - IMAGE = 'image' - - -class IterableToFileAdapter(object): - """A degenerate file-like so that an iterable can be read like a file. - - The Glance client returns an iterable, but PowerVM requires a file. This - is the adapter between the two. - - Taken from xenapi/image/apis.py - """ - - def __init__(self, iterable): - self.iterator = iterable.__iter__() - self.remaining_data = '' - - def read(self, size): - chunk = self.remaining_data - try: - while not chunk: - chunk = next(self.iterator) - except StopIteration: - return '' - return_value = chunk[0:size] - self.remaining_data = chunk[size:] - return return_value - - -@six.add_metaclass(abc.ABCMeta) -class DiskAdapter(object): - - capabilities = { - 'shared_storage': False, - 'has_imagecache': False, - 'snapshot': False, - } - - def __init__(self, adapter, host_uuid): - """Initialize the DiskAdapter. - - :param adapter: The pypowervm adapter - :param host_uuid: The UUID of the PowerVM host. - """ - self.adapter = adapter - self.host_uuid = host_uuid - self.mp_uuid = mgmt.mgmt_uuid(self.adapter) - - @abc.abstractproperty - def vios_uuids(self): - """List the UUIDs of the Virtual I/O Servers hosting the storage.""" - raise NotImplementedError() - - def get_info(self): - """Return disk information for the driver. - - This method is used on cold migration to pass disk information from - the source to the destination. The data to be retrieved and - validated (see the validate method below) are determined by the disk - driver implementation. - - Currently this and the validate method will only be called for the SSP - driver because it's the only one that supports shared storage. - - :return: returns a dict of disk information - """ - return {} - - def manage_image_cache(self, context, all_instances): - """Update the image cache. - - Only called if the implementation has the capability: has_imagecache. - - :param context: nova context - :param all_instances: List of all instances on the node - """ - pass - - def validate(self, disk_info): - """Validate the disk information is compatible with this driver. - - This method is called during cold migration to ensure the disk - driver on the destination host is compatible with the source host.
- - :param disk_info: disk information dictionary - :returns: None if compatible, otherwise a reason for incompatibility - """ - return _('The configured disk driver does not support migration ' - 'or resize.') - - @abc.abstractmethod - def _disk_match_func(self, disk_type, instance): - """Return a matching function to locate the disk for an instance. - - :param disk_type: One of the DiskType enum values. - :param instance: The instance whose disk is to be found. - :return: Callable suitable for the match_func parameter of the - pypowervm.tasks.scsi_mapper.find_maps method. - """ - raise NotImplementedError() - - def get_bootdisk_path(self, instance, vios_uuid): - """Find the local path for the instance's boot disk. - - :param instance: nova.objects.instance.Instance object owning the - requested disk. - :param vios_uuid: PowerVM UUID of the VIOS to search for mappings. - :return: Local path for instance's boot disk. - """ - vm_uuid = vm.get_pvm_uuid(instance) - match_func = self._disk_match_func(DiskType.BOOT, instance) - vios_wrap = pvm_vios.VIOS.get(self.adapter, uuid=vios_uuid, - xag=[pvm_const.XAG.VIO_SMAP]) - maps = tsk_map.find_maps(vios_wrap.scsi_mappings, - client_lpar_id=vm_uuid, match_func=match_func) - if maps: - return maps[0].server_adapter.backing_dev_name - return None - - def _get_bootdisk_iter(self, instance): - """Return an iterator of (storage_elem, VIOS) tuples for the instance. - - This method returns an iterator of (storage_elem, VIOS) tuples, where - storage_elem is a pypowervm storage element wrapper associated with - the instance boot disk and VIOS is the wrapper of the Virtual I/O - server owning that storage element. - - :param instance: nova.objects.instance.Instance object owning the - requested disk. - :return: Iterator of tuples of (storage_elem, VIOS). - """ - lpar_wrap = vm.get_instance_wrapper(self.adapter, instance) - match_func = self._disk_match_func(DiskType.BOOT, instance) - for vios_uuid in self.vios_uuids: - vios_wrap = pvm_vios.VIOS.get( - self.adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP]) - for scsi_map in tsk_map.find_maps( - vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id, - match_func=match_func): - yield scsi_map.backing_storage, vios_wrap - - def connect_instance_disk_to_mgmt(self, instance): - """Connect an instance's boot disk to the management partition. - - :param instance: The instance whose boot disk is to be mapped. - :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped - :return vios: The EntryWrapper of the VIOS from which the mapping was - made. - :raise InstanceDiskMappingFailed: If the mapping could not be done. - """ - for stg_elem, vios in self._get_bootdisk_iter(instance): - msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name} - - # Create a new mapping. NOTE: If there's an existing mapping on - # the other VIOS but not this one, we'll create a second mapping - # here. It would take an extreme sequence of events to get to that - # point, and the second mapping would be harmless anyway. The - # alternative would be always checking all VIOSes for existing - # mappings, which increases the response time of the common case by - # an entire GET of VIOS+VIO_SMAP. - LOG.debug("Mapping boot disk %(disk_name)s to the management " - "partition from Virtual I/O Server %(vios_name)s.", - msg_args, instance=instance) - try: - tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid, - stg_elem) - # If that worked, we're done. add_vscsi_mapping logged. 
- return stg_elem, vios - except Exception: - LOG.exception("Failed to map boot disk %(disk_name)s to the " - "management partition from Virtual I/O Server " - "%(vios_name)s.", msg_args, instance=instance) - # Try the next hit, if available. - # We either didn't find the boot dev, or failed all attempts to map it. - raise npvmex.InstanceDiskMappingFailed(instance_name=instance.name) - - @abc.abstractmethod - def disconnect_disk_from_mgmt(self, vios_uuid, disk_name): - """Disconnect a disk from the management partition. - - :param vios_uuid: The UUID of the Virtual I/O Server serving the - mapping. - :param disk_name: The name of the disk to unmap. - """ - raise NotImplementedError() - - @abc.abstractproperty - def capacity(self): - """Capacity of the storage in gigabytes.""" - raise NotImplementedError() - - @abc.abstractproperty - def capacity_used(self): - """Capacity of the storage in gigabytes that is used.""" - raise NotImplementedError() - - @staticmethod - def _get_disk_name(disk_type, instance, short=False): - """Generate a name for a virtual disk associated with an instance. - - :param disk_type: One of the DiskType enum values. - :param instance: The instance for which the disk is to be created. - :param short: If True, the generated name will be limited to 15 - characters (the limit for virtual disks). If False, it - will be limited by the API (79 characters currently). - :return: The sanitized name for the virtual disk. - """ - prefix = '%s_' % (disk_type[0] if short else disk_type) - base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short - else instance.name) - return pvm_util.sanitize_file_name_for_api( - base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short - else pvm_const.MaxLen.FILENAME_DEFAULT) - - @staticmethod - def get_name_by_uuid(disk_type, uuid, short=False): - """Generate a name for a DiskType using a given uuid. - - :param disk_type: One of the DiskType enum values. - :param uuid: The uuid to use for the name. - :param short: If True, the generated name will be limited to 15 - characters. If False, it will be limited by the API. - :return: A name based on disk_type and uuid. - """ - prefix = '%s_' % (disk_type[0] if short else disk_type) - return pvm_util.sanitize_file_name_for_api( - uuid, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short - else pvm_const.MaxLen.FILENAME_DEFAULT) - - @staticmethod - def _get_image_name(image_meta, max_len=pvm_const.MaxLen.FILENAME_DEFAULT): - """Generate a name for a virtual storage copy of an image. - - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param max_len: Maximum string length for the resulting image name. - :return: String name for the image on the server. - """ - return pvm_util.sanitize_file_name_for_api( - image_meta.name, prefix=DiskType.IMAGE + '_', - suffix='_' + image_meta.checksum, max_len=max_len) - - @staticmethod - def _disk_gb_to_bytes(size_gb, floor=None): - """Convert a GB size (usually of a disk) to bytes, with a minimum. - - :param size_gb: GB size to convert - :param floor: The minimum value to return. If specified, and the - converted size_gb is smaller, this value will be returned - instead. - :return: A size in bytes. - """ - disk_bytes = size_gb * units.Gi - if floor is not None: - if disk_bytes < floor: - disk_bytes = floor - return disk_bytes - - @abc.abstractmethod - def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None): - """Disconnects the storage adapters from the image disk. - - :param instance: instance to disconnect the image for.
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - :param disk_type: The list of disk types to remove or None which means - to remove all disks from the VM. - :return: A list of all the backing storage elements that were - disconnected from the I/O Server and VM. - """ - raise NotImplementedError() - - @abc.abstractmethod - def delete_disks(self, storage_elems): - """Removes the disks specified by the mappings. - - :param storage_elems: A list of the storage elements that are to be - deleted. Derived from the return value from - disconnect_disk. - """ - raise NotImplementedError() - - def create_disk_from_image(self, context, instance, image_meta, - image_type=DiskType.BOOT): - """Creates a disk and copies the specified image to it. - - :param context: nova context used to retrieve image from glance - :param instance: instance to create the disk for. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param image_type: the image type. See disk constants above. - :return: The backing pypowervm storage object that was created. - """ - - # Retry 3 times on exception - for attempt in range(1, 5): - try: - return self._create_disk_from_image( - context, instance, image_meta, image_type=image_type) - except Exception: - with excutils.save_and_reraise_exception( - logger=LOG, reraise=False) as sare: - if attempt < 4: - LOG.exception("Disk Upload attempt #%d failed. " - "Retrying the upload.", attempt, - instance=instance) - time.sleep(random.randint(1, 5)) - else: - sare.reraise = True - - def _create_disk_from_image(self, context, instance, image_meta, - image_type=DiskType.BOOT): - """Creates a disk and copies the specified image to it. - - Cleans up created disk if an error occurs. - - :param context: nova context used to retrieve image from glance - :param instance: instance to create the disk for. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param image_type: the image type. See disk constants above. - :return: The backing pypowervm storage object that was created. - """ - pass - - @abc.abstractmethod - def connect_disk(self, instance, disk_info, stg_ftsk=None): - """Connects the disk image to the Virtual Machine. - - :param instance: nova instance to connect the disk to. - :param disk_info: The pypowervm storage element returned from - create_disk_from_image. Ex. VOptMedia, VDisk, LU, - or PV. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - """ - raise NotImplementedError() - - @abc.abstractmethod - def extend_disk(self, instance, disk_info, size): - """Extends the disk. - - :param instance: instance to extend the disk for. - :param disk_info: dictionary with disk info. - :param size: the new size in gb. - """ - raise NotImplementedError() - - @abc.abstractmethod - def check_instance_shared_storage_local(self, context, instance): - """Check if instance files located on shared storage. 
- - This runs check on the destination host, and then calls - back to the source host to check the results. - - :param context: security context - :param instance: nova.objects.instance.Instance object - """ - raise NotImplementedError() - - @abc.abstractmethod - def check_instance_shared_storage_remote(self, context, data): - """Check if instance files located on shared storage. - - :param context: security context - :param data: result of check_instance_shared_storage_local - """ - raise NotImplementedError() - - def check_instance_shared_storage_cleanup(self, context, data): - """Do cleanup on host after check_instance_shared_storage calls - - :param context: security context - :param data: result of check_instance_shared_storage_local - """ - pass diff --git a/nova_powervm/virt/powervm/disk/imagecache.py b/nova_powervm/virt/powervm/disk/imagecache.py deleted file mode 100644 index 77b09850..00000000 --- a/nova_powervm/virt/powervm/disk/imagecache.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.virt import imagecache - -from nova_powervm.virt.powervm.disk import driver - -from oslo_log import log as logging -from pypowervm.tasks import storage as tsk_stg -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - - -LOG = logging.getLogger(__name__) - - -class ImageManager(imagecache.ImageCacheManager): - - def __init__(self, vios_uuid, vg_uuid, adapter): - super(ImageManager, self).__init__() - self.vios_uuid = vios_uuid - self.vg_uuid = vg_uuid - self.adapter = adapter - - def _get_base(self): - """Returns the base directory of the cached images. - - :return: Volume Group containing all images/instances - """ - # Return VG for instances - return pvm_stg.VG.get( - self.adapter, uuid=self.vg_uuid, - parent_type=pvm_vios.VIOS.schema_type, parent_uuid=self.vios_uuid) - - def _scan_base_image(self, base_dir): - """Scan base images present in base_dir. - - :param base_dir: Volume group containing all images/instances - :return: List of all virtual disks containing a bootable image that - were created for caching purposes. - """ - # Find LVs in the _get_base VG with i_ - prefix = '%s_' % driver.DiskType.IMAGE[0] - return [image for image in base_dir.virtual_disks - if image.name.startswith(prefix)] - - def _age_and_verify_cached_images(self, context, all_instances, base_dir): - """Finds and removes unused images from the cache. - - :param context: nova context - :param all_instances: List of all instances on the node - :param base_dir: Volume group of cached images - """ - # Use the 'used_images' key from nova imagecache to get a dict that - # uses image_ids as keys. 
- cache = self._scan_base_image(base_dir) - running_inst = self._list_running_instances(context, all_instances) - adjusted_ids = [] - for img_id in running_inst.get('used_images'): - if img_id: - adjusted_ids.append( - driver.DiskAdapter.get_name_by_uuid(driver.DiskType.IMAGE, - img_id, short=True)) - # Compare base images with running instances remove unused - unused = [image for image in cache if image.name not in adjusted_ids] - # Remove unused - if unused: - for image in unused: - LOG.info("Removing unused cache image: '%s'", image.name) - tsk_stg.rm_vg_storage(base_dir, vdisks=unused) - - def update(self, context, all_instances): - """Remove cached images not being used by any instance. - - :param context: nova context - :param all_instances: List of all instances on the node - """ - - base_dir = self._get_base() - self._age_and_verify_cached_images(context, all_instances, base_dir) diff --git a/nova_powervm/virt/powervm/disk/localdisk.py b/nova_powervm/virt/powervm/disk/localdisk.py deleted file mode 100644 index fb3e499a..00000000 --- a/nova_powervm/virt/powervm/disk/localdisk.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import lockutils -from oslo_log import log as logging -from oslo_utils import excutils - -from nova import exception as nova_exc -from nova import image - -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -from pypowervm.tasks import partition as pvm_tpar -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.tasks import storage as tsk_stg -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm.disk import driver as disk_dvr -from nova_powervm.virt.powervm.disk import imagecache -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -IMAGE_API = image.API() - - -class LocalStorage(disk_dvr.DiskAdapter): - - capabilities = { - 'shared_storage': False, - 'has_imagecache': True, - # NOTE(efried): 'snapshot' capability set dynamically in __init__. 
- } - - def __init__(self, adapter, host_uuid): - super(LocalStorage, self).__init__(adapter, host_uuid) - - # Query to get the Volume Group UUID - if not CONF.powervm.volume_group_name: - raise npvmex.OptRequiredIfOtherOptValue( - if_opt='disk_driver', if_value='localdisk', - then_opt='volume_group_name') - self.vg_name = CONF.powervm.volume_group_name - vios_w, vg_w = tsk_stg.find_vg(adapter, self.vg_name) - self._vios_uuid = vios_w.uuid - self.vg_uuid = vg_w.uuid - self.image_cache_mgr = imagecache.ImageManager(self._vios_uuid, - self.vg_uuid, adapter) - self.cache_lock = lockutils.ReaderWriterLock() - # Set the 'snapshot' capability dynamically. If we're hosting I/O on - # the management partition, we can snapshot. If we're hosting I/O on - # traditional VIOS, we are limited by the fact that a VSCSI device - # can't be mapped to two partitions (the VIOS and the management) at - # once. - self.capabilities['snapshot'] = self.mp_uuid == self._vios_uuid - LOG.info("Local Storage driver initialized: volume group: '%s'", - self.vg_name) - - @property - def vios_uuids(self): - """List the UUIDs of the Virtual I/O Servers hosting the storage. - - For localdisk, there's only one. - """ - return [self._vios_uuid] - - @staticmethod - def _disk_match_func(disk_type, instance): - """Return a matching function to locate the disk for an instance. - - :param disk_type: One of the DiskType enum values. - :param instance: The instance whose disk is to be found. - :return: Callable suitable for the match_func parameter of the - pypowervm.tasks.scsi_mapper.find_maps method. - """ - disk_name = LocalStorage._get_disk_name(disk_type, instance, - short=True) - return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name]) - - @property - def capacity(self): - """Capacity of the storage in gigabytes.""" - vg_wrap = self._get_vg_wrap() - - return float(vg_wrap.capacity) - - @property - def capacity_used(self): - """Capacity of the storage in gigabytes that is used.""" - vg_wrap = self._get_vg_wrap() - - # Subtract available from capacity - return float(vg_wrap.capacity) - float(vg_wrap.available_size) - - def manage_image_cache(self, context, all_instances): - """Update the image cache - - :param context: nova context - :param all_instances: List of all instances on the node - """ - with self.cache_lock.write_lock(): - self.image_cache_mgr.update(context, all_instances) - - def delete_disks(self, storage_elems): - """Removes the specified disks. - - :param storage_elems: A list of the storage elements that are to be - deleted. Derived from the return value from - disconnect_disk. - """ - # All of local disk is done against the volume group. So reload - # that (to get new etag) and then update against it. - tsk_stg.rm_vg_storage(self._get_vg_wrap(), vdisks=storage_elems) - - def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None): - """Disconnects the storage adapters from the image disk. - - :param instance: instance to disconnect the image for. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - :param disk_type: The list of disk types to remove or None which means - to remove all disks from the VM. - :return: A list of all the backing storage elements that were - disconnected from the I/O Server and VM. 
- """ - lpar_uuid = vm.get_pvm_uuid(instance) - - # Ensure we have a transaction manager. - if stg_ftsk is None: - stg_ftsk = pvm_tpar.build_active_vio_feed_task( - self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP]) - - # Build the match function - match_func = tsk_map.gen_match_func(pvm_stg.VDisk, prefixes=disk_type) - - # Make sure the remove function will run within the transaction manager - def rm_func(vios_w): - LOG.info("Disconnecting instance from storage disks.", - instance=instance) - return tsk_map.remove_maps(vios_w, lpar_uuid, - match_func=match_func) - - stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(rm_func) - - # Find the disk directly. - vios_w = stg_ftsk.wrapper_tasks[self._vios_uuid].wrapper - mappings = tsk_map.find_maps(vios_w.scsi_mappings, - client_lpar_id=lpar_uuid, - match_func=match_func) - - # Run the transaction manager if built locally. Must be done after - # the find to make sure the mappings were found previously. - if stg_ftsk.name == 'localdisk': - stg_ftsk.execute() - - return [x.backing_storage for x in mappings] - - def disconnect_disk_from_mgmt(self, vios_uuid, disk_name): - """Disconnect a disk from the management partition. - - :param vios_uuid: The UUID of the Virtual I/O Server serving the - mapping. - :param disk_name: The name of the disk to unmap. - """ - tsk_map.remove_vdisk_mapping(self.adapter, vios_uuid, self.mp_uuid, - disk_names=[disk_name]) - LOG.info("Unmapped boot disk %(disk_name)s from the management " - "partition from Virtual I/O Server %(vios_name)s.", - {'disk_name': disk_name, 'mp_uuid': self.mp_uuid, - 'vios_name': vios_uuid}) - - def _create_disk_from_image(self, context, instance, image_meta, - image_type=disk_dvr.DiskType.BOOT): - """Creates a disk and copies the specified image to it. - - Cleans up created disk if an error occurs. - - :param context: nova context used to retrieve image from glance - :param instance: instance to create the disk for. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param image_type: the image type. See disk constants above. - :return: The backing pypowervm storage object that was created. - """ - LOG.info('Create disk.', instance=instance) - - # Disk size to API is in bytes. Input from flavor is in Gb - disk_bytes = self._disk_gb_to_bytes(instance.flavor.root_gb, - floor=image_meta.size) - vol_name = self._get_disk_name(image_type, instance, short=True) - - with self.cache_lock.read_lock(): - img_udid = self._get_or_upload_image(context, image_meta) - # Transfer the image - return tsk_stg.crt_copy_vdisk( - self.adapter, self._vios_uuid, self.vg_uuid, img_udid, - image_meta.size, vol_name, disk_bytes) - - def _get_or_upload_image(self, context, image_meta): - """Return a cached image name - - Attempt to find a cached copy of the image. If there is no cached copy - of the image, create one. - - :param context: nova context used to retrieve image from glance - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. 
- :return: The name of the virtual disk containing the image - """ - - # Check for cached image - with lockutils.lock(image_meta.id): - vg_wrap = self._get_vg_wrap() - cache_name = self.get_name_by_uuid(disk_dvr.DiskType.IMAGE, - image_meta.id, short=True) - image = [disk for disk in vg_wrap.virtual_disks - if disk.name == cache_name] - if len(image) == 1: - return image[0].udid - - image = tsk_stg.upload_new_vdisk( - self.adapter, self._vios_uuid, self.vg_uuid, - disk_dvr.IterableToFileAdapter( - IMAGE_API.download(context, image_meta.id)), cache_name, - image_meta.size, d_size=image_meta.size, - upload_type=tsk_stg.UploadType.IO_STREAM, - file_format=image_meta.disk_format)[0] - return image.udid - - def connect_disk(self, instance, disk_info, stg_ftsk=None): - """Connects the disk image to the Virtual Machine. - - :param instance: nova instance to connect the disk to. - :param disk_info: The pypowervm storage element returned from - create_disk_from_image. Ex. VOptMedia, VDisk, LU, - or PV. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - """ - lpar_uuid = vm.get_pvm_uuid(instance) - - # Ensure we have a transaction manager. - if stg_ftsk is None: - stg_ftsk = pvm_tpar.build_active_vio_feed_task( - self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP]) - - def add_func(vios_w): - LOG.info("Adding logical volume disk connection to VIOS %(vios)s.", - {'vios': vios_w.name}, instance=instance) - mapping = tsk_map.build_vscsi_mapping( - self.host_uuid, vios_w, lpar_uuid, disk_info) - return tsk_map.add_map(vios_w, mapping) - - stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func) - - # Run the transaction manager if built locally. - if stg_ftsk.name == 'localdisk': - stg_ftsk.execute() - - @staticmethod - def _validate_resizable(vdisk): - """Validates that VDisk supports resizing - - :param vdisk: The VDisk to be resized - :raise ResizeError: If resizing is not supported for the given VDisk. - """ - if vdisk.backstore_type == pvm_stg.BackStoreType.USER_QCOW: - raise nova_exc.ResizeError( - reason=_("Resizing file-backed instances is not currently " - "supported.")) - - def extend_disk(self, instance, disk_info, size): - """Extends the disk. - - :param instance: instance to extend the disk for. - :param disk_info: dictionary with disk info. - :param size: the new size in gb. 
- """ - def _extend(): - # Get the volume group - vg_wrap = self._get_vg_wrap() - # Find the disk by name - vdisks = vg_wrap.virtual_disks - disk_found = None - for vdisk in vdisks: - # Vdisk name can be either disk_name or /path/to/disk_name - if vdisk.name.split('/')[-1] == vol_name.split('/')[-1]: - disk_found = vdisk - break - - if not disk_found: - LOG.error('Disk %s not found during resize.', vol_name, - instance=instance) - raise nova_exc.DiskNotFound( - location=self.vg_name + '/' + vol_name) - self._validate_resizable(disk_found) - - # Set the new size - disk_found.capacity = size - - # Post it to the VIOS - vg_wrap.update() - - # Get the disk name based on the instance and type - vol_name = self._get_disk_name(disk_info['type'], instance, short=True) - LOG.info('Extending disk: %s', vol_name, instance=instance) - try: - _extend() - except pvm_exc.Error: - with excutils.save_and_reraise_exception(logger=LOG): - # TODO(IBM): Handle etag mismatch and retry - LOG.exception("PowerVM Error extending disk.", - instance=instance) - - def check_instance_shared_storage_local(self, context, instance): - """Check if instance files located on shared storage. - - This runs check on the destination host, and then calls - back to the source host to check the results. - - :param context: security context - :param instance: nova.objects.instance.Instance object - """ - raise NotImplementedError() - - def check_instance_shared_storage_remote(self, context, data): - """Check if instance files located on shared storage. - - :param context: security context - :param data: result of check_instance_shared_storage_local - """ - raise NotImplementedError() - - def _get_vg_wrap(self): - return pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid, - parent_type=pvm_vios.VIOS, - parent_uuid=self._vios_uuid) diff --git a/nova_powervm/virt/powervm/disk/ssp.py b/nova_powervm/virt/powervm/disk/ssp.py deleted file mode 100644 index 8fe5ad47..00000000 --- a/nova_powervm/virt/powervm/disk/ssp.py +++ /dev/null @@ -1,470 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_log.log as logging -import random - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm.disk import driver as disk_drv -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import vm - -from nova import image -from oslo_utils import excutils -import pypowervm.const as pvm_const -from pypowervm.tasks import cluster_ssp as tsk_cs -from pypowervm.tasks import partition as tsk_par -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.tasks import storage as tsk_stg -import pypowervm.util as pvm_u -import pypowervm.wrappers.cluster as pvm_clust -import pypowervm.wrappers.storage as pvm_stg - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -IMAGE_API = image.API() - - -class SSPDiskAdapter(disk_drv.DiskAdapter): - """Provides a disk adapter for Shared Storage Pools. - - Shared Storage Pools are a clustered file system technology that can link - together Virtual I/O Servers. - - This adapter provides the connection for nova ephemeral storage (not - Cinder) to connect to virtual machines. - """ - - capabilities = { - 'shared_storage': True, - # NOTE(efried): Whereas the SSP disk driver definitely does image - # caching, it's not through the nova.virt.imagecache.ImageCacheManager - # API. Setting `has_imagecache` to True here would have the side - # effect of having a periodic task try to call this class's - # manage_image_cache method (not implemented here; and a no-op in the - # superclass) which would be harmless, but unnecessary. - 'has_imagecache': False, - 'snapshot': True, - } - - def __init__(self, adapter, host_uuid): - """Initialize the SSPDiskAdapter. - - :param adapter: pypowervm.adapter.Adapter for the PowerVM REST API. - :param host_uuid: PowerVM UUID of the managed system. - """ - super(SSPDiskAdapter, self).__init__(adapter, host_uuid) - - self._cluster = self._fetch_cluster(CONF.powervm.cluster_name) - self.clust_name = self._cluster.name - - # _ssp @property method will fetch and cache the SSP. - self.ssp_name = self._ssp.name - self.tier_name = self._tier.name - - LOG.info("SSP Storage driver initialized. Cluster '%(clust_name)s'; " - "SSP '%(ssp_name)s'; Tier '%(tier_name)s", - {'clust_name': self.clust_name, 'ssp_name': self.ssp_name, - 'tier_name': self.tier_name}) - - @property - def capacity(self): - """Capacity of the storage in gigabytes.""" - # Retrieving the Tier is faster (because don't have to refresh LUs.) - return float(self._tier.refresh().capacity) - - @property - def capacity_used(self): - """Capacity of the storage in gigabytes that is used.""" - ssp = self._ssp - return float(ssp.capacity) - float(ssp.free_space) - - def get_info(self): - """Return disk information for the driver. - - This method is used on cold migration to pass disk information from - the source to the destination. - - :return: returns a dict of disk information - """ - return {'cluster_name': self.clust_name, - 'ssp_name': self.ssp_name, - 'ssp_uuid': self._ssp.uuid} - - def validate(self, disk_info): - """Validate the disk information is compatible with this driver. - - This method is called during cold migration to ensure the disk - drivers on the destination host is compatible with the source host. - - :param disk_info: disk information dictionary - :returns: None if compatible, otherwise a reason for incompatibility - """ - if disk_info.get('ssp_uuid') != self._ssp.uuid: - return (_('The host is not a member of the same SSP cluster. 
' - 'The source host cluster: %(source_clust_name)s. ' - 'The source host SSP: %(source_ssp_name)s.') % - {'source_clust_name': disk_info.get('cluster_name'), - 'source_ssp_name': disk_info.get('ssp_name')} - ) - - def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None): - """Disconnects the storage adapters from the image disk. - - :param instance: instance to disconnect the image for. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for - the I/O Operations. If provided, the Virtual I/O - Server mapping updates will be added to the FeedTask. - This defers the updates to some later point in time. - If the FeedTask is not provided, the updates will be - run immediately when this method is executed. - :param disk_type: The list of disk types to remove or None which means - to remove all disks from the VM. - :return: A list of all the backing storage elements that were - disconnected from the I/O Server and VM. - """ - if stg_ftsk is None: - stg_ftsk = tsk_par.build_active_vio_feed_task( - self.adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP]) - - lpar_uuid = vm.get_pvm_uuid(instance) - match_func = tsk_map.gen_match_func(pvm_stg.LU, prefixes=disk_type) - - # Delay run function to remove the mapping between the VM and the LU - def rm_func(vios_w): - LOG.info("Removing SSP disk connection to VIOS %(vios)s.", - {'vios': vios_w.name}, instance=instance) - return tsk_map.remove_maps(vios_w, lpar_uuid, - match_func=match_func) - - # Add the mapping to *each* VIOS on the LPAR's host. - # The LPAR's host has to be self.host_uuid, else the PowerVM API will - # fail. - # - # Note - this may not be all the VIOSes on the system...just the ones - # in the SSP cluster. - # - # The mappings will normally be the same on all VIOSes, unless a VIOS - # was down when a disk was added. So for the return value, we need to - # collect the union of all relevant mappings from all VIOSes. - lu_set = set() - for vios_uuid in self.vios_uuids: - # Add the remove for the VIO - stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func) - - # Find the active LUs so that a delete op knows what to remove. - vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper - mappings = tsk_map.find_maps(vios_w.scsi_mappings, - client_lpar_id=lpar_uuid, - match_func=match_func) - if mappings: - lu_set.update([x.backing_storage for x in mappings]) - - # Run the FeedTask if it was built locally - if stg_ftsk.name == 'ssp': - stg_ftsk.execute() - - return list(lu_set) - - def disconnect_disk_from_mgmt(self, vios_uuid, disk_name): - """Disconnect a disk from the management partition. - - :param vios_uuid: The UUID of the Virtual I/O Server serving the - mapping. - :param disk_name: The name of the disk to unmap. - """ - tsk_map.remove_lu_mapping(self.adapter, vios_uuid, self.mp_uuid, - disk_names=[disk_name]) - LOG.info("Unmapped boot disk %(disk_name)s from the management " - "partition from Virtual I/O Server %(vios_uuid)s.", - {'disk_name': disk_name, 'mp_uuid': self.mp_uuid, - 'vios_uuid': vios_uuid}) - - def delete_disks(self, storage_elems): - """Removes the disks specified by the mappings. - - :param storage_elems: A list of the storage elements (LU - ElementWrappers) that are to be deleted. Derived - from the return value from disconnect_disk. - """ - tsk_stg.rm_tier_storage(storage_elems, tier=self._tier) - - def _create_disk_from_image(self, context, instance, image_meta, - image_type=disk_drv.DiskType.BOOT): - """Creates a disk and copies the specified image to it. 
- - If the specified image has not already been uploaded, an Image LU is - created for it. A Disk LU is then created for the instance and linked - to the Image LU. - - :param context: nova context used to retrieve image from glance - :param instance: instance to create the disk for. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param image_type: The image type. See disk_drv.DiskType. - :return: The backing pypowervm LU storage object that was created. - """ - LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s.', - dict(image_type=image_type, image_id=image_meta.id), - instance=instance) - - image_lu = tsk_cs.get_or_upload_image_lu( - self._tier, self._get_image_name(image_meta), - self._any_vios_uuid(), disk_drv.IterableToFileAdapter( - IMAGE_API.download(context, image_meta.id)), - image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM) - - boot_lu_name = self._get_disk_name(image_type, instance) - LOG.info('SSP: Disk name is %s', boot_lu_name, instance=instance) - - return tsk_stg.crt_lu( - self._tier, boot_lu_name, instance.flavor.root_gb, - typ=pvm_stg.LUType.DISK, clone=image_lu)[1] - - def get_disk_ref(self, instance, disk_type): - """Returns a reference to the disk for the instance.""" - lu_name = self._get_disk_name(disk_type, instance) - return pvm_stg.LUEnt.search( - self.adapter, parent=self._tier, name=lu_name, - lu_type=pvm_stg.LUType.DISK, one_result=True) - - def connect_disk(self, instance, disk_info, stg_ftsk=None): - """Connects the disk image to the Virtual Machine. - - :param instance: nova instance to connect the disk to. - :param disk_info: The pypowervm storage element returned from - create_disk_from_image. Ex. VOptMedia, VDisk, LU, - or PV. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - """ - if stg_ftsk is None: - stg_ftsk = tsk_par.build_active_vio_feed_task( - self.adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP]) - - # Create the LU structure - lu = pvm_stg.LU.bld_ref(self.adapter, disk_info.name, disk_info.udid) - lpar_uuid = vm.get_pvm_uuid(instance) - - # This is the delay apply mapping - def add_func(vios_w): - LOG.info("Adding SSP disk connection to VIOS %(vios)s.", - {'vios': vios_w.name}, instance=instance) - mapping = tsk_map.build_vscsi_mapping( - self.host_uuid, vios_w, lpar_uuid, lu) - return tsk_map.add_map(vios_w, mapping) - - # Add the mapping to *each* VIOS on the LPAR's host. - # The LPAR's host has to be self.host_uuid, else the PowerVM API will - # fail. - # - # Note - this may not be all the VIOSes on the system...just the ones - # in the SSP cluster. - for vios_uuid in self.vios_uuids: - stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func) - - # If the FeedTask was built locally, then run it immediately - if stg_ftsk.name == 'ssp': - stg_ftsk.execute() - - def extend_disk(self, instance, disk_info, size): - """Extends the disk. - - :param instance: instance to extend the disk for. - :param disk_info: dictionary with disk info. - :param size: the new size in gb. - """ - raise NotImplementedError() - - def check_instance_shared_storage_local(self, context, instance): - """Check if instance files located on shared storage. 
- - This runs check on the destination host, and then calls - back to the source host to check the results. - - :param context: security context - :param instance: nova.objects.instance.Instance object - """ - - # Get the SSP unique id and use that for the data to pass - return {'ssp_uuid': self._cluster.ssp_uuid} - - def check_instance_shared_storage_remote(self, context, data): - """Check if instance files located on shared storage. - - :param context: security context - :param data: result of check_instance_shared_storage_local - """ - - # Check the data passed and see if we're in the same SSP - try: - if data: - ssp_uuid = data.get('ssp_uuid') - if ssp_uuid is not None: - return ssp_uuid == self._cluster.ssp_uuid - except Exception: - LOG.exception('Error checking for shared storage.') - return False - - def check_instance_shared_storage_cleanup(self, context, data): - """Do cleanup on host after check_instance_shared_storage calls - - :param context: security context - :param data: result of check_instance_shared_storage_local - """ - - # Nothing to cleanup since we just use the SSP UUID - pass - - def _fetch_cluster(self, clust_name): - """Bootstrap fetch the Cluster associated with the configured name. - - :param clust_name: The cluster_name from the config, used to perform a - search query. If '' or None (no cluster_name was specified in the - config), we query all clusters on the host and, if exactly one is - found, we use it. - :return: The Cluster EntryWrapper. - :raise ClusterNotFoundByName: If clust_name was nonempty but no such - Cluster was found on the host. - :raise TooManyClustersFound: If clust_name was nonempty but matched - more than one Cluster on the host. - :raise NoConfigNoClusterFound: If clust_name was empty and no Cluster - was found on the host. - :raise NoConfigTooManyClusters: If clust_name was empty, but more than - one Cluster was found on the host. - """ - try: - # Did config provide a name? - if clust_name: - # Search returns a list of wrappers - wraps = pvm_clust.Cluster.search(self.adapter, name=clust_name) - if len(wraps) == 0: - raise npvmex.ClusterNotFoundByName(clust_name=clust_name) - if len(wraps) > 1: - raise npvmex.TooManyClustersFound(clust_count=len(wraps), - clust_name=clust_name) - else: - # Otherwise, pull the entire feed of Clusters and, if - # exactly one result, use it. - wraps = pvm_clust.Cluster.get(self.adapter) - if len(wraps) == 0: - raise npvmex.NoConfigNoClusterFound() - if len(wraps) > 1: - raise npvmex.NoConfigTooManyClusters( - clust_count=len(wraps)) - clust_wrap = wraps[0] - except Exception: - with excutils.save_and_reraise_exception(logger=LOG): - LOG.exception("PowerVM error fetching cluster.") - return clust_wrap - - def _refresh_cluster(self): - """Refetch the Cluster from the host. - - This should be necessary only when the node list is needed and may have - changed. - - :return: The refreshed self._cluster. - """ - # TODO(IBM): If the Cluster doesn't exist when the driver is loaded, we - # raise one of the custom exceptions; but if it gets removed at some - # point while live, we'll (re)raise the 404 HttpError from the REST - # API. Do we need a crisper way to distinguish these two scenarios? - # Do we want to trap the 404 and raise a custom "ClusterVanished"? - self._cluster = self._cluster.refresh() - return self._cluster - - @property - def _ssp(self): - """Fetch or refresh the SSP corresponding to the Cluster. - - This must be invoked after a successful _fetch_cluster. 
- - :return: The fetched or refreshed SSP EntryWrapper. - """ - # TODO(IBM): Smarter refreshing (i.e. don't do it every time). - if getattr(self, '_ssp_wrap', None) is None: - self._ssp_wrap = pvm_stg.SSP.get_by_href(self.adapter, - self._cluster.ssp_uri) - else: - self._ssp_wrap = self._ssp_wrap.refresh() - return self._ssp_wrap - - @property - def _tier(self): - """(Cache and) return the Tier corresponding to the SSP. - - This must be invoked after _ssp has primed _ssp_wrap. - - If a value is already cached, it is NOT refreshed before it is - returned. The caller may refresh it via the refresh() method. - - :return: Tier EntryWrapper representing the default Tier on the - configured Shared Storage Pool. - """ - if getattr(self, '_tier_wrap', None) is None: - self._tier_wrap = tsk_stg.default_tier_for_ssp(self._ssp_wrap) - return self._tier_wrap - - @property - def vios_uuids(self): - """List the UUIDs of our cluster's VIOSes on this host. - - (If a VIOS is not on this host, we can't interact with it, even if its - URI and therefore its UUID happen to be available in the pypowervm - wrapper.) - - :return: A list of VIOS UUID strings. - """ - ret = [] - for n in self._cluster.nodes: - # Skip any nodes that we don't have the vios uuid or uri - if not (n.vios_uuid and n.vios_uri): - continue - node_host_uuid = pvm_u.get_req_path_uuid( - n.vios_uri, preserve_case=True, root=True) - if self.host_uuid != node_host_uuid: - continue - ret.append(n.vios_uuid) - return ret - - def _any_vios_uuid(self): - """Pick one of the Cluster's VIOSes and return its UUID. - - Use when it doesn't matter which VIOS an operation is invoked against. - Currently picks at random; may later be changed to use round-robin. - - :return: A single VIOS UUID string. - """ - return random.choice(self.vios_uuids) - - @staticmethod - def _disk_match_func(disk_type, instance): - """Return a matching function to locate the disk for an instance. - - :param disk_type: One of the DiskType enum values. - :param instance: The instance whose disk is to be found. - :return: Callable suitable for the match_func parameter of the - pypowervm.tasks.scsi_mapper.find_maps method. - """ - disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance) - return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name]) diff --git a/nova_powervm/virt/powervm/driver.py b/nova_powervm/virt/powervm/driver.py deleted file mode 100644 index 9d2a31f6..00000000 --- a/nova_powervm/virt/powervm/driver.py +++ /dev/null @@ -1,1920 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
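_any_vios_uuid above picks a Cluster VIOS at random and notes that round-robin selection may come later. A self-contained sketch of the two strategies side by side; the VioPicker class and its attributes are invented for illustration:

import itertools
import random


class VioPicker(object):
    # Illustration only: random vs. round-robin VIOS selection.
    def __init__(self, vios_uuids):
        self.vios_uuids = vios_uuids
        # itertools.cycle caches the elements it yields, so a real driver
        # would need to rebuild the iterator if cluster membership changed.
        self._rr_iter = itertools.cycle(vios_uuids)

    def pick_random(self):
        return random.choice(self.vios_uuids)

    def pick_round_robin(self):
        return next(self._rr_iter)


picker = VioPicker(['vios-uuid-1', 'vios-uuid-2'])
assert picker.pick_round_robin() == 'vios-uuid-1'
assert picker.pick_round_robin() == 'vios-uuid-2'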
- -from nova import block_device -from nova.compute import task_states -from nova.compute import utils as compute_utils -from nova.compute import vm_states -from nova.console import type as console_type -from nova import context as ctx -from nova import exception -from nova import image -from nova import objects -from nova import utils as n_utils -from nova.virt import configdrive -from nova.virt import driver -import os_resource_classes as orc -from oslo_log import log as logging -from oslo_utils import importutils -import six -from taskflow.patterns import linear_flow as tf_lf -import time - -from pypowervm import adapter as pvm_apt -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -from pypowervm.helpers import log_helper as log_hlp -from pypowervm.helpers import vios_busy as vio_hlp -from pypowervm.tasks import cna as pvm_cna -from pypowervm.tasks import memory as pvm_mem -from pypowervm.tasks.monitor import host_cpu as pvm_hcpu -from pypowervm.tasks import partition as pvm_par -from pypowervm.tasks import power_opts as pvm_popts -from pypowervm.tasks import scsi_mapper as pvm_smap -from pypowervm.tasks import storage as pvm_stor -from pypowervm.tasks import vterm as pvm_vterm -from pypowervm import util as pvm_util -from pypowervm.wrappers import managed_system as pvm_ms - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm.disk import driver as disk_dvr -from nova_powervm.virt.powervm import event -from nova_powervm.virt.powervm import host as pvm_host -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import image as img -from nova_powervm.virt.powervm import live_migration as lpm -from nova_powervm.virt.powervm import media -from nova_powervm.virt.powervm.nvram import manager as nvram_manager -from nova_powervm.virt.powervm import slot -from nova_powervm.virt.powervm.tasks import base as tf_base -from nova_powervm.virt.powervm.tasks import image as tf_img -from nova_powervm.virt.powervm.tasks import network as tf_net -from nova_powervm.virt.powervm.tasks import slot as tf_slot -from nova_powervm.virt.powervm.tasks import storage as tf_stg -from nova_powervm.virt.powervm.tasks import vm as tf_vm -from nova_powervm.virt.powervm import vm -from nova_powervm.virt.powervm import volume as vol_attach -from nova_powervm.virt.powervm.volume import iscsi - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -DISK_ADPT_NS = 'nova_powervm.virt.powervm.disk' -DISK_ADPT_MAPPINGS = { - 'localdisk': 'localdisk.LocalStorage', - 'ssp': 'ssp.SSPDiskAdapter' -} -# NVRAM store APIs for the NVRAM manager to use -NVRAM_NS = 'nova_powervm.virt.powervm.nvram.' 
-NVRAM_APIS = { - 'swift': 'swift.SwiftNvramStore', -} - -KEEP_NVRAM_STATES = {vm_states.SHELVED, } -FETCH_NVRAM_STATES = {vm_states.SHELVED, vm_states.SHELVED_OFFLOADED} - - -class PowerVMDriver(driver.ComputeDriver): - - """PowerVM Implementation of Compute Driver.""" - - def __init__(self, virtapi): - self.capabilities = { - # NOTE(edmondsw): 'has_imagecache' will be set dynamically - "supports_migrate_to_same_host": False, - "supports_attach_interface": True, - "supports_device_tagging": False, - "supports_tagged_attach_interface": False, - "supports_tagged_attach_volume": False, - "supports_extend_volume": True, - "supports_multiattach": False, - "supports_evacuate": True, - "supports_trusted_certs": False, - - # Supported image types - "supports_image_type_aki": False, - "supports_image_type_ami": False, - "supports_image_type_ari": False, - "supports_image_type_iso": False, - "supports_image_type_qcow2": False, - "supports_image_type_raw": True, - "supports_image_type_vdi": False, - "supports_image_type_vhd": False, - "supports_image_type_vhdx": False, - "supports_image_type_vmdk": False, - - } - super(PowerVMDriver, self).__init__(virtapi) - - def init_host(self, host): - """Initialize anything that is necessary for the driver to function. - - Includes catching up with currently running VM's on the given host. - """ - - # Live migrations - self.live_migrations = {} - # Set the nvram mgr to None so events are not handled until it's setup - self.nvram_mgr = None - self.store_api = None - # Get an adapter - self._get_adapter() - # First need to resolve the managed host UUID - self._get_host_uuid() - - # Make sure the Virtual I/O Server(s) are available. - pvm_par.validate_vios_ready(self.adapter) - - # Do a scrub of the I/O plane to make sure the system is in good shape - LOG.info("Clearing stale I/O connections on driver init.") - pvm_stor.ComprehensiveScrub(self.adapter).execute() - - # Initialize the disk adapter. Sets self.disk_dvr - self._setup_disk_adapter() - self.image_api = image.API() - - self._setup_rebuild_store() - - # Init Host CPU Statistics - self.host_cpu_cache = pvm_hcpu.HostCPUMetricCache(self.adapter, - self.host_uuid) - - # Cache for instance overhead. - # Key: max_mem (int MB) - # Value: overhead (int MB) - self._inst_overhead_cache = {} - - # Clean-up any orphan adapters - self._cleanup_orphan_adapters(CONF.powervm.pvm_vswitch_for_novalink_io) - - LOG.info("The compute driver has been initialized.") - - def cleanup_host(self, host): - """Clean up anything that is necessary for the driver gracefully stop. - - Includes ending remote sessions. This is optional. - """ - # Stop listening for events - try: - self.session.get_event_listener().shutdown() - except Exception: - pass - - LOG.info("The compute driver has been shutdown.") - - def _get_adapter(self): - # Build the adapter. May need to attempt the connection multiple times - # in case the REST server is starting. 
- self.session = pvm_apt.Session(conn_tries=300) - self.adapter = pvm_apt.Adapter( - self.session, helpers=[log_hlp.log_helper, - vio_hlp.vios_busy_retry_helper]) - # Register the event handler - eh = event.PowerVMNovaEventHandler(self) - self.session.get_event_listener().subscribe(eh) - - def _setup_disk_adapter(self): - """Set up the nova ephemeral disk adapter.""" - self.disk_dvr = importutils.import_object_ns( - DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()], - self.adapter, self.host_uuid) - has_imagecache = self.disk_dvr.capabilities['has_imagecache'] - self.capabilities['has_imagecache'] = has_imagecache - - def manage_image_cache(self, context, all_instances): - self.disk_dvr.manage_image_cache(context, all_instances) - - def _setup_rebuild_store(self): - """Set up the store for remote restart objects.""" - store = CONF.powervm.nvram_store.lower() - if store != 'none': - self.store_api = importutils.import_object( - NVRAM_NS + NVRAM_APIS[store]) - # Events will be handled once the nvram_mgr is set. - self.nvram_mgr = nvram_manager.NvramManager( - self.store_api, self.adapter, self.host_uuid) - # Do host startup for NVRAM for existing VMs on the host - n_utils.spawn(self._nvram_host_startup) - - def _nvram_host_startup(self): - """NVRAM Startup. - - When the compute node starts up, it's not known if any NVRAM events - were missed when the compute process was not running. During startup - put each LPAR on the queue to be updated, just incase. - """ - for lpar_w in vm.get_lpars(self.adapter): - # Find the instance for the LPAR. - inst = vm.get_instance(ctx.get_admin_context(), lpar_w.uuid) - if inst is not None and inst.host == CONF.host: - self.nvram_mgr.store(inst) - time.sleep(0) - - def _get_host_uuid(self): - """Get the System wrapper and its UUID for the (single) host.""" - syswraps = pvm_ms.System.get(self.adapter) - if len(syswraps) != 1: - raise Exception( - _("Expected exactly one host; found %d"), len(syswraps)) - self.host_wrapper = syswraps[0] - self.host_uuid = self.host_wrapper.uuid - LOG.info("Host UUID is:%s", self.host_uuid) - - @staticmethod - def _log_operation(op, instance): - """Log entry point of driver operations.""" - LOG.info('Operation: %(op)s. Virtual machine display name: ' - '%(display_name)s, name: %(name)s', - {'op': op, 'display_name': instance.display_name, - 'name': instance.name}, instance=instance) - - def get_info(self, instance, use_cache=True): - """Get the current status of an instance. - - :param instance: nova.objects.instance.Instance object - :param use_cache: unused in this driver - :returns: An InstanceInfo object - """ - return vm.get_vm_info(self.adapter, instance) - - def instance_exists(self, instance): - """Checks existence of an instance on the host. - - :param instance: The instance to lookup - - Returns True if an instance with the supplied ID exists on - the host, False otherwise. - """ - return vm.instance_exists(self.adapter, instance) - - def estimate_instance_overhead(self, instance_info): - """Estimate the virtualization overhead required to build an instance. - - Defaults to zero, Per-instance overhead calculations are desired. - - :param instance_info: Instance/flavor to calculate overhead for. - It can be Instance or Flavor object or a simple dict. The dict is - expected to contain: - { 'memory_mb': , 'extra_specs': {'powervm:max_mem': }} - Values not found will default to zero. 
-        :return: Dict of estimated overhead values {'memory_mb': overhead}
-        """
-        # If the input passed is an Instance object, extract its Flavor.
-        if isinstance(instance_info, objects.Instance):
-            instance_info = instance_info.get_flavor()
-        # If the instance info passed is a dict, create a Flavor object.
-        elif isinstance(instance_info, dict):
-            instance_info = objects.Flavor(**instance_info)
-
-        max_mem = 0
-        overhead = 0
-        try:
-            cur_mem = instance_info.memory_mb
-            if hasattr(instance_info, 'extra_specs'):
-                if 'powervm:max_mem' in instance_info.extra_specs.keys():
-                    mem = instance_info.extra_specs.get('powervm:max_mem',
-                                                        max_mem)
-                    max_mem = int(mem)
-
-            max_mem = max(cur_mem, max_mem)
-            if max_mem in self._inst_overhead_cache:
-                overhead = self._inst_overhead_cache[max_mem]
-            else:
-                overhead, avail = pvm_mem.calculate_memory_overhead_on_host(
-                    self.adapter, self.host_uuid, {'max_mem': max_mem})
-                self._inst_overhead_cache[max_mem] = overhead
-
-        except Exception:
-            LOG.exception("PowerVM error estimating instance overhead.")
-        finally:
-            return {'memory_mb': overhead}
-
-    def list_instances(self):
-        """Return the names of all the instances known to the virt host.
-
-        :return: VM Names as a list.
-        """
-        lpar_list = vm.get_lpar_names(self.adapter)
-        return lpar_list
-
-    def get_host_cpu_stats(self):
-        """Return the current CPU state of the host."""
-        self.host_cpu_cache.refresh()
-        total_cycles = self.host_cpu_cache.total_cycles
-        total_user_cycles = self.host_cpu_cache.total_user_cycles
-        total_fw_cycles = self.host_cpu_cache.total_fw_cycles
-        return {
-            'kernel': total_fw_cycles,
-            'user': total_user_cycles,
-            'idle': (total_cycles - total_user_cycles - total_fw_cycles),
-            # Not reported by PowerVM
-            'iowait': 0,
-            'frequency': self.host_cpu_cache.cpu_freq}
-
-    def instance_on_disk(self, instance):
-        """Checks access of instance files on the host.
-
-        :param instance: nova.objects.instance.Instance to look up
-
-        Returns True if files of an instance with the supplied ID are
-        accessible on the host, False otherwise.
-
-        .. note::
-            Used in rebuild for HA implementation and required for validation
-            of access to instance shared disk files
-        """
-
-        # If the instance is booted from volume then we shouldn't
-        # really care if instance "disks" are on shared storage.
-        context = ctx.get_admin_context()
-        block_device_info = self._get_block_device_info(context, instance)
-        if self._is_booted_from_volume(block_device_info):
-            LOG.debug('Instance booted from volume.', instance=instance)
-            return True
-
-        # If configured for shared storage, see if we can find the disks
-        if self.disk_dvr.capabilities['shared_storage']:
-            LOG.debug('Looking for instance disks on shared storage.',
-                      instance=instance)
-            # Try to get a reference to the disk
-            try:
-                if self.disk_dvr.get_disk_ref(instance,
-                                              disk_dvr.DiskType.BOOT):
-                    LOG.debug('Disks found on shared storage.',
-                              instance=instance)
-                    return True
-            except Exception:
-                LOG.exception("PowerVM error checking instance on disk.",
-                              instance=instance)
-
-        LOG.debug('Instance disks not found on this host.', instance=instance)
-        return False
-
-    def spawn(self, context, instance, image_meta, injected_files,
-              admin_password, allocations, network_info=None,
-              block_device_info=None):
-        """Create a new instance/VM/domain on the virtualization platform.
-
-        Once this successfully completes, the instance should be
-        running (power_state.RUNNING).
- - If this fails, any partial instance should be completely - cleaned up, and the virtualization platform should be in the state - that it was before this call began. - - :param context: security context - :param instance: nova.objects.instance.Instance - This function should use the data there to guide - the creation of the new instance. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param injected_files: User files to inject into instance. - :param admin_password: Administrator password to set in instance. - :param allocations: Information about resources allocated to the - instance via placement, of the form returned by - SchedulerReportClient.get_allocations_for_consumer. - :param network_info: instance network information - :param block_device_info: Information about block devices to be - attached to the instance. - """ - self._log_operation('spawn', instance) - - # Extract the block devices. - bdms = self._extract_bdm(block_device_info) - - # Define the flow - flow_spawn = tf_lf.Flow("spawn") - - # Determine if this is a VM recreate - task_state = instance.task_state - rebuild_spawning = task_states.REBUILD_SPAWNING - recreate = (task_state == rebuild_spawning and 'id' not in image_meta) - - # Create the transaction manager (FeedTask) for Storage I/O. - xag = self._get_inst_xag(instance, bdms, recreate=recreate) - stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter, xag=xag) - - # Build the PowerVM Slot lookup map. Only the recreate action needs - # the volume driver iterator (to look up volumes and their client - # mappings). - vol_drv_iter = (self._vol_drv_iter(instance, bdms, stg_ftsk=stg_ftsk) - if recreate else None) - slot_mgr = slot.build_slot_mgr( - instance, self.store_api, adapter=self.adapter, - vol_drv_iter=vol_drv_iter) - - # Create the LPAR, check if NVRAM restore is needed. - vm_state = instance.vm_state - if self.nvram_mgr and (recreate or vm_state in FETCH_NVRAM_STATES): - nvram_mgr = self.nvram_mgr - else: - nvram_mgr = None - - # If we're recreating pass None in for the FeedTask. This will make the - # Create task produce a FeedTask that will be used to scrub stale - # adapters immediately after the LPAR is created. - flow_spawn.add(tf_vm.Create( - self.adapter, self.host_wrapper, instance, - stg_ftsk=(None if recreate else stg_ftsk), nvram_mgr=nvram_mgr, - slot_mgr=slot_mgr)) - - # Create a flow for the IO - flow_spawn.add(tf_net.PlugVifs( - self.virtapi, self.adapter, instance, network_info, - self.host_uuid, slot_mgr)) - flow_spawn.add(tf_net.PlugMgmtVif( - self.adapter, instance, self.host_uuid, slot_mgr)) - - # Only add the image disk if this is from Glance. - if not self._is_booted_from_volume(block_device_info): - - # If a recreate, just hookup the existing disk on shared storage. - if recreate: - flow_spawn.add(tf_stg.FindDisk( - self.disk_dvr, context, instance, disk_dvr.DiskType.BOOT)) - else: - # Creates the boot image. - flow_spawn.add(tf_stg.CreateDiskForImg( - self.disk_dvr, context, instance, image_meta)) - # Connects up the disk to the LPAR - flow_spawn.add(tf_stg.ConnectDisk( - self.disk_dvr, instance, stg_ftsk=stg_ftsk)) - - # Determine if there are volumes to connect. If so, add a connection - # for each type. - self._add_volume_connection_tasks( - context, instance, bdms, flow_spawn, stg_ftsk, slot_mgr) - - # If the config drive is needed, add those steps. Should be done - # after all the other I/O. 
- if configdrive.required_by(instance) and not recreate: - flow_spawn.add(tf_stg.CreateAndConnectCfgDrive( - self.adapter, instance, injected_files, - network_info, admin_password, stg_ftsk=stg_ftsk)) - - # Add the transaction manager flow to the end of the 'I/O - # connection' tasks. This will run all the connections in parallel. - flow_spawn.add(stg_ftsk) - - # Update load source of IBMi VM - distro = instance.system_metadata.get('image_os_distro', '') - if distro.lower() == img.OSDistro.OS400: - boot_type = self._get_boot_connectivity_type( - bdms, block_device_info) - flow_spawn.add(tf_vm.UpdateIBMiSettings( - self.adapter, instance, boot_type)) - - # Save the slot map information - flow_spawn.add(tf_slot.SaveSlotStore(instance, slot_mgr)) - - pwr_opts = pvm_popts.PowerOnOpts() - if CONF.powervm.remove_vopt_media_on_boot: - media_name = media.ConfigDrivePowerVM.get_cfg_drv_name(instance) - pwr_opts.remove_optical( - media_name, time=CONF.powervm.remove_vopt_media_time) - - # Last step is to power on the system. - flow_spawn.add(tf_vm.PowerOn(self.adapter, instance, - pwr_opts=pwr_opts)) - - # Run the flow. - tf_base.run(flow_spawn, instance=instance) - - def _add_volume_connection_tasks(self, context, instance, bdms, - flow, stg_ftsk, slot_mgr): - """Determine if there are volumes to connect to this instance. - - If there are volumes to connect to this instance add a task to the - flow for each volume. - - :param context: security context. - :param instance: Instance object as returned by DB layer. - :param bdms: block device mappings. - :param flow: the flow to add the tasks to. - :param stg_ftsk: the storage task flow. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is attached to a VM. - """ - for bdm, vol_drv in self._vol_drv_iter(instance, bdms, - stg_ftsk=stg_ftsk): - # First connect the volume. This will update the - # connection_info. - flow.add(tf_stg.ConnectVolume(vol_drv, slot_mgr)) - - # Save the BDM so that the updated connection info is - # persisted. - flow.add(tf_stg.SaveBDM(bdm, instance)) - - def _add_volume_disconnection_tasks(self, context, instance, bdms, - flow, stg_ftsk, slot_mgr): - """Determine if there are volumes to disconnect from this instance. - - If there are volumes to disconnect from this instance add a task to the - flow for each volume. - - :param context: security context. - :param instance: Instance object as returned by DB layer. - :param bdms: block device mappings. - :param flow: the flow to add the tasks to. - :param stg_ftsk: the storage task flow. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is detached from a VM. - """ - # TODO(thorst) Do we need to do something on the disconnect for slots? - for bdm, vol_drv in self._vol_drv_iter(instance, bdms, - stg_ftsk=stg_ftsk): - flow.add(tf_stg.DisconnectVolume(vol_drv, slot_mgr)) - - def _get_block_device_info(self, context, instance): - """Retrieves the instance's block_device_info.""" - - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - return driver.get_block_device_info(instance, bdms) - - def _is_booted_from_volume(self, block_device_info): - """Determine whether the root device is listed in block_device_info. - - If it is, this can be considered a 'boot from Cinder Volume'. - - :param block_device_info: The block device info from the compute - manager. - :return: True if the root device is in block_device_info and False if - it is not. 
-        """
-        if block_device_info is None:
-            return False
-
-        root_bdm = block_device.get_root_bdm(
-            driver.block_device_info_get_mapping(block_device_info))
-        return root_bdm is not None
-
-    @property
-    def need_legacy_block_device_info(self):
-        return False
-
-    def _destroy(self, context, instance, block_device_info=None,
-                 network_info=None, destroy_disks=True, shutdown=True):
-        """Internal destroy method used by multiple operations.
-
-        :param context: security context
-        :param instance: Instance object as returned by DB layer.
-        :param block_device_info: Information about block devices that should
-                                  be detached from the instance.
-                                  This can be None when destroying the original
-                                  VM during confirm resize/migration. In that
-                                  case, the storage mappings have already been
-                                  removed from the original VM, so there is no
-                                  work to do.
-        :param network_info: The network information associated with the
-                             instance
-        :param destroy_disks: Indicates if disks should be destroyed
-        :param shutdown: Indicates whether to shut down the VM first
-        """
-
-        def _setup_flow_and_run():
-            # Extract the block devices.
-            bdms = self._extract_bdm(block_device_info)
-
-            # Define the flow
-            flow = tf_lf.Flow("destroy")
-
-            if shutdown:
-                # Power off the LPAR. If its disks are about to be deleted,
-                # do a VSP hard shutdown.
-                flow.add(tf_vm.PowerOff(self.adapter, instance,
-                                        force_immediate=destroy_disks))
-
-            # Create the transaction manager (FeedTask) for Storage I/O.
-            xag = self._get_inst_xag(instance, bdms)
-            stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter,
-                                                          xag=xag)
-
-            # Build the PowerVM Slot lookup map.
-            slot_mgr = slot.build_slot_mgr(instance, self.store_api)
-
-            # Call the unplug VIFs task. While CNAs get removed from the LPAR
-            # directly on the destroy, this clears up the I/O Host side.
-            flow.add(tf_vm.Get(self.adapter, self.host_uuid, instance))
-            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info,
-                                       self.host_uuid, slot_mgr))
-
-            # Add the disconnect/deletion of the vOpt to the transaction
-            # manager.
-            if configdrive.required_by(instance):
-                flow.add(tf_stg.DeleteVOpt(
-                    self.adapter, instance, stg_ftsk=stg_ftsk))
-
-            # Determine if there are volumes to disconnect. If so, remove each
-            # volume (within the transaction manager)
-            self._add_volume_disconnection_tasks(
-                context, instance, bdms, flow, stg_ftsk, slot_mgr)
-
-            # Only detach the disk adapters if this is not a boot from volume,
-            # since volumes are handled above. This is only for disks.
-            destroy_disk_task = None
-            if not self._is_booted_from_volume(block_device_info):
-                # Detach the disk storage adapters (when the stg_ftsk runs)
-                flow.add(tf_stg.DetachDisk(
-                    self.disk_dvr, instance, stg_ftsk=stg_ftsk))
-
-                # Delete the storage disks
-                if destroy_disks:
-                    destroy_disk_task = tf_stg.DeleteDisk(
-                        self.disk_dvr, instance)
-
-            # It's possible that volume disconnection may have failed for
-            # disks which had been removed from the VIOS by the storage back
-            # end (e.g. if we're destroying an evacuated instance). In that
-            # case, the storage is already gone, but we had no way to identify
-            # its mappings because there was no disk name/UDID. We now remove
-            # such mappings based on their association with the LPAR ID.
- def _rm_vscsi_maps(vwrap): - removals = pvm_smap.remove_maps(vwrap, pvm_inst_uuid) - if removals: - LOG.warning("Removing %(num_maps)d storage-less VSCSI " - "mappings associated with LPAR ID " - "%(lpar_uuid)s from VIOS %(vios_name)s.", - {'num_maps': len(removals), - 'lpar_uuid': pvm_inst_uuid, - 'vios_name': vwrap.name}, instance=instance) - return removals - stg_ftsk.add_functor_subtask(_rm_vscsi_maps) - - # Add the transaction manager flow to the end of the 'storage - # connection' tasks. This will run all the disconnection ops - # in parallel - flow.add(stg_ftsk) - - # The disks shouldn't be destroyed until the unmappings are done. - if destroy_disk_task: - flow.add(destroy_disk_task) - - # Last step is to delete the LPAR from the system and delete - # the NVRAM from the store. - # Note: If moving to a Graph Flow, will need to change to depend on - # the prior step. - flow.add(tf_vm.Delete(self.adapter, instance)) - - if (destroy_disks and instance.vm_state not in KEEP_NVRAM_STATES): - if instance.host in [None, CONF.host]: - # If the disks are being destroyed and not one of the - # operations that we should keep the NVRAM around for, then - # it's probably safe to delete the NVRAM from the store. - flow.add(tf_vm.DeleteNvram(self.nvram_mgr, instance)) - flow.add(tf_slot.DeleteSlotStore(instance, slot_mgr)) - - # Run the flow - tf_base.run(flow, instance=instance) - - try: - pvm_inst_uuid = vm.get_pvm_uuid(instance) - _setup_flow_and_run() - except exception.InstanceNotFound: - LOG.warning('VM was not found during destroy operation.', - instance=instance) - return - except Exception as e: - LOG.exception("PowerVM error destroying instance.", - instance=instance) - # Convert to a Nova exception - raise exception.InstanceTerminationFailure( - reason=six.text_type(e)) - - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): - """Destroy (shutdown and delete) the specified instance. - - If the instance is not found (for example if networking failed), this - function should still succeed. It's probably a good idea to log a - warning in that case. - - :param context: security context - :param instance: Instance object as returned by DB layer. - :param network_info: - :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` - :param block_device_info: Information about block devices that should - be detached from the instance. - :param destroy_disks: Indicates if disks should be destroyed - :param migrate_data: a LiveMigrateData object - """ - if instance.task_state == task_states.RESIZE_REVERTING: - LOG.info('Destroy called for migrated/resized instance.', - instance=instance) - # This destroy is part of resize or migrate. It's called to - # revert the resize/migration on the destination host. - - # Get the VM and see if we've renamed it to the resize name, - # if not delete as usual because then we know it's not the - # original VM. - pvm_inst_uuid = vm.get_pvm_uuid(instance) - vm_name = vm.get_vm_qp(self.adapter, pvm_inst_uuid, - qprop='PartitionName', log_errors=False) - if vm_name == self._gen_resize_name(instance, same_host=True): - # Since it matches it must have been a resize, don't delete it! 
- LOG.info('Ignoring destroy call during resize revert.', - instance=instance) - return - - # Run the destroy - self._log_operation('destroy', instance) - self._destroy( - context, instance, block_device_info=block_device_info, - network_info=network_info, destroy_disks=destroy_disks, - shutdown=True) - - def attach_volume(self, context, connection_info, instance, mountpoint, - disk_bus=None, device_type=None, encryption=None): - """Attach the volume to the instance at mountpoint using info.""" - self._log_operation('attach_volume', instance) - - # Define the flow - flow = tf_lf.Flow("attach_volume") - - # Get the LPAR Wrapper - flow.add(tf_vm.Get(self.adapter, self.host_uuid, instance)) - - # Determine if there are volumes to connect. If so, add a connection - # for each type. - slot_mgr = slot.build_slot_mgr(instance, self.store_api) - vol_drv = vol_attach.build_volume_driver( - self.adapter, self.host_uuid, instance, connection_info) - flow.add(tf_stg.ConnectVolume(vol_drv, slot_mgr)) - - # Save the new slot info - flow.add(tf_slot.SaveSlotStore(instance, slot_mgr)) - - # Run the flow - tf_base.run(flow, instance=instance) - - # The volume connector may have updated the system metadata. Save - # the instance to persist the data. Spawn/destroy auto saves instance, - # but the attach does not. Detach does not need this save - as the - # detach flows do not (currently) modify system metadata. May need - # to revise in the future as volume connectors evolve. - instance.save() - - def extend_volume(self, connection_info, instance, requested_size): - """Extend the disk attached to the instance. - - :param dict connection_info: The connection for the extended volume. - :param nova.objects.instance.Instance instance: - The instance whose volume gets extended. - :param int requested_size: The requested new volume size in bytes. This - parameter is unused by this driver. - :return: None - """ - vol_drv = vol_attach.build_volume_driver( - self.adapter, self.host_uuid, instance, connection_info) - vol_drv.extend_volume() - - def detach_volume(self, context, connection_info, instance, mountpoint, - encryption=None): - """Detach the volume attached to the instance.""" - self._log_operation('detach_volume', instance) - - # Define the flow - flow = tf_lf.Flow("detach_volume") - - # Get a volume adapter for this volume - vol_drv = vol_attach.build_volume_driver( - self.adapter, self.host_uuid, instance, connection_info) - - # Before attempting to detach a volume, ensure the instance exists - # If a live migration fails, the compute manager will call detach - # for each volume attached to the instance, against the destination - # host. If the migration failed, then the VM is probably not on - # the destination host. - if not vm.instance_exists(self.adapter, instance): - LOG.info('During volume detach, the instance was not found on ' - 'this host.', instance=instance) - - # Check if there is live migration cleanup to do on this volume. - mig = self.live_migrations.get(instance.uuid, None) - if mig is not None and isinstance(mig, lpm.LiveMigrationDest): - mig.cleanup_volume(vol_drv) - return - - # Add a task to detach the volume - slot_mgr = slot.build_slot_mgr(instance, self.store_api) - flow.add(tf_stg.DisconnectVolume(vol_drv, slot_mgr)) - - # Save the new slot info - flow.add(tf_slot.SaveSlotStore(instance, slot_mgr)) - - # Run the flow - tf_base.run(flow, instance=instance) - - def snapshot(self, context, instance, image_id, update_task_state): - """Snapshots the specified instance. 
- - :param context: security context - :param instance: nova.objects.instance.Instance - :param image_id: Reference to a pre-created image that will hold the - snapshot. - :param update_task_state: Callback function to update the task_state - on the instance while the snapshot operation progresses. The - function takes a task_state argument and an optional - expected_task_state kwarg which defaults to - nova.compute.task_states.IMAGE_SNAPSHOT. See - nova.objects.instance.Instance.save for expected_task_state usage. - """ - if not self.disk_dvr.capabilities.get('snapshot'): - raise exception.NotSupportedWithOption( - message=_("The snapshot operation is not supported in " - "conjunction with a [powervm]/disk_driver setting " - "of %s.") % CONF.powervm.disk_driver) - self._log_operation('snapshot', instance) - - # Define the flow - flow = tf_lf.Flow("snapshot") - - # Notify that we're starting the process - flow.add(tf_img.UpdateTaskState(update_task_state, - task_states.IMAGE_PENDING_UPLOAD)) - - # Connect the instance's boot disk to the management partition, and - # scan the scsi bus and bring the device into the management partition. - flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance)) - - # Notify that the upload is in progress - flow.add(tf_img.UpdateTaskState( - update_task_state, task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD)) - - # Stream the disk to glance - flow.add(tf_img.StreamToGlance(context, self.image_api, image_id, - instance)) - - # Disconnect the boot disk from the management partition and delete the - # device - flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance)) - - # Run the flow - tf_base.run(flow, instance=instance) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password): - """Rescue the specified instance. - - :param nova.context.RequestContext context: - The context for the rescue. - :param nova.objects.instance.Instance instance: - The instance being rescued. - :param nova.network.model.NetworkInfo network_info: - Necessary network information for the resume. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param rescue_password: new root password to set for rescue. - """ - self._log_operation('rescue', instance) - - # Define the flow - flow = tf_lf.Flow("rescue") - - # Power Off the LPAR - flow.add(tf_vm.PowerOff(self.adapter, instance)) - - # Create and populate the rescue disk. - flow.add(tf_stg.CreateDiskForImg( - self.disk_dvr, context, instance, image_meta, - image_type=disk_dvr.DiskType.RESCUE)) - - # Connect up the disk to the LPAR - flow.add(tf_stg.ConnectDisk(self.disk_dvr, instance)) - - # Last step is to power on the system. - flow.add(tf_vm.PowerOn( - self.adapter, instance, - pwr_opts=pvm_popts.PowerOnOpts().bootmode(pvm_popts.BootMode.SMS))) - - # Run the flow - tf_base.run(flow, instance=instance) - - def unrescue(self, instance, network_info): - """Unrescue the specified instance. - - :param instance: nova.objects.instance.Instance - """ - self._log_operation('unrescue', instance) - - # Define the flow - flow = tf_lf.Flow("unrescue") - - # Power Off the LPAR - flow.add(tf_vm.PowerOff(self.adapter, instance)) - - # Detach the disk adapter for the rescue image - flow.add(tf_stg.DetachDisk( - self.disk_dvr, instance, disk_type=[disk_dvr.DiskType.RESCUE])) - - # Delete the storage disk for the rescue image - flow.add(tf_stg.DeleteDisk(self.disk_dvr, instance)) - - # Last step is to power on the system. 
- flow.add(tf_vm.PowerOn(self.adapter, instance)) - - # Run the flow - tf_base.run(flow, instance=instance) - - def power_off(self, instance, timeout=0, retry_interval=0): - """Power off the specified instance. - - :param instance: nova.objects.instance.Instance - :param timeout: time to wait for GuestOS to shutdown - :param retry_interval: How often to signal guest while - waiting for it to shutdown - """ - self._log_operation('power_off', instance) - force_immediate = (timeout == 0) - timeout = timeout or None - vm.power_off(self.adapter, instance, force_immediate=force_immediate, - timeout=timeout) - - def power_on(self, context, instance, network_info, - block_device_info=None): - """Power on the specified instance. - - :param instance: nova.objects.instance.Instance - """ - self._log_operation('power_on', instance) - vm.power_on(self.adapter, instance) - - def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): - """Reboot the specified instance. - - After this is called successfully, the instance's state - goes back to power_state.RUNNING. The virtualization - platform should ensure that the reboot action has completed - successfully even in cases in which the underlying domain/vm - is paused or halted/stopped. - - :param instance: nova.objects.instance.Instance - :param network_info: - :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` - :param reboot_type: Either a HARD or SOFT reboot - :param block_device_info: Info pertaining to attached volumes - :param bad_volumes_callback: Function to handle any bad volumes - encountered - """ - self._log_operation(reboot_type + ' reboot', instance) - vm.reboot(self.adapter, instance, reboot_type == 'HARD') - # pypowervm exceptions are sufficient to indicate real failure. - # Otherwise, pypowervm thinks the instance is up. - return True - - def get_available_resource(self, nodename): - """Retrieve resource information. - - This method is called when nova-compute launches, and - as part of a periodic task - - :param nodename: - node which the caller want to get resources from - a driver that manages only one node can safely ignore this - :return: Dictionary describing resources - """ - # Do this here so it refreshes each time this method is called. - self.host_wrapper = pvm_ms.System.get(self.adapter)[0] - return self._get_available_resource() - - def _get_available_resource(self): - # Get host information - data = pvm_host.build_host_resource_from_ms(self.host_wrapper) - - # Add the disk information - data["local_gb"] = self.disk_dvr.capacity - data["local_gb_used"] = self.disk_dvr.capacity_used - - return data - - def update_provider_tree(self, provider_tree, nodename, allocations=None): - """Update a ProviderTree with current provider and inventory data. - - :param nova.compute.provider_tree.ProviderTree provider_tree: - A nova.compute.provider_tree.ProviderTree object representing all - the providers in the tree associated with the compute node, and any - sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE`` - trait) associated via aggregate with any of those providers (but - not *their* tree- or aggregate-associated providers), as currently - known by placement. - :param nodename: - String name of the compute node (i.e. - ComputeNode.hypervisor_hostname) for which the caller is requesting - updated provider information. - :param allocations: Currently ignored by this driver. - """ - # Get (legacy) resource information. 
Same as get_available_resource, - # but we don't need to refresh self.host_wrapper as it was *just* - # refreshed by get_available_resource in the resource tracker's - # update_available_resource flow. - data = self._get_available_resource() - - # TODO(efried): Fix these to reflect something like reality - # For now, duplicate the logic the resource tracker uses via - # update_compute_node when get_inventory/update_provider_tree is not - # implemented. - cpu_alloc_ratio = CONF.cpu_allocation_ratio or 16.0 - cpu_reserved = CONF.reserved_host_cpus - mem_alloc_ratio = CONF.ram_allocation_ratio or 1.5 - mem_reserved = CONF.reserved_host_memory_mb - disk_alloc_ratio = CONF.disk_allocation_ratio or 1.0 - disk_reserved = self._get_reserved_host_disk_gb_from_config() - - inventory = { - orc.VCPU: { - 'total': data['vcpus'], - 'max_unit': data['vcpus'], - 'allocation_ratio': cpu_alloc_ratio, - 'reserved': cpu_reserved, - }, - orc.MEMORY_MB: { - 'total': data['memory_mb'], - 'max_unit': data['memory_mb'], - 'allocation_ratio': mem_alloc_ratio, - 'reserved': mem_reserved, - }, - orc.DISK_GB: { - # TODO(efried): Proper DISK_GB sharing when SSP driver in play - 'total': int(data['local_gb']), - 'max_unit': int(data['local_gb']), - 'allocation_ratio': disk_alloc_ratio, - 'reserved': disk_reserved, - }, - } - provider_tree.update_inventory(nodename, inventory) - - def get_host_uptime(self): - """Returns the result of calling "uptime" on the target host.""" - # trivial implementation from libvirt/driver.py for consistency - out, err = n_utils.execute('env', 'LANG=C', 'uptime') - return out - - def attach_interface(self, context, instance, image_meta, vif): - """Attach an interface to the instance.""" - self.plug_vifs(instance, [vif]) - - def detach_interface(self, context, instance, vif): - """Detach an interface from the instance.""" - self.unplug_vifs(instance, [vif]) - - def plug_vifs(self, instance, network_info): - """Plug VIFs into networks.""" - self._log_operation('plug_vifs', instance) - - # Define the flow - flow = tf_lf.Flow("plug_vifs") - - # Get the LPAR Wrapper - flow.add(tf_vm.Get(self.adapter, self.host_uuid, instance)) - - # Run the attach - slot_mgr = slot.build_slot_mgr(instance, self.store_api) - flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance, - network_info, self.host_uuid, slot_mgr)) - - # Save the new slot info - flow.add(tf_slot.SaveSlotStore(instance, slot_mgr)) - - # Run the flow - try: - tf_base.run(flow, instance=instance) - except exception.InstanceNotFound: - raise exception.VirtualInterfacePlugException( - _("Plug vif failed because instance %s was not found.") - % instance.name) - except Exception: - LOG.exception("PowerVM error plugging vifs.", instance=instance) - raise exception.VirtualInterfacePlugException( - _("Plug vif failed because of an unexpected error.")) - - def unplug_vifs(self, instance, network_info): - """Unplug VIFs from networks.""" - self._log_operation('unplug_vifs', instance) - - # Define the flow - flow = tf_lf.Flow("unplug_vifs") - - # Get the LPAR Wrapper - flow.add(tf_vm.Get(self.adapter, self.host_uuid, instance)) - - # Run the detach - slot_mgr = slot.build_slot_mgr(instance, self.store_api) - flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info, - self.host_uuid, slot_mgr)) - - # Save the new slot info - flow.add(tf_slot.SaveSlotStore(instance, slot_mgr)) - - # Run the flow - try: - tf_base.run(flow, instance=instance) - except exception.InstanceNotFound: - LOG.warning('VM was not found during unplug operation as it is 
' - 'already possibly deleted.', instance=instance) - except Exception: - LOG.exception("PowerVM error trying to unplug vifs.", - instance=instance) - raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid) - - def get_available_nodes(self, refresh=False): - """Returns nodenames of all nodes managed by the compute service. - - This method is for multi compute-nodes support. If a driver supports - multi compute-nodes, this method returns a list of nodenames managed - by the service. Otherwise, this method should return - [hypervisor_hostname]. - """ - - return [CONF.host] - - def legacy_nwinfo(self): - """Indicate if the driver requires the legacy network_info format.""" - return False - - def get_host_ip_addr(self): - """Retrieves the IP address of the Host.""" - # This code was pulled from the libvirt driver. - ips = compute_utils.get_machine_ips() - if CONF.my_ip not in ips: - LOG.warning('my_ip address (%(my_ip)s) was not found on ' - 'any of the interfaces: %(ifaces)s', - {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)}) - return CONF.my_ip - - def get_volume_connector(self, instance): - """Get connector information for the instance for attaching to volumes. - - Connector information is a dictionary representing the ip of the - machine that will be making the connection, the name of the iscsi - initiator and the hostname of the machine as follows:: - - { - 'ip': ip, - 'initiator': initiator, - 'host': hostname - } - - """ - # Put the values in the connector - connector = {} - wwpn_list = vol_attach.get_wwpns_for_volume_connector( - self.adapter, self.host_uuid, instance) - if wwpn_list is not None: - connector["wwpns"] = wwpn_list - connector["multipath"] = CONF.powervm.volume_use_multipath - connector['host'] = vol_attach.get_hostname_for_volume(instance) - initiator_dict = iscsi.get_iscsi_initiators(self.adapter) - if initiator_dict: - connector['initiator'] = list(initiator_dict.values())[0] - return connector - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, - timeout=0, retry_interval=0): - - disk_info = {} - - if flavor and flavor.root_gb < instance.root_gb: - raise exception.InstanceFaultRollback( - exception.ResizeError(reason=_('Cannot reduce disk size.'))) - - same_host = dest == self.get_host_ip_addr() - if same_host: - self._log_operation('resize', instance) - else: - self._log_operation('migration', instance) - - # Can't migrate the disks if they are not on shared storage - if not self._is_booted_from_volume(block_device_info): - - if not self.disk_dvr.capabilities['shared_storage']: - raise exception.InstanceFaultRollback( - exception.ResizeError( - reason=_('Cannot migrate local disks.'))) - - # Get disk info from disk driver. - disk_info = dict(disk_info, **self.disk_dvr.get_info()) - - # Define the migrate flow - flow = tf_lf.Flow("migrate_vm") - - # Power off the VM - flow.add(tf_vm.PowerOff(self.adapter, instance)) - - if not same_host: - # If VM is moving to a new host make sure the NVRAM is at the very - # latest. - flow.add(tf_vm.StoreNvram(self.nvram_mgr, instance, - immediate=True)) - if flavor.root_gb > instance.root_gb: - # Resize the root disk - flow.add(tf_stg.ExtendDisk( - self.disk_dvr, instance, dict(type='boot'), flavor.root_gb)) - - # Disconnect any volumes that are attached. They are reattached - # on the new VM (or existing VM if this is just a resize.) - # Extract the block devices. 
-        bdms = self._extract_bdm(block_device_info)
-        if bdms:
-            # Create the transaction manager (FeedTask) for Storage I/O.
-            xag = self._get_inst_xag(instance, bdms)
-            stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter,
-                                                          xag=xag)
-
-            # Get the slot map. This is so we build the client
-            # adapters in the same slots.
-            slot_mgr = slot.build_slot_mgr(
-                instance, self.store_api, adapter=self.adapter,
-                vol_drv_iter=self._vol_drv_iter(instance, bdms,
-                                                stg_ftsk=stg_ftsk))
-
-            # Determine if there are volumes to disconnect. If so, remove each
-            # volume (within the transaction manager)
-            self._add_volume_disconnection_tasks(context, instance, bdms, flow,
-                                                 stg_ftsk, slot_mgr)
-
-            # Add the transaction manager flow to the end of the 'storage
-            # disconnection' tasks. This will run all the disconnections in
-            # parallel
-            flow.add(stg_ftsk)
-
-        # Rename the VM to flag the resize and make it easy to see from
-        # pvmctl that the VM is being migrated. Using the resize name also
-        # ensures we don't destroy the VM on a revert when it's on the same
-        # host.
-        new_name = self._gen_resize_name(instance, same_host=same_host)
-        flow.add(tf_vm.Rename(self.adapter, instance, new_name))
-        try:
-            tf_base.run(flow, instance=instance)
-        except Exception as e:
-            raise exception.InstanceFaultRollback(e)
-
-        return disk_info
-
-    @staticmethod
-    def _gen_resize_name(instance, same_host=False):
-        """Generate a temporary name for the source VM being resized/migrated.
-
-        :param instance: nova.objects.instance.Instance being migrated/resized.
-        :param same_host: Boolean indicating whether the operation is a
-                          same-host resize (True) or a migration to another
-                          host (False).
-        :return: A new name which can be assigned to the source VM.
-        """
-        prefix = 'resize_' if same_host else 'migrate_'
-        return pvm_util.sanitize_partition_name_for_api(prefix + instance.name)
-
-    def finish_migration(self, context, migration, instance, disk_info,
-                         network_info, image_meta, resize_instance,
-                         block_device_info=None, power_on=True):
-        """Completes a resize or cold migration.
-
-        :param context: the context for the migration/resize
-        :param migration: the migrate/resize information
-        :param instance: nova.objects.instance.Instance being migrated/resized
-        :param disk_info: the newly transferred disk information
-        :param network_info:
-            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
-        :param nova.objects.ImageMeta image_meta:
-            The metadata of the image of the instance.
-        :param resize_instance: True if the instance disks are being resized,
-            False otherwise
-        :param block_device_info: instance volume block device info
-        :param power_on: True if the instance should be powered on, False
-            otherwise
-        """
-
-        # See if this was to the same host
-        same_host = migration.source_compute == migration.dest_compute
-
-        if same_host:
-            self._log_operation('finish resize', instance)
-        else:
-            self._log_operation('finish migration', instance)
-
-        # Ensure the disk drivers are compatible.
- booted_from_vol = self._is_booted_from_volume(block_device_info) - if (not same_host and not booted_from_vol): - # Can't migrate the disks if they are not on shared storage - if not self.disk_dvr.capabilities['shared_storage']: - raise exception.InstanceFaultRollback( - exception.ResizeError( - reason=_('Cannot migrate local disks.'))) - # Call the disk driver to evaluate the disk info - reason = self.disk_dvr.validate(disk_info) - if reason: - raise exception.InstanceFaultRollback( - exception.ResizeError(reason=reason)) - - # Extract the block devices. - bdms = self._extract_bdm(block_device_info) - - # Define the flow - flow = tf_lf.Flow("finish_migration") - - # If attaching disks or volumes - if bdms or not same_host: - # Create the transaction manager (FeedTask) for Storage I/O. - xag = self._get_inst_xag(instance, bdms) - stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter, - xag=xag) - # We need the slot manager - # a) If migrating to a different host: to restore the proper slots; - # b) If adding/removing block devices, to register the slots. - slot_mgr = slot.build_slot_mgr( - instance, self.store_api, adapter=self.adapter, - vol_drv_iter=self._vol_drv_iter(instance, bdms, - stg_ftsk=stg_ftsk)) - else: - stg_ftsk = None - - if same_host: - # This is just a resize. - new_name = self._gen_resize_name(instance, same_host=True) - flow.add(tf_vm.Resize(self.adapter, self.host_wrapper, instance, - name=new_name)) - else: - # This is a migration over to another host. We have a lot of work. - # Create the LPAR - flow.add(tf_vm.Create(self.adapter, self.host_wrapper, instance, - stg_ftsk=stg_ftsk, nvram_mgr=self.nvram_mgr, - slot_mgr=slot_mgr)) - - # Create a flow for the network IO - flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance, - network_info, self.host_uuid, slot_mgr)) - flow.add(tf_net.PlugMgmtVif( - self.adapter, instance, self.host_uuid, slot_mgr)) - - # Need to attach the boot disk, if present. - if not self._is_booted_from_volume(block_device_info): - flow.add(tf_stg.FindDisk(self.disk_dvr, context, instance, - disk_dvr.DiskType.BOOT)) - - # Connects up the disk to the LPAR - # TODO(manas) Connect the disk flow into the slot lookup map - flow.add(tf_stg.ConnectDisk( - self.disk_dvr, instance, stg_ftsk=stg_ftsk)) - - if bdms: - # Determine if there are volumes to connect. If so, add a - # connection for each type. - self._add_volume_connection_tasks( - context, instance, bdms, flow, stg_ftsk, slot_mgr) - - if stg_ftsk: - # Add the transaction manager flow to the end of the 'storage - # connection' tasks to run all the connections in parallel - flow.add(stg_ftsk) - - if power_on: - flow.add(tf_vm.PowerOn(self.adapter, instance)) - - # Run the flow - try: - tf_base.run(flow, instance=instance) - except Exception as e: - raise exception.InstanceFaultRollback(e) - - def confirm_migration(self, context, migration, instance, network_info): - """Confirms a resize, destroying the source VM. - - :param migration: the migrate/resize information - :param instance: nova.objects.instance.Instance - :param network_info: - :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` - """ - # See if this was to the same host - same_host = migration.source_compute == migration.dest_compute - if same_host: - # This was a local resize, don't delete our only VM! - self._log_operation('confirm resize', instance) - vm.rename(self.adapter, instance, instance.name) - return - - # Confirming the migrate means we need to delete source VM. 
-        self._log_operation('confirm migration', instance)
-
-        # Destroy the old VM.
-        destroy_disks = not self.disk_dvr.capabilities['shared_storage']
-        self._destroy(context, instance, block_device_info=None,
-                      destroy_disks=destroy_disks, shutdown=False)
-
-    def finish_revert_migration(self, context, instance, network_info,
-                                block_device_info=None, power_on=True):
-        """Finish reverting a resize on the source host.
-
-        :param context: the context for the finish_revert_migration
-        :param instance: nova.objects.instance.Instance being migrated/resized
-        :param network_info:
-            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
-        :param block_device_info: instance volume block device info
-        :param power_on: True if the instance should be powered on, False
-            otherwise
-        """
-        self._log_operation('revert resize/migration', instance)
-
-        # This method is always run on the source host, so we just need to
-        # revert the VM back to its old sizing, if it was changed at all.
-        # If it was a migration, then it wasn't changed, but it shouldn't
-        # hurt to "update" it with the prescribed flavor. This makes it
-        # easy to handle both resize and migrate.
-        #
-        # The flavor should be the 'old' flavor now.
-        vm.power_off(self.adapter, instance)
-        vm.update(self.adapter, self.host_wrapper, instance)
-
-        if power_on:
-            vm.power_on(self.adapter, instance)
-
-    def ensure_filtering_rules_for_instance(self, instance, network_info):
-        """Set up filtering rules and wait for their completion.
-
-        To migrate an instance, filtering rules for hypervisors and
-        firewalls must be in place on the destination host. (We wait only
-        for the hypervisor filtering rules, since the firewall rules can
-        be set faster.)
-
-        Concretely, the below methods must be called.
-        - setup_basic_filtering (for nova-basic, etc.)
-        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)
-
-        to_xml may have to be called since it defines PROJNET and PROJMASK,
-        but libvirt migrates those values through migrateToURI(), so it does
-        not need to be called.
-
-        Don't use a thread for this method, since migration should not be
-        started until the filtering-rule setup operations are complete.
-
-        :param instance: nova.objects.instance.Instance object
-
-        """
-        # No op for PowerVM
-        pass
-
-    def check_can_live_migrate_destination(self, context, instance,
-                                           src_compute_info, dst_compute_info,
-                                           block_migration=False,
-                                           disk_over_commit=False):
-        """Check if it is possible to execute live migration.
-
-        This runs checks on the destination host, and then calls
-        back to the source host to check the results.
- - :param context: security context - :param instance: nova.db.sqlalchemy.models.Instance - :param src_compute_info: Info about the sending machine - :param dst_compute_info: Info about the receiving machine - :param block_migration: if true, prepare for block migration - :param disk_over_commit: if true, allow disk over commit - - :returns: a dict containing migration info (hypervisor-dependent) - """ - LOG.info("Checking live migration capability on destination host.", - instance=instance) - - mig = lpm.LiveMigrationDest(self, instance) - self.live_migrations[instance.uuid] = mig - return mig.check_destination(context, src_compute_info, - dst_compute_info) - - def cleanup_live_migration_destination_check(self, context, - dest_check_data): - """Do required cleanup on dest host after check_can_live_migrate calls - - :param context: security context - :param dest_check_data: result of check_can_live_migrate_destination - """ - LOG.info("Cleaning up from checking live migration capability " - "on destination.") - - def check_can_live_migrate_source(self, context, instance, - dest_check_data, block_device_info=None): - """Check if it is possible to execute live migration. - - This checks if the live migration can succeed, based on the - results from check_can_live_migrate_destination. - - :param context: security context - :param instance: nova.db.sqlalchemy.models.Instance - :param dest_check_data: result of check_can_live_migrate_destination - :param block_device_info: result of _get_instance_block_device_info - :returns: a dict containing migration info (hypervisor-dependent) - """ - LOG.info("Checking live migration capability on source host.", - instance=instance) - mig = lpm.LiveMigrationSrc(self, instance, dest_check_data) - self.live_migrations[instance.uuid] = mig - - # Get a volume driver for each volume - vol_drvs = self._build_vol_drivers(context, instance, - block_device_info) - - return mig.check_source(context, block_device_info, vol_drvs) - - def pre_live_migration(self, context, instance, block_device_info, - network_info, disk_info, migrate_data=None): - """Prepare an instance for live migration - - :param context: security context - :param instance: nova.objects.instance.Instance object - :param block_device_info: instance block device information - :param network_info: instance network information - :param disk_info: instance disk information - :param migrate_data: a LiveMigrateData object - """ - LOG.info("Pre live migration processing.", instance=instance) - mig = self.live_migrations[instance.uuid] - - # Get a volume driver for each volume - vol_drvs = self._build_vol_drivers(context, instance, - block_device_info) - - # Run pre-live migration - return mig.pre_live_migration(context, block_device_info, network_info, - disk_info, migrate_data, vol_drvs) - - def live_migration(self, context, instance, dest, - post_method, recover_method, block_migration=False, - migrate_data=None): - """Live migration of an instance to another host. - - :param context: security context - :param instance: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :param dest: destination host - :param post_method: - post operation method. - expected nova.compute.manager._post_live_migration. - :param recover_method: - recovery method when any exception occurs. - expected nova.compute.manager._rollback_live_migration. - :param block_migration: if true, migrate VM disk. 
- :param migrate_data: a LiveMigrateData object - - """ - self._log_operation('live_migration', instance) - try: - mig = self.live_migrations[instance.uuid] - try: - mig.live_migration(context, migrate_data) - except pvm_exc.JobRequestTimedOut as timeout_ex: - # If the migration operation exceeds configured timeout - LOG.error("Live migration timed out. Aborting migration", - instance=instance) - mig.migration_abort() - self._migration_exception_util(context, instance, dest, - recover_method, - migrate_data, - mig, ex=timeout_ex) - except Exception as e: - LOG.exception("PowerVM error during live migration.", - instance=instance) - self._migration_exception_util(context, instance, dest, - recover_method, - migrate_data, - mig, ex=e) - - LOG.debug("Calling post live migration method.", instance=instance) - # Post method to update host in OpenStack and finish live-migration - post_method(context, instance, dest, block_migration, migrate_data) - finally: - # Remove the migration record on the source side. - del self.live_migrations[instance.uuid] - - def _migration_exception_util(self, context, instance, dest, - recover_method, migrate_data, mig, ex): - """Migration exception utility. - - :param context: security context - :param instance: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :param dest: destination host - :param recover_method: - recovery method when any exception occurs. - expected nova.compute.manager._rollback_live_migration. - :param migrate_data: a LiveMigrateData object - :param mig: live_migration object - :param ex: exception reason - - """ - LOG.warning("Rolling back live migration.", instance=instance) - try: - mig.rollback_live_migration(context) - recover_method(context, instance, dest, migrate_data=migrate_data) - except Exception: - LOG.exception("PowerVM error rolling back live migration.", - instance=instance) - - raise lpm.LiveMigrationFailed(name=instance.name, - reason=six.text_type(ex)) - - def rollback_live_migration_at_destination(self, context, instance, - network_info, - block_device_info, - destroy_disks=True, - migrate_data=None): - """Clean up destination node after a failed live migration. - - :param context: security context - :param instance: instance object that was being migrated - :param network_info: instance network information - :param block_device_info: instance block device information - :param destroy_disks: - if true, destroy disks at destination during cleanup - :param migrate_data: a LiveMigrateData object - - """ - # Run the rollback - mig = self.live_migrations[instance.uuid] - mig.rollback_live_migration_at_destination( - context, instance, network_info, block_device_info, - destroy_disks=destroy_disks, migrate_data=migrate_data) - - # Remove the active migration - del self.live_migrations[instance.uuid] - - def check_instance_shared_storage_local(self, context, instance): - """Check if instance files located on shared storage. - - This runs check on the destination host, and then calls - back to the source host to check the results. - - :param context: security context - :param instance: nova.objects.instance.Instance object - """ - # Defer to the disk driver method. - return self.disk_dvr.check_instance_shared_storage_local( - context, instance) - - def check_instance_shared_storage_remote(self, context, data): - """Check if instance files located on shared storage. 
-
-        :param context: security context
-        :param data: result of check_instance_shared_storage_local
-        """
-        # Defer to the disk driver method.
-        return self.disk_dvr.check_instance_shared_storage_remote(
-            context, data)
-
-    def check_instance_shared_storage_cleanup(self, context, data):
-        """Do cleanup on host after check_instance_shared_storage calls
-
-        :param context: security context
-        :param data: result of check_instance_shared_storage_local
-        """
-        # Defer to the disk driver method.
-        return self.disk_dvr.check_instance_shared_storage_cleanup(
-            context, data)
-
-    def post_live_migration(self, context, instance, block_device_info,
-                            migrate_data=None):
-        """Post operation of live migration at source host.
-
-        :param context: security context
-        :param instance: instance object that was migrated
-        :param block_device_info: instance block device information
-        :param migrate_data: a LiveMigrateData object
-        """
-        # Build the volume drivers
-        vol_drvs = self._build_vol_drivers(context, instance,
-                                           block_device_info)
-
-        mig = self.live_migrations[instance.uuid]
-        mig.post_live_migration(vol_drvs, migrate_data)
-
-    def post_live_migration_at_source(self, context, instance, network_info):
-        """Unplug VIFs from networks at source.
-
-        :param context: security context
-        :param instance: instance object reference
-        :param network_info: instance network information
-        """
-        LOG.info("Post live migration processing on source host.",
-                 instance=instance)
-        mig = self.live_migrations[instance.uuid]
-        mig.post_live_migration_at_source(network_info)
-
-    def post_live_migration_at_destination(self, context, instance,
-                                           network_info,
-                                           block_migration=False,
-                                           block_device_info=None):
-        """Post operation of live migration at destination host.
-
-        :param context: security context
-        :param instance: instance object that is migrated
-        :param network_info: instance network information
-        :param block_migration: if true, post operation of block_migration.
-        :param block_device_info: instance block device information.
-        """
-        LOG.info("Post live migration processing on destination host.",
-                 instance=instance)
-        mig = self.live_migrations[instance.uuid]
-        mig.instance = instance
-
-        bdms = self._extract_bdm(block_device_info)
-        # Get a volume driver iterator for volume and BDM mapping
-        vol_drv_iter = self._vol_drv_iter(instance, bdms)
-
-        # Run post live migration
-        mig.post_live_migration_at_destination(network_info, vol_drv_iter)
-        del self.live_migrations[instance.uuid]
-
-    def _vol_drv_iter(self, instance, bdms, stg_ftsk=None):
-        """Yields a bdm and volume driver."""
-        # Get a volume driver for each volume
-        for bdm in bdms or []:
-            vol_drv = vol_attach.build_volume_driver(
-                self.adapter, self.host_uuid, instance,
-                bdm.get('connection_info'), stg_ftsk=stg_ftsk)
-            yield bdm, vol_drv
-
-    def _build_vol_drivers(self, context, instance, block_device_info):
-        """Builds the volume connector drivers for a block device info."""
-        # Get a volume driver for each volume
-        bdms = self._extract_bdm(block_device_info)
-        return [vol_drv for bdm, vol_drv in self._vol_drv_iter(instance, bdms)]
-
-    def unfilter_instance(self, instance, network_info):
-        """Stop filtering instance."""
-        # No op for PowerVM
-        pass
-
-    @staticmethod
-    def _extract_bdm(block_device_info):
-        """Returns the block device mapping out of the block device info.
-
-        The block device mapping is a list of instances of block device
-        classes from nova.virt.block_device. Each block device
-        represents one volume connection.
-
-        An example string representation of a DriverVolumeBlockDevice
-        from the early Liberty time frame is:
-        {'guest_format': None,
-        'boot_index': 0,
-        'mount_device': u'/dev/sda',
-        'connection_info': {u'driver_volume_type': u'fibre_channel',
-                            u'serial': u'e11765ea-dd14-4aa9-a953-4fd6b4999635',
-                            u'data': {u'initiator_target_map':
-                                      {u'21000024ff747e59':
-                                       [u'500507680220E522',
-                                        u'500507680210E522'],
-                                       u'21000024ff747e58':
-                                       [u'500507680220E522',
-                                        u'500507680210E522']},
-                                      u'vendor': u'IBM',
-                                      u'target_discovered': False,
-                                      u'target_UID': u'600507680282...',
-                                      u'qos_specs': None,
-                                      u'volume_id': u'e11765ea-...',
-                                      u'target_lun': u'2',
-                                      u'access_mode': u'rw',
-                                      u'target_wwn': u'500507680220E522'}
-                            },
-        'disk_bus': None,
-        'device_type': u'disk',
-        'delete_on_termination': True}
-        """
-        if block_device_info is None:
-            return []
-        return block_device_info.get('block_device_mapping', [])
-
-    def get_vnc_console(self, context, instance):
-        """Get connection info for a vnc console.
-
-        :param context: security context
-        :param instance: nova.objects.instance.Instance
-
-        :return: An instance of console.type.ConsoleVNC
-        """
-        self._log_operation('get_vnc_console', instance)
-        lpar_uuid = vm.get_pvm_uuid(instance)
-
-        # Build the connection to the VNC.
-        host = CONF.vnc.server_proxyclient_address
-        use_x509_auth = CONF.powervm.vnc_use_x509_auth
-        ca_certs = CONF.powervm.vnc_ca_certs
-        server_key = CONF.powervm.vnc_server_key
-        server_cert = CONF.powervm.vnc_server_cert
-        try:
-            # Open up a remote vterm with the host and certificates configured.
-            # This will only use TLS if use_x509_auth is set to True.
-            port = pvm_vterm.open_remotable_vnc_vterm(
-                self.adapter, lpar_uuid, host, vnc_path=lpar_uuid,
-                use_x509_auth=use_x509_auth, ca_certs=ca_certs,
-                server_cert=server_cert, server_key=server_key)
-        except pvm_exc.HttpNotFound:
-            raise exception.InstanceNotFound(instance_id=instance.uuid)
-        except pvm_exc.Error as exc:
-            # Otherwise wrap the error in an exception that can be handled.
-            LOG.exception("Unable to open console.", instance=instance)
-            msg = (_("VNC based terminal for instance %(instance_name)s "
-                     "failed to open: %(exc_msg)s")
-                   % {'instance_name': instance.name,
-                      'exc_msg': exc.args[0]})
-            # Need to raise ConsoleTypeUnavailable with an overwritten message
-            # because otherwise the exception will not be caught. It is
-            # disallowed to send a non-nova exception over the wire.
-            raise exception.ConsoleTypeUnavailable(msg)
-
-        # Note that the VNC viewer will wrap the internal_access_path with
-        # the HTTP content.
-        return console_type.ConsoleVNC(host=host, port=port,
-                                       internal_access_path=lpar_uuid)
-
-    def _get_inst_xag(self, instance, bdms, recreate=False):
-        """Returns the extended attributes required for a given instance.
-
-        This is used in coordination with the FeedTask. It identifies ahead
-        of time what each request requires for its general operations.
-
-        :param instance: Nova instance for which the volume adapter is needed.
-        :param bdms: The BDMs for the operation.
-        :param recreate: (Optional, Default: False) If set to True, return
-            all of the storage XAGs so that a full scrub can be done (since
-            specific slots are needed).
-        :return: List of extended attributes required for the operation.
-        """
-        if recreate:
-            return {pvm_const.XAG.VIO_FMAP, pvm_const.XAG.VIO_SMAP,
-                    pvm_const.XAG.VIO_STOR}
-        # All operations for deploy/destroy require SCSI by default. This is
-        # either vopt, local/SSP disks, etc.
- xags = {pvm_const.XAG.VIO_SMAP} - - # BDMs could be none, if there are no cinder volumes. - bdms = bdms if bdms else [] - - # If we have any volumes, add the volumes required mapping XAGs. - for bdm in bdms: - driver_type = bdm.get('connection_info').get('driver_volume_type') - vol_cls = vol_attach.get_volume_class(driver_type) - xags.update(set(vol_cls.min_xags())) - - LOG.debug('Instance XAGs: %(xags)s.', {'xags': ','.join(xags)}, - instance=instance) - return list(xags) - - def _get_boot_connectivity_type(self, bdms, block_device_info): - """Get connectivity information for the instance. - - :param bdms: The BDMs for the operation. If boot volume of - the instance is ssp lu or local disk, the bdms is None. - :param block_device_info: Instance volume block device info. - :return: The boot connectivity type. - If boot volume is an npiv volume, returns 'fibre_channel'. - Otherwise, returns 'vscsi'. - """ - if self._is_booted_from_volume(block_device_info) and bdms is not None: - for bdm in bdms: - if bdm.get('boot_index') == 0: - return self._get_connectivity_type(bdm) - # Default connectivity type is 'vscsi' - return 'vscsi' - - @staticmethod - def _get_connectivity_type(bdm): - conn_info = bdm.get('connection_info') - if 'connection-type' in conn_info['data']: - connectivity_type = conn_info['data']['connection-type'] - return ('vscsi' if connectivity_type == 'pv_vscsi' - else connectivity_type) - # Seemingly bogus path (driver_volume_type shouldn't be in 'data'), - # preserved for potential compatibility. - if 'driver_volume_type' in conn_info['data']: - return conn_info['data']['driver_volume_type'] - # Actual location for driver_volume_type. Default to vscsi. - return conn_info.get('driver_volume_type', 'vscsi') - - def deallocate_networks_on_reschedule(self, instance): - """Does the driver want networks deallocated on reschedule? - - :param instance: the instance object. - :returns: Boolean value. If True deallocate networks on reschedule. - """ - return True - - def _cleanup_orphan_adapters(self, vswitch_name): - """Finds and removes trunk VEAs that have no corresponding CNA.""" - orphans = pvm_cna.find_orphaned_trunks(self.adapter, vswitch_name) - for orphan in orphans: - LOG.info("Deleting orphan CNA: %s", orphan.dev_name) - orphan.delete() diff --git a/nova_powervm/virt/powervm/event.py b/nova_powervm/virt/powervm/event.py deleted file mode 100644 index 35186548..00000000 --- a/nova_powervm/virt/powervm/event.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2014, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
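A standalone sketch of the connectivity-type fallback implemented by _get_connectivity_type above; the helper name and the sample BDM values are illustrative assumptions, not code from the original tree (only the key names and the resolution order come from the deleted method):

def resolve_connectivity_type(bdm):
    """Resolve a BDM's connectivity type via the same fallback chain.

    Order: data['connection-type'] (mapping 'pv_vscsi' to 'vscsi'), then
    the seemingly bogus data['driver_volume_type'], then the top-level
    driver_volume_type, defaulting to 'vscsi'.
    """
    conn_info = bdm.get('connection_info') or {}
    data = conn_info.get('data', {})
    if 'connection-type' in data:
        ctype = data['connection-type']
        return 'vscsi' if ctype == 'pv_vscsi' else ctype
    if 'driver_volume_type' in data:
        return data['driver_volume_type']
    return conn_info.get('driver_volume_type', 'vscsi')

# Hypothetical example:
# resolve_connectivity_type(
#     {'connection_info': {'data': {'connection-type': 'pv_vscsi'}}})
# -> 'vscsi'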
- -from eventlet import greenthread -from nova.compute import power_state -from nova.compute import task_states -from nova import context as ctx -from nova import exception -from nova.virt import event -from oslo_concurrency import lockutils -from oslo_log import log as logging -from pypowervm import adapter as pvm_apt -from pypowervm import util as pvm_util -from pypowervm.wrappers import event as pvm_evt - -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) - -_INST_ACTIONS_HANDLED = {'PartitionState', 'NVRAM'} -_NO_EVENT_TASK_STATES = { - task_states.SPAWNING, - task_states.RESIZE_MIGRATING, - task_states.RESIZE_REVERTING, - task_states.REBOOTING, - task_states.REBOOTING_HARD, - task_states.REBOOT_STARTED_HARD, - task_states.PAUSING, - task_states.UNPAUSING, - task_states.SUSPENDING, - task_states.RESUMING, - task_states.POWERING_OFF, - task_states.POWERING_ON, - task_states.RESCUING, - task_states.UNRESCUING, - task_states.REBUILDING, - task_states.REBUILD_SPAWNING, - task_states.MIGRATING, - task_states.DELETING, - task_states.SOFT_DELETING, - task_states.RESTORING, - task_states.SHELVING, - task_states.SHELVING_OFFLOADING, - task_states.UNSHELVING, -} - -_LIFECYCLE_EVT_LOCK = 'pvm_lifecycle_event' - -_CONTEXT = None - - -def _get_instance(inst, pvm_uuid): - global _CONTEXT - if inst is not None: - return inst - with lockutils.lock('get_context_once'): - if _CONTEXT is None: - _CONTEXT = ctx.get_admin_context() - LOG.debug('PowerVM Nova Event Handler: Getting inst for id %s', pvm_uuid) - return vm.get_instance(_CONTEXT, pvm_uuid) - - -class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler): - """Used to receive and handle events from PowerVM and convert to Nova.""" - def __init__(self, driver): - self._driver = driver - self._lifecycle_handler = PowerVMLifecycleEventHandler(self._driver) - self._uuid_cache = {} - - def _get_inst_uuid(self, inst, pvm_uuid): - """Retrieve instance UUID from cache keyed by the PVM UUID. - - :param inst: the instance object. - :param pvm_uuid: the PowerVM uuid of the vm - :return inst: the instance object. - :return inst_uuid: The nova instance uuid - """ - inst_uuid = self._uuid_cache.get(pvm_uuid) - if not inst_uuid: - inst = _get_instance(inst, pvm_uuid) - inst_uuid = inst.uuid if inst else None - if inst_uuid: - self._uuid_cache[pvm_uuid] = inst_uuid - return inst, inst_uuid - - def _handle_inst_event(self, inst, pvm_uuid, details): - """Handle an instance event. - - This method will check if an instance event signals a change in the - state of the instance as known to OpenStack and if so, trigger an - event upward. - - :param inst: the instance object. - :param pvm_uuid: the PowerVM uuid of the vm - :param details: Parsed Details from the event - :return inst: The nova instance, which may be None - """ - # If the NVRAM has changed for this instance and a store is configured. - if 'NVRAM' in details and self._driver.nvram_mgr is not None: - # Schedule the NVRAM for the instance to be stored. - # We'll need to fetch the instance object in the event we don't - # have the object and the UUID isn't cached. By updating the - # object reference here and returning it the process method will - # save the object in its cache. 
- inst, inst_uuid = self._get_inst_uuid(inst, pvm_uuid) - if inst_uuid is None: - return None - - LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid) - self._driver.nvram_mgr.store(inst_uuid) - - # If the state of the vm changed see if it should be handled - if 'PartitionState' in details: - self._lifecycle_handler.process(inst, pvm_uuid) - - return inst - - def process(self, events): - """Process the event that comes back from PowerVM. - - :param events: The pypowervm Event wrapper. - """ - inst_cache = {} - for pvm_event in events: - try: - if pvm_event.etype in (pvm_evt.EventType.NEW_CLIENT, - pvm_evt.EventType.CACHE_CLEARED): - # TODO(efried): Should we pull and check all the LPARs? - self._uuid_cache.clear() - continue - # See if this uri (from data) ends with a PowerVM UUID. - pvm_uuid = pvm_util.get_req_path_uuid( - pvm_event.data, preserve_case=True) - if pvm_uuid is None: - continue - # Is it an instance event? - if not pvm_event.data.endswith('LogicalPartition/' + pvm_uuid): - continue - - # Are we deleting? Meaning we need to clear the cache entry. - if pvm_event.etype == pvm_evt.EventType.DELETE_URI: - try: - del self._uuid_cache[pvm_uuid] - except KeyError: - pass - continue - # Pull all the pieces of the event. - details = (pvm_event.detail.split(',') if pvm_event.detail - else []) - # Is it one we care about? - if not _INST_ACTIONS_HANDLED & set(details): - continue - - inst_cache[pvm_event.data] = self._handle_inst_event( - inst_cache.get(pvm_event.data), pvm_uuid, details) - - except Exception: - # We deliberately keep this exception clause as broad as - # possible - we don't want *any* error to stop us from - # attempting to process the next event. - LOG.exception('Unable to process PowerVM event %s', - str(pvm_event)) - - -class PowerVMLifecycleEventHandler(object): - """Because lifecycle events are weird, we need our own handler. - - Lifecycle events that come back from the hypervisor are very 'surface - value'. They tell you that it started, stopped, migrated, etc... However, - multiple events may come in quickly that represent a bigger action. For - instance a restart will generate a stop and then a start rapidly. - - Nova being asynchronous can flip those events around. Where the start - would flow through before the stop. That is bad. - - We need to make sure that these events that can be linked to bigger - lifecycle events can be wiped out if the converse action is run against - it. Ex. Don't send a stop event up to nova if you received a start event - shortly after it. 
- """ - def __init__(self, driver): - self._driver = driver - self._delayed_event_threads = {} - - @lockutils.synchronized(_LIFECYCLE_EVT_LOCK) - def _emit_event(self, pvm_uuid, inst): - # Get the current state - try: - pvm_state = vm.get_vm_qp(self._driver.adapter, pvm_uuid, - 'PartitionState') - except exception.InstanceNotFound: - LOG.debug("LPAR %s was deleted while event was delayed.", pvm_uuid, - instance=inst) - return - - LOG.debug('New state %s for partition %s', pvm_state, pvm_uuid, - instance=inst) - - inst = _get_instance(inst, pvm_uuid) - if inst is None: - LOG.debug("Not emitting LifecycleEvent: no instance for LPAR %s", - pvm_uuid) - return - - # If we're in the middle of a nova-driven operation, no event necessary - if inst.task_state in _NO_EVENT_TASK_STATES: - LOG.debug("Not emitting LifecycleEvent: instance task_state is %s", - inst.task_state, instance=inst) - return - - # See if it's really a change of state from what OpenStack knows - transition = vm.translate_event(pvm_state, inst.power_state) - if transition is None: - LOG.debug("No LifecycleEvent necessary for pvm_state(%s) and " - "power_state(%s).", pvm_state, - power_state.STATE_MAP[inst.power_state], instance=inst) - return - - # Log as if normal event - lce = event.LifecycleEvent(inst.uuid, transition) - LOG.info('Sending LifecycleEvent for instance state change to: %s', - pvm_state, instance=inst) - self._driver.emit_event(lce) - - # Delete out the queue - del self._delayed_event_threads[pvm_uuid] - - @lockutils.synchronized(_LIFECYCLE_EVT_LOCK) - def process(self, inst, pvm_uuid): - """Emits the event, or adds it to the queue for delayed emission. - - :param inst: The nova instance. May be None. - :param pvm_uuid: The PowerVM LPAR UUID. - """ - # Cancel out the current delay event. Can happen as it goes - # from SHUTTING_DOWN to NOT_ACTIVATED, multiple delayed events - # can come in at once. Only want the last. - if pvm_uuid in self._delayed_event_threads: - self._delayed_event_threads[pvm_uuid].cancel() - - # Spawn in the background - elem = greenthread.spawn_after(15, self._emit_event, pvm_uuid, inst) - self._delayed_event_threads[pvm_uuid] = elem diff --git a/nova_powervm/virt/powervm/exception.py b/nova_powervm/virt/powervm/exception.py deleted file mode 100644 index d5e4b419..00000000 --- a/nova_powervm/virt/powervm/exception.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Exceptions specific to the powervm nova driver.""" - -import abc -from nova import exception as nex - -import six - -from nova_powervm.virt.powervm.i18n import _ - - -@six.add_metaclass(abc.ABCMeta) -class AbstractMediaException(nex.NovaException): - pass - - -@six.add_metaclass(abc.ABCMeta) -class AbstractDiskException(nex.NovaException): - pass - - -class NoMediaRepoVolumeGroupFound(AbstractMediaException): - msg_fmt = _("Unable to locate the volume group %(vol_grp)s to store the " - "virtual optical media within. 
Unable to create the " - "media repository.") - - -class NoDiskDiscoveryException(nex.NovaException): - """Failed to discover any disk.""" - msg_fmt = _("Having scanned SCSI bus %(bus)x on the management partition, " - "disk with UDID %(udid)s failed to appear after %(polls)d " - "polls over %(timeout)d seconds.") - - -class UniqueDiskDiscoveryException(nex.NovaException): - """Expected to discover exactly one disk, but discovered >1.""" - msg_fmt = _("Expected to find exactly one disk on the management " - "partition at %(path_pattern)s; found %(count)d.") - - -class DeviceDeletionException(nex.NovaException): - """Expected to delete a disk, but the disk is still present afterward.""" - msg_fmt = _("Device %(devpath)s is still present on the management " - "partition after attempting to delete it. Polled %(polls)d " - "times over %(timeout)d seconds.") - - -class InstanceDiskMappingFailed(AbstractDiskException): - msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to " - "the management partition from any Virtual I/O Server.") - - -class NewMgmtMappingNotFoundException(nex.NovaException): - """Just created a mapping to the mgmt partition, but can't find it.""" - msg_fmt = _("Failed to find newly-created mapping of storage element " - "%(stg_name)s from Virtual I/O Server %(vios_name)s to the " - "management partition.") - - -class VGNotFound(AbstractDiskException): - msg_fmt = _("Unable to locate the volume group '%(vg_name)s' for this " - "operation.") - - -class ClusterNotFoundByName(AbstractDiskException): - msg_fmt = _("Unable to locate the Cluster '%(clust_name)s' for this " - "operation.") - - -class NoConfigNoClusterFound(AbstractDiskException): - msg_fmt = _('Unable to locate any Cluster for this operation.') - - -class TooManyClustersFound(AbstractDiskException): - msg_fmt = _("Unexpectedly found %(clust_count)d Clusters " - "matching name '%(clust_name)s'.") - - -class NoConfigTooManyClusters(AbstractDiskException): - msg_fmt = _("No cluster_name specified. Refusing to select one of the " - "%(clust_count)d Clusters found.") - - -class VolumeAttachFailed(nex.NovaException): - msg_fmt = _("Unable to attach storage (id: %(volume_id)s) to virtual " - "machine %(instance_name)s. %(reason)s") - - -class VolumeExtendFailed(nex.NovaException): - msg_fmt = _("Unable to extend volume (id: %(volume_id)s) on virtual " - "machine %(instance_name)s.") - - -class VolumeDetachFailed(nex.NovaException): - msg_fmt = _("Unable to detach volume (id: %(volume_id)s) from virtual " - "machine %(instance_name)s. %(reason)s") - - -class VolumePreMigrationFailed(nex.NovaException): - msg_fmt = _("Unable to perform pre live migration steps on volume (id: " - "%(volume_id)s) from virtual machine %(instance_name)s.") - - -class PowerVMAPIFailed(nex.NovaException): - msg_fmt = _("PowerVM API failed to complete for instance=%(inst_name)s." - "%(reason)s") - - -class ViosNotAvailable(nex.NovaException): - msg_fmt = _("No Virtual I/O Servers are available. The driver attempted " - "to wait for a VIOS to become active for %(wait_time)d " - "seconds. The compute agent is not able to start if no " - "Virtual I/O Servers are available. 
Please check the RMC " - "connectivity between the PowerVM NovaLink and the Virtual " - "I/O Servers and then restart the Nova Compute Agent.") - - -class NoActiveViosForFeedTask(nex.NovaException): - msg_fmt = _("There are no active Virtual I/O Servers available.") - - -class InvalidRebuild(nex.NovaException): - msg_fmt = _("Unable to rebuild virtual machine on new host. Error is " - "%(error)s") - - -class OptRequiredIfOtherOptValue(nex.NovaException): - msg_fmt = _("The %(then_opt)s option is required if %(if_opt)s is " - "specified as '%(if_value)s'.") diff --git a/nova_powervm/virt/powervm/host.py b/nova_powervm/virt/powervm/host.py deleted file mode 100644 index b9197032..00000000 --- a/nova_powervm/virt/powervm/host.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2014, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import math -from nova.objects import fields -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from nova import conf as cfg - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -# Power VM hypervisor info -# Normally, the hypervisor version is a string in the form of '8.0.0' and -# converted to an int with nova.virt.utils.convert_version_to_int() however -# there isn't currently a mechanism to retrieve the exact version. -# Complicating this is the fact that nova conductor only allows live migration -# from the source host to the destination if the source is equal to or less -# than the destination version. PowerVM live migration limitations are -# checked by the PowerVM capabilities flags and not specific version levels. -# For that reason, we'll just publish the major level. -IBM_POWERVM_HYPERVISOR_VERSION = 8 - -# The types of LPARS that are supported. -POWERVM_SUPPORTED_INSTANCES = [ - (fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM), - (fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM), -] - -# cpu_info that will be returned by build_host_stats_from_entry() -HOST_STATS_CPU_INFO = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'}) - - -def build_host_resource_from_ms(ms_wrapper): - """Build the host resource dict from an MS adapter wrapper - - This method builds the host resource dictionary from the - ManagedSystem Entry wrapper - - :param ms_wrapper: ManagedSystem Entry Wrapper. 
- """ - data = {} - - # Calculate the vcpus - proc_units = float(ms_wrapper.proc_units_configurable) - proc_units_avail = float(ms_wrapper.proc_units_avail) - pu_used = proc_units - proc_units_avail - data['vcpus'] = int(math.ceil(proc_units)) - data['vcpus_used'] = int(math.ceil(pu_used)) - - data['memory_mb'] = ms_wrapper.memory_configurable - used_memory = ms_wrapper.memory_configurable - ms_wrapper.memory_free - data['memory_mb_used'] = used_memory - data["hypervisor_type"] = fields.HVType.PHYP - data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION - data["hypervisor_hostname"] = CONF.host - data["cpu_info"] = HOST_STATS_CPU_INFO - data["numa_topology"] = None - data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES - - stats = {'proc_units': '%.2f' % proc_units, - 'proc_units_used': '%.2f' % pu_used, - 'memory_region_size': ms_wrapper.memory_region_size - } - data["stats"] = stats - - data["pci_passthrough_devices"] = _build_pci_json(ms_wrapper) - - return data - - -def _build_pci_json(sys_w): - """Build the JSON string for the pci_passthrough_devices host resource. - - :param sys_w: pypowervm.wrappers.managed_system.System wrapper of the host. - :return: JSON string representing a list of "PCI passthrough device" dicts, - See nova.objects.pci_device.PciDevice. - """ - # Produce SR-IOV PCI data. Devices are validated by virtue of the network - # name associated with their label, which must be cleared via an entry in - # the pci_passthrough_whitelist in the nova.conf. Each Claim allocates a - # device and filters it from the list for subsequent claims; so we generate - # the maximum number of "devices" (VFs) we could possibly create on each - # port. These are NOT real VFs. The real VFs get created on the fly by - # VNIC.create. - pci_devs = [ - {"physical_network": pport.label or 'default', - "label": pport.label or 'default', - "dev_type": fields.PciDeviceType.SRIOV_VF, - "address": '*:%d:%d.%d' % (sriov.sriov_adap_id, pport.port_id, vfn), - "parent_addr": "*:*:*.*", - "vendor_id": "*", - "product_id": "*", - "numa_node": 1} - for sriov in sys_w.asio_config.sriov_adapters - for pport in sriov.phys_ports - for vfn in range(pport.supp_max_lps)] - - return jsonutils.dumps(pci_devs) diff --git a/nova_powervm/virt/powervm/i18n.py b/nova_powervm/virt/powervm/i18n.py deleted file mode 100644 index a720c75b..00000000 --- a/nova_powervm/virt/powervm/i18n.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_i18n - -# Initialize message translators and short cut methods -_translators = oslo_i18n.TranslatorFactory(domain='nova-powervm') -_ = _translators.primary diff --git a/nova_powervm/virt/powervm/image.py b/nova_powervm/virt/powervm/image.py deleted file mode 100644 index ebcabe6b..00000000 --- a/nova_powervm/virt/powervm/image.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities related to glance image management for the PowerVM driver.""" - -from nova import utils - - -class OSDistro(object): - """Mirror of image os distro.Enum.""" - AIX = 'aix' - RHEL = 'rhel' - OS400 = 'ibmi' - SLES = 'sles' - UBUNTU = 'ubuntu' - UNKNOWN = 'Unknown' - ALL_VALUES = (AIX, RHEL, OS400, SLES, UBUNTU, UNKNOWN) - - -def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath): - """Stream the entire contents of a block device to a glance image. - - :param context: Nova security context - :param image_api: Handle to the glance image API. - :param image_id: UUID of the prepared glance image. - :param metadata: Dictionary of metadata for the image. - :param devpath: String path to device file of block device to be uploaded, - e.g. "/dev/sde". - """ - # Make the device file owned by the current user for the duration of the - # operation. - with utils.temporary_chown(devpath), open(devpath, 'rb') as stream: - # Stream it. This is synchronous. - image_api.update(context, image_id, metadata, stream) - - -def generate_snapshot_metadata(context, image_api, image_id, instance): - """Generate a metadata dictionary for an instance snapshot. - - :param context: Nova security context - :param image_api: Handle to the glance image API. - :param image_id: UUID of the prepared glance image. - :param instance: The Nova instance whose disk is to be snapshotted. - :return: A dict of metadata suitable for image_api.update. - """ - # TODO(esberglu): Update this to v2 metadata - image = image_api.get(context, image_id) - metadata = { - 'name': image['name'], - 'status': 'active', - 'disk_format': 'raw', - 'container_format': 'bare', - 'properties': { - 'image_location': 'snapshot', - 'image_state': 'available', - 'owner_id': instance.project_id, - } - } - return metadata diff --git a/nova_powervm/virt/powervm/live_migration.py b/nova_powervm/virt/powervm/live_migration.py deleted file mode 100644 index 5ce55fe9..00000000 --- a/nova_powervm/virt/powervm/live_migration.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
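For context, the two image.py helpers above were designed to compose into a snapshot upload roughly as follows; the wrapper function name and the device path are illustrative assumptions, not code from the original tree:

def upload_snapshot(context, image_api, image_id, instance, devpath):
    # Build glance metadata from the prepared image, then synchronously
    # stream the raw block device contents into that image.
    metadata = generate_snapshot_metadata(context, image_api, image_id,
                                          instance)
    stream_blockdev_to_glance(context, image_api, image_id, metadata,
                              devpath)

# Hypothetical usage:
# upload_snapshot(ctx, image_api, image_id, inst, '/dev/sde')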
-# - -import abc -import six - -from nova import exception -from nova.objects import migrate_data as mig_obj -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import excutils -from pypowervm.tasks import management_console as mgmt_task -from pypowervm.tasks import migration as mig -from pypowervm.tasks import storage as stor_task -from pypowervm.tasks import vterm -from pypowervm import util - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import media -from nova_powervm.virt.powervm.tasks import storage as tf_stg -from nova_powervm.virt.powervm import vif -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class LiveMigrationFailed(exception.NovaException): - msg_fmt = _("Live migration of instance '%(name)s' failed for reason: " - "%(reason)s") - - -class LiveMigrationVolume(exception.NovaException): - msg_fmt = _("Cannot migrate %(name)s because the volume %(volume)s " - "cannot be attached on the destination host %(host)s.") - - -def _verify_migration_capacity(host_w, instance): - """Check that the counts are valid for in progress and supported.""" - mig_stats = host_w.migration_data - active_migrations_in_progress = mig_stats['active_migrations_in_progress'] - active_migrations_supported = mig_stats['active_migrations_supported'] - if (active_migrations_in_progress >= active_migrations_supported): - - msg = (_("Cannot migrate %(name)s because the host %(host)s only " - "allows %(allowed)s concurrent migrations and " - "%(running)s migrations are currently running.") % - dict(name=instance.name, host=host_w.system_name, - running=mig_stats['active_migrations_in_progress'], - allowed=mig_stats['active_migrations_supported'])) - raise exception.MigrationPreCheckError(reason=msg) - - -@six.add_metaclass(abc.ABCMeta) -class LiveMigration(object): - - def __init__(self, drvr, instance, mig_data): - self.drvr = drvr - self.instance = instance - self.mig_data = mig_data - - -class LiveMigrationDest(LiveMigration): - - def __init__(self, drvr, instance): - super(LiveMigrationDest, self).__init__( - drvr, instance, mig_obj.PowerVMLiveMigrateData()) - - @staticmethod - def _get_dest_user_id(): - """Get the user id to use on the target host.""" - # We'll always use wlp - return 'wlp' - - def check_destination(self, context, src_compute_info, dst_compute_info): - """Check the destination host - - Here we check the destination host to see if it's capable of migrating - the instance to this host. 
- - :param context: security context - :param src_compute_info: Info about the sending machine - :param dst_compute_info: Info about the receiving machine - :returns: a PowerVMLiveMigrateData object - """ - - # Refresh the host wrapper since we're pulling values that may change - self.drvr.host_wrapper.refresh() - - src_stats = src_compute_info['stats'] - dst_stats = dst_compute_info['stats'] - # Check the lmb sizes for compatibility - src_memory_region_size = src_stats['memory_region_size'] - dst_memory_region_size = dst_stats['memory_region_size'] - if (src_memory_region_size != dst_memory_region_size): - msg = (_("Cannot migrate instance '%(name)s' because the " - "memory region size of the source (%(source_mrs)d MB) " - "does not match the memory region size of the target " - "(%(target_mrs)d MB).") % - dict(name=self.instance.name, - source_mrs=src_stats['memory_region_size'], - target_mrs=dst_stats['memory_region_size'])) - - raise exception.MigrationPreCheckError(reason=msg) - - _verify_migration_capacity(self.drvr.host_wrapper, self.instance) - - self.mig_data.host_mig_data = self.drvr.host_wrapper.migration_data - self.mig_data.dest_ip = CONF.my_ip - self.mig_data.dest_user_id = self._get_dest_user_id() - self.mig_data.dest_sys_name = self.drvr.host_wrapper.system_name - self.mig_data.dest_proc_compat = ( - ','.join(self.drvr.host_wrapper.proc_compat_modes)) - - LOG.debug('src_compute_info: %s', src_compute_info) - LOG.debug('dst_compute_info: %s', dst_compute_info) - LOG.debug('Migration data: %s', self.mig_data) - - return self.mig_data - - def pre_live_migration(self, context, block_device_info, network_infos, - disk_info, migrate_data, vol_drvs): - - """Prepare an instance for live migration - - :param context: security context - :param block_device_info: instance block device information - :param network_infos: instance network information - :param disk_info: instance disk information - :param migrate_data: a PowerVMLiveMigrateData object - :param vol_drvs: volume drivers for the attached volumes - """ - LOG.debug('Running pre live migration on destination. Migration data: ' - '%s', migrate_data, instance=self.instance) - - # Set the ssh auth key. - mgmt_task.add_authorized_key(self.drvr.adapter, - migrate_data.public_key) - - # For each network info, run the pre-live migration. This tells the - # system what the target vlans will be. - vea_vlan_mappings = {} - for network_info in network_infos: - vif.pre_live_migrate_at_destination( - self.drvr.adapter, self.drvr.host_uuid, self.instance, - network_info, vea_vlan_mappings) - migrate_data.vea_vlan_mappings = vea_vlan_mappings - - # For each volume, make sure it's ready to migrate - for vol_drv in vol_drvs: - LOG.info('Performing pre migration for volume %(volume)s', - dict(volume=vol_drv.volume_id), instance=self.instance) - try: - vol_drv.pre_live_migration_on_destination( - migrate_data.vol_data) - except Exception: - LOG.exception("PowerVM error preparing instance for live " - "migration.", instance=self.instance) - # It failed. - vol_exc = LiveMigrationVolume( - host=self.drvr.host_wrapper.system_name, - name=self.instance.name, volume=vol_drv.volume_id) - raise exception.MigrationPreCheckError(reason=vol_exc.message) - - # Scrub stale/orphan mappings and storage to minimize probability of - # collisions on the destination. 
- stor_task.ComprehensiveScrub(self.drvr.adapter).execute() - - # Save the migration data, we'll use it if the LPM fails - self.pre_live_vol_data = migrate_data.vol_data - return migrate_data - - def post_live_migration_at_destination(self, network_infos, vol_drv_iter): - """Do post migration cleanup on destination host. - - :param network_infos: instance network information - :param vol_drv_iter: volume driver iterator for the attached volumes - and BDM information. - """ - # The LPAR should be on this host now. - LOG.debug("Post live migration at destination.", - instance=self.instance) - - # For each volume, make sure it completes the migration - for bdm, vol_drv in vol_drv_iter: - LOG.info('Performing post migration for volume %(volume)s', - dict(volume=vol_drv.volume_id), instance=self.instance) - try: - vol_drv.post_live_migration_at_destination( - self.pre_live_vol_data) - # Save the BDM for the updated connection info. - tf_stg.SaveBDM(bdm, self.instance).execute() - except Exception: - LOG.exception("PowerVM error cleaning up destination host " - "after migration.", instance=self.instance) - # It failed. - raise LiveMigrationVolume( - host=self.drvr.host_wrapper.system_name, - name=self.instance.name, volume=vol_drv.volume_id) - - def rollback_live_migration_at_destination( - self, context, instance, network_infos, block_device_info, - destroy_disks=True, migrate_data=None): - """Clean up destination node after a failed live migration. - - :param context: security context - :param instance: instance object that was being migrated - :param network_infos: instance network infos - :param block_device_info: instance block device information - :param destroy_disks: - if true, destroy disks at destination during cleanup - :param migrate_data: a LiveMigrateData object - - """ - # Clean up any network infos - for network_info in network_infos: - vif.rollback_live_migration_at_destination( - self.drvr.adapter, self.drvr.host_uuid, self.instance, - network_info, migrate_data.vea_vlan_mappings) - - def cleanup_volume(self, vol_drv): - """Cleanup a volume after a failed migration. - - :param vol_drv: volume driver for the attached volume - """ - LOG.info('Performing detach for volume %(volume)s', - dict(volume=vol_drv.volume_id), instance=self.instance) - # Ensure the volume data is present before trying cleanup - if hasattr(self, 'pre_live_vol_data'): - try: - vol_drv.cleanup_volume_at_destination(self.pre_live_vol_data) - except Exception: - LOG.exception("PowerVM error cleaning volume after failed " - "migration.", instance=self.instance) - # Log the exception but no need to raise one because - # the VM is still on the source host. - - -class LiveMigrationSrc(LiveMigration): - - def check_source(self, context, block_device_info, vol_drvs): - """Check the source host - - Here we check the source host to see if it's capable of migrating - the instance to the destination host. There may be conditions - that can only be checked on the source side. - - Also, get the instance ready for the migration by removing any - virtual optical devices attached to the LPAR. 
- - :param context: security context - :param block_device_info: result of _get_instance_block_device_info - :param vol_drvs: volume drivers for the attached volumes - :returns: a PowerVMLiveMigrateData object - """ - - lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance) - self.lpar_w = lpar_w - - LOG.debug('Dest Migration data: %s', self.mig_data, - instance=self.instance) - - # Check proc compatibility modes - if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in - self.mig_data.dest_proc_compat.split(',')): - msg = (_("Cannot migrate %(name)s because its " - "processor compatibility mode %(mode)s " - "is not in the list of modes \"%(modes)s\" " - "supported by the target host.") % - dict(name=self.instance.name, - mode=lpar_w.proc_compat_mode, - modes=', '.join( - self.mig_data.dest_proc_compat.split(',')))) - - raise exception.MigrationPreCheckError(reason=msg) - - # Check if VM is ready for migration - self._check_migration_ready(lpar_w, self.drvr.host_wrapper) - - if lpar_w.migration_state != 'Not_Migrating': - msg = (_("Live migration of instance '%(name)s' failed because " - "the migration state is: %(state)s") % - dict(name=self.instance.name, - state=lpar_w.migration_state)) - raise exception.MigrationPreCheckError(reason=msg) - - # Check the number of migrations for capacity - _verify_migration_capacity(self.drvr.host_wrapper, self.instance) - - self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter) - - # Get the 'source' pre-migration data for the volume drivers. - vol_data = {} - for vol_drv in vol_drvs: - vol_drv.pre_live_migration_on_source(vol_data) - self.mig_data.vol_data = vol_data - - LOG.debug('Source migration data: %s', self.mig_data, - instance=self.instance) - - # Create a FeedTask to scrub any orphaned mappings/storage associated - # with this LPAR. (Don't run it yet - we want to do the VOpt removal - # within the same FeedTask.) - stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter, - lpar_w.id) - # Add subtasks to remove the VOpt devices under the same FeedTask. - media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt( - lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False) - # Now execute the FeedTask, performing both scrub and VOpt removal. - stg_ftsk.execute() - - # Ensure the vterm is non-active - vterm.close_vterm(self.drvr.adapter, lpar_w.uuid) - - return self.mig_data - - def live_migration(self, context, migrate_data): - """Start the live migration. - - :param context: security context - :param migrate_data: a PowerVMLiveMigrateData object - """ - LOG.debug("Starting migration. Migrate data: %s", migrate_data, - instance=self.instance) - - # The passed in mig data has more info (dest data added), so replace - self.mig_data = migrate_data - - # Get the vFC and vSCSI live migration mappings - vol_data = migrate_data.vol_data - vfc_mappings = vol_data.get('vfc_lpm_mappings') - if vfc_mappings is not None: - vfc_mappings = jsonutils.loads(vfc_mappings) - vscsi_mappings = vol_data.get('vscsi_lpm_mappings') - if vscsi_mappings is not None: - vscsi_mappings = jsonutils.loads(vscsi_mappings) - - # Run the pre-live migration on the network objects - network_infos = self.instance.info_cache.network_info - trunks_to_del = [] - for network_info in network_infos: - trunks_to_del.extend(vif.pre_live_migrate_at_source( - self.drvr.adapter, self.drvr.host_uuid, self.instance, - network_info)) - - # Convert the network mappings into something the API can understand. 
-        vlan_mappings = self._convert_nl_io_mappings(
-            migrate_data.vea_vlan_mappings)
-
-        try:
-            # Migrate the LPAR!
-            mig.migrate_lpar(
-                self.lpar_w, self.mig_data.dest_sys_name,
-                validate_only=False, tgt_mgmt_svr=self.mig_data.dest_ip,
-                tgt_mgmt_usr=self.mig_data.dest_user_id,
-                virtual_fc_mappings=vfc_mappings,
-                virtual_scsi_mappings=vscsi_mappings,
-                vlan_mappings=vlan_mappings, sdn_override=True,
-                vlan_check_override=True)
-
-            # Delete the source side network trunk adapters
-            for trunk_to_del in trunks_to_del:
-                trunk_to_del.delete()
-        except Exception:
-            with excutils.save_and_reraise_exception(logger=LOG):
-                LOG.exception("Live migration failed.", instance=self.instance)
-        finally:
-            LOG.debug("Finished migration.", instance=self.instance)
-
-    def _convert_nl_io_mappings(self, mappings):
-        if not mappings:
-            return None
-
-        resp = []
-        for mac, value in six.iteritems(mappings):
-            resp.append("%s/%s" % (util.sanitize_mac_for_api(mac), value))
-        return resp
-
-    def post_live_migration(self, vol_drvs, migrate_data):
-        """Post operation of live migration at source host.
-
-        This method is focused on storage.
-
-        :param vol_drvs: volume drivers for the attached volumes
-        :param migrate_data: a PowerVMLiveMigrateData object
-        """
-        # For each volume, make sure the source is cleaned
-        for vol_drv in vol_drvs:
-            LOG.info('Performing post migration for volume %(volume)s',
-                     dict(volume=vol_drv.volume_id), instance=self.instance)
-            try:
-                vol_drv.post_live_migration_at_source(migrate_data.vol_data)
-            except Exception:
-                LOG.exception("PowerVM error cleaning source host after live "
-                              "migration.", instance=self.instance)
-                # Log the exception, but don't raise one: the VM has already
-                # moved. Raising here would leave the VM on the new host
-                # while the instance data still reflects the old host.
-
-    def post_live_migration_at_source(self, network_infos):
-        """Do post migration cleanup on source host.
-
-        This method is network focused.
-
-        :param network_infos: instance network information
-        """
-        LOG.debug("Post live migration at source.", instance=self.instance)
-        for network_info in network_infos:
-            vif.post_live_migrate_at_source(
-                self.drvr.adapter, self.drvr.host_uuid, self.instance,
-                network_info)
-
-    def rollback_live_migration(self, context):
-        """Roll back a failed migration.
-
-        :param context: security context
-        """
-        LOG.debug("Rollback live migration.", instance=self.instance)
-        # If an error happened, then try to recover. In most cases the
-        # recovery will happen automatically, but if it doesn't, force it.
-        try:
-            self.lpar_w.refresh()
-            if self.lpar_w.migration_state != 'Not_Migrating':
-                self.migration_recover()
-
-        except Exception:
-            LOG.exception("Migration rollback failed.", instance=self.instance)
-        finally:
-            LOG.debug("Finished migration rollback.", instance=self.instance)
-
-    def _check_migration_ready(self, lpar_w, host_w):
-        """See if the lpar is ready for LPM.
-
-        :param lpar_w: LogicalPartition wrapper
-        :param host_w: ManagedSystem wrapper
-        """
-        ready, msg = lpar_w.can_lpm(host_w,
-                                    migr_data=self.mig_data.host_mig_data)
-        if not ready:
-            msg = (_("Live migration of instance '%(name)s' failed because it "
-                     "is not ready. Reason: %(reason)s") %
-                   dict(name=self.instance.name, reason=msg))
-            raise exception.MigrationPreCheckError(reason=msg)
-
-    def migration_abort(self):
-        """Abort the migration.
-
-        Invoked if the operation exceeds the configured timeout.
- """ - LOG.debug("Abort migration.", instance=self.instance) - try: - mig.migrate_abort(self.lpar_w) - except Exception: - LOG.exception("Abort of live migration has failed. This is " - "non-blocking.", instance=self.instance) - - def migration_recover(self): - """Recover migration if the migration failed for any reason. """ - LOG.debug("Recover migration.", instance=self.instance) - mig.migrate_recover(self.lpar_w, force=True) diff --git a/nova_powervm/virt/powervm/media.py b/nova_powervm/virt/powervm/media.py deleted file mode 100644 index c68786df..00000000 --- a/nova_powervm/virt/powervm/media.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2015, 2019 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from nova.api.metadata import base as instance_metadata -from nova.network import model as network_model -from nova.virt import configdrive -import os -import retrying -from taskflow import task -import tempfile - -from oslo_log import log as logging -from oslo_utils import excutils - -from pypowervm import const as pvm_const -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.tasks import storage as tsk_stg -from pypowervm.tasks import vopt as tsk_vopt -from pypowervm import util as pvm_util -from pypowervm.utils import transaction as pvm_tx -from pypowervm.wrappers import storage as pvm_stg -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -_LLA_SUBNET = "fe80::/64" - - -class ConfigDrivePowerVM(object): - - def __init__(self, adapter): - """Creates the config drive manager for PowerVM. - - :param adapter: The pypowervm adapter to communicate with the system. - """ - self.adapter = adapter - - # Validate that the virtual optical exists - self.vios_uuid, self.vg_uuid = tsk_vopt.validate_vopt_repo_exists( - self.adapter, - vopt_media_volume_group=CONF.powervm.vopt_media_volume_group, - vopt_media_rep_size=CONF.powervm.vopt_media_rep_size) - - @staticmethod - def _sanitize_network_info(network_info): - """Will sanitize the network info for the config drive. - - Newer versions of cloud-init look at the vif type information in - the network info and utilize it to determine what to do. There are - a limited number of vif types, and it seems to be built on the idea - that the neutron vif type is the cloud init vif type (which is not - quite right). - - This sanitizes the network info that gets passed into the config - drive to work properly with cloud-inits. 
- """ - network_info = copy.deepcopy(network_info) - for vif in network_info: - if vif.get('type') != 'ovs': - LOG.debug('Changing vif type from %(type)s to vif for vif ' - '%(id)s.', {'type': vif.get('type'), - 'id': vif.get('id')}) - vif['type'] = 'vif' - return network_info - - def _create_cfg_dr_iso(self, instance, injected_files, network_info, - iso_path, admin_pass=None): - """Creates an ISO file that contains the injected files. - - Used for config drive. - - :param instance: The VM instance from OpenStack. - :param injected_files: A list of file paths that will be injected into - the ISO. - :param network_info: The network_info from the nova spawn method. - :param iso_path: The absolute file path for the new ISO - :param admin_pass: Optional password to inject for the VM. - """ - LOG.info("Creating config drive.", instance=instance) - extra_md = {} - if admin_pass is not None: - extra_md['admin_pass'] = admin_pass - - # Sanitize the vifs for the network config - network_info = self._sanitize_network_info(network_info) - - inst_md = instance_metadata.InstanceMetadata(instance, - content=injected_files, - extra_md=extra_md, - network_info=network_info) - - # fix instance uuid to match uuid assigned to VM - inst_md.uuid = vm.get_pvm_uuid(instance).lower() - - with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: - LOG.info("Config drive ISO building to path %(iso_path)s.", - {'iso_path': iso_path}, instance=instance) - # In case, if there's an OSError related failure while - # creating config drive, retry make drive operation. - - def _retry_on_oserror(exc): - return isinstance(exc, OSError) - - @retrying.retry(retry_on_exception=_retry_on_oserror, - stop_max_attempt_number=2) - def _make_cfg_drive(iso_path): - cdb.make_drive(iso_path) - - try: - _make_cfg_drive(iso_path) - except OSError: - with excutils.save_and_reraise_exception(logger=LOG): - # If we get here, that means there's an exception during - # second attempt, log the same and fail the deploy - # operation. - LOG.exception("Config drive ISO could not be built.", - instance=instance) - - @staticmethod - def get_cfg_drv_name(instance): - return pvm_util.sanitize_file_name_for_api( - instance.uuid.replace('-', ''), prefix='cfg_', suffix='.iso', - max_len=pvm_const.MaxLen.VOPT_NAME) - - def create_cfg_drv_vopt(self, instance, injected_files, network_info, - lpar_uuid, admin_pass=None, mgmt_cna=None, - stg_ftsk=None): - """Creates the config drive virtual optical and attach to VM. - - :param instance: The VM instance from OpenStack. - :param injected_files: A list of file paths that will be injected into - the ISO. - :param network_info: The network_info from the nova spawn method. - :param lpar_uuid: The UUID of the client LPAR - :param admin_pass: (Optional) password to inject for the VM. - :param mgmt_cna: (Optional) The management (RMC) CNA wrapper. - :param stg_ftsk: (Optional) If provided, the tasks to create and attach - the Media to the VM will be deferred on to the - FeedTask passed in. The execute can be done all in - one method (batched together). If None (the default), - the media will be created and attached immediately. 
- """ - # If there is a management client network adapter, then we should - # convert that to a VIF and add it to the network info - if mgmt_cna is not None and CONF.powervm.use_rmc_ipv6_scheme: - network_info = copy.deepcopy(network_info) - network_info.append(self._mgmt_cna_to_vif(mgmt_cna)) - - # Pick a file name for when we upload the media to VIOS - file_name = self.get_cfg_drv_name(instance) - - # Create and upload the media - with tempfile.NamedTemporaryFile(mode='rb') as fh: - self._create_cfg_dr_iso(instance, injected_files, network_info, - fh.name, admin_pass=admin_pass) - vopt, f_uuid = tsk_stg.upload_vopt( - self.adapter, self.vios_uuid, fh, file_name, - os.path.getsize(fh.name)) - - # Run the attach of the virtual optical - self._attach_vopt(instance, lpar_uuid, vopt, stg_ftsk) - - def _attach_vopt(self, instance, lpar_uuid, vopt, stg_ftsk=None): - """Will attach the vopt to the VIOS. - - If the stg_ftsk is provided, adds the mapping to the stg_ftsk, but - won't attach until the stg_ftsk is independently executed. - - :param instance: The VM instance from OpenStack. - :param lpar_uuid: The UUID of the client LPAR - :param vopt: The virtual optical device to add. - :param stg_ftsk: (Optional) If provided, the tasks to create the - storage mappings to connect the Media to the VM will - be deferred on to the FeedTask passed in. The execute - can be done all in one method (batched together). If - None (the default), the media will be attached - immediately. - """ - # If no transaction manager, build locally so that we can run - # immediately - if stg_ftsk is None: - wtsk = pvm_tx.WrapperTask('media_attach', pvm_vios.VIOS.getter( - self.adapter, entry_uuid=self.vios_uuid, - xag=[pvm_const.XAG.VIO_SMAP])) - else: - wtsk = stg_ftsk.wrapper_tasks[self.vios_uuid] - - # Define the function to build and add the mapping - def add_func(vios_w): - LOG.info("Adding config drive mapping to Virtual I/O Server " - "%(vios)s", {'vios': vios_w.name}, instance=instance) - mapping = tsk_map.build_vscsi_mapping(None, vios_w, - lpar_uuid, vopt) - return tsk_map.add_map(vios_w, mapping) - - wtsk.add_functor_subtask(add_func) - - # If built locally, then execute - if stg_ftsk is None: - wtsk.execute() - - def _mgmt_cna_to_vif(self, cna): - """Converts the mgmt CNA to VIF format for network injection.""" - # See IEFT RFC 4291 appendix A for information on this algorithm - mac = vm.norm_mac(cna.mac) - ipv6_link_local = self._mac_to_link_local(mac) - - subnet = network_model.Subnet( - version=6, cidr=_LLA_SUBNET, - ips=[network_model.FixedIP(address=ipv6_link_local)]) - network = network_model.Network(id='mgmt', subnets=[subnet], - injected='yes') - return network_model.VIF(id='mgmt_vif', address=mac, - network=network) - - @staticmethod - def _mac_to_link_local(mac): - # Convert the address to IPv6. The first step is to separate out the - # mac address - splits = mac.split(':') - - # Insert into the middle the key ff:fe - splits.insert(3, 'ff') - splits.insert(4, 'fe') - - # Do the bit flip on the first octet. - splits[0] = "%.2x" % (int(splits[0], 16) ^ 0b00000010) - - # Convert to the IPv6 link local format. The prefix is fe80::. Join - # the hexes together at every other digit. - ll = ['fe80:'] - ll.extend([splits[x] + splits[x + 1] - for x in range(0, len(splits), 2)]) - return ':'.join(ll) - - def dlt_vopt(self, lpar_uuid, stg_ftsk=None, remove_mappings=True): - """Deletes the virtual optical and scsi mappings for a VM. - - :param lpar_uuid: The pypowervm UUID of the LPAR to remove. 
- :param stg_ftsk: (Optional) A FeedTask. If provided, the actions to - modify the storage will be added as batched functions - onto the FeedTask. If not provided (the default) the - operation to delete the vOpt will execute immediately. - :param remove_mappings: (Optional, Default: True) If set to true, will - remove the SCSI mappings as part of the - operation. If false, will leave the mapping - but detach the storage from it. If the VM is - running, it may be necessary to do the latter - as some operating systems will not allow the - removal. - """ - # If no transaction manager, build locally so that we can run - # immediately - if stg_ftsk is None: - built_stg_ftsk = True - vio_w = pvm_vios.VIOS.get(self.adapter, root_id=self.vios_uuid, - xag=[pvm_const.XAG.VIO_SMAP]) - stg_ftsk = pvm_tx.FeedTask('media_detach', [vio_w]) - else: - built_stg_ftsk = False - - # Run the remove maps method. - self.add_dlt_vopt_tasks(lpar_uuid, stg_ftsk, - remove_mappings=remove_mappings) - - # If built locally, then execute - if built_stg_ftsk: - stg_ftsk.execute() - - def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True): - """Deletes the virtual optical and (optionally) scsi mappings for a VM. - - :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be - removed. - :param stg_ftsk: A FeedTask handling storage I/O. The task to remove - the mappings and media from the VM will be deferred on - to the FeedTask passed in. The execute can be done all - in one method (batched together). No updates are - actually made here; they are simply added to the - FeedTask. - :param remove_mappings: (Optional, Default: True) If set to true, will - remove the SCSI mappings as part of the - operation. If false, will leave the mapping - but detach the storage from it. If the VM is - running, it may be necessary to do the latter - as some operating systems will not allow the - removal. - """ - # The function to find the VOpt - match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia) - - def rm_vopt_mapping(vios_w): - return tsk_map.remove_maps(vios_w, lpar_uuid, - match_func=match_func) - - def detach_vopt_from_map(vios_w): - return tsk_map.detach_storage(vios_w, lpar_uuid, - match_func=match_func) - - # Add a function to remove the map or detach the vopt - stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask( - rm_vopt_mapping if remove_mappings else detach_vopt_from_map) - - # Find the vOpt device (before the remove is done) so that it can be - # removed. - partition_id = vm.get_vm_id(self.adapter, lpar_uuid) - media_mappings = tsk_map.find_maps( - stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings, - client_lpar_id=partition_id, match_func=match_func) - media_elems = [x.backing_storage for x in media_mappings] - - def rm_vopt(): - LOG.info("Removing virtual optical for VM with UUID %s.", - lpar_uuid) - vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid, - parent_type=pvm_vios.VIOS, - parent_uuid=self.vios_uuid) - tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems) - - # Don't add this task if there is no media to delete (eg. config drive) - if media_elems: - stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt)) diff --git a/nova_powervm/virt/powervm/mgmt.py b/nova_powervm/virt/powervm/mgmt.py deleted file mode 100644 index 6073eb1a..00000000 --- a/nova_powervm/virt/powervm/mgmt.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities related to the PowerVM management partition.
-
-The management partition is a special LPAR that runs the PowerVM REST API
-service. It appears through the REST API as a LogicalPartition of type
-aixlinux, but with the is_mgmt_partition property set to True.
-
-The PowerVM Nova Compute service runs on the management partition.
-"""
-import glob
-import os
-
-from nova import exception
-import nova.privsep.path
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from pypowervm.tasks import partition as pvm_par
-import retrying
-
-from nova_powervm.virt.powervm import exception as npvmex
-
-
-LOG = logging.getLogger(__name__)
-
-_MP_UUID = None
-
-
-@lockutils.synchronized("mgmt_lpar_uuid")
-def mgmt_uuid(adapter):
-    """Returns the management partition's UUID."""
-    global _MP_UUID
-    if not _MP_UUID:
-        _MP_UUID = pvm_par.get_this_partition(adapter).uuid
-    return _MP_UUID
-
-
-def discover_vscsi_disk(mapping, scan_timeout=300):
-    """Bring a mapped device into the management partition and find its name.
-
-    Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
-    causing the operating system to discover the mapped device. Find and
-    return the path of the newly-discovered device based on its UDID in the
-    mapping.
-
-    Note: scanning the bus will cause the operating system to discover *all*
-    devices on that bus. However, this method will only return the path for
-    the specific device from the input mapping, based on its UDID.
-
-    :param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
-                    representing the mapping of the desired disk to the
-                    management partition.
-    :param scan_timeout: The maximum number of seconds after scanning to wait
-                         for the specified device to appear.
-    :return: The udev-generated ("/dev/sdX") name of the discovered disk.
-    :raise NoDiskDiscoveryException: If the disk did not appear after the
-                                     specified timeout.
-    :raise UniqueDiskDiscoveryException: If more than one disk appears with
-                                         the expected UDID.
-    """
-    # Calculate the Linux slot number from the client adapter slot number.
-    lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
-    # We'll match the device ID based on the UDID, which is actually the last
-    # 32 chars of the field we get from PowerVM.
-    udid = mapping.backing_storage.udid[-32:]
-
-    LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
-              "%(slot)x.", {'udid': udid, 'slot': lslot})
-
-    # Find the special file to scan the bus, and scan it.
-    # This glob should yield exactly one result, but use the loop just in
-    # case.
-    for scanpath in glob.glob(
-            '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
-        # Writing '- - -' to this sysfs file triggers a bus rescan.
-        nova.privsep.path.writefile(scanpath, 'a', '- - -')
-
-    # Now see if our device showed up. If so, we can reliably match it based
-    # on its Linux ID, which ends with the disk's UDID.
- dpathpat = '/dev/disk/by-id/*%s' % udid - - # The bus scan is asynchronous. Need to poll, waiting for the device to - # spring into existence. Stop when glob finds at least one device, or - # after the specified timeout. Sleep 1/4 second between polls. - @retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250, - stop_max_delay=scan_timeout * 1000) - def _poll_for_dev(globpat): - return glob.glob(globpat) - try: - disks = _poll_for_dev(dpathpat) - except retrying.RetryError as re: - raise npvmex.NoDiskDiscoveryException( - bus=lslot, udid=udid, polls=re.last_attempt.attempt_number, - timeout=scan_timeout) - # If we get here, _poll_for_dev returned a nonempty list. If not exactly - # one entry, this is an error. - if len(disks) != 1: - raise npvmex.UniqueDiskDiscoveryException(path_pattern=dpathpat, - count=len(disks)) - - # The by-id path is a symlink. Resolve to the /dev/sdX path - dpath = os.path.realpath(disks[0]) - LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at " - "path %(devname)s.", - {'udid': udid, 'slot': lslot, 'devname': dpath}) - return dpath - - -def remove_block_dev(devpath, scan_timeout=10): - """Remove a block device from the management partition. - - This method causes the operating system of the management partition to - delete the device special files associated with the specified block device. - - :param devpath: Any path to the block special file associated with the - device to be removed. - :param scan_timeout: The maximum number of seconds after scanning to wait - for the specified device to disappear. - :raise InvalidDevicePath: If the specified device or its 'delete' special - file cannot be found. - :raise DeviceDeletionException: If the deletion was attempted, but the - device special file is still present - afterward. - """ - # Resolve symlinks, if any, to get to the /dev/sdX path - devpath = os.path.realpath(devpath) - try: - os.stat(devpath) - except OSError: - raise exception.InvalidDevicePath(path=devpath) - devname = devpath.rsplit('/', 1)[-1] - delpath = '/sys/block/%s/device/delete' % devname - try: - os.stat(delpath) - except OSError: - raise exception.InvalidDevicePath(path=delpath) - LOG.debug("Deleting block device %(devpath)s from the management " - "partition via special file %(delpath)s.", - {'devpath': devpath, 'delpath': delpath}) - # Writing '1' to this sysfs file deletes the block device and rescans. - nova.privsep.path.writefile(delpath, 'a', '1') - - # The bus scan is asynchronous. Need to poll, waiting for the device to - # disappear. Stop when stat raises OSError (dev file not found) - which is - # success - or after the specified timeout (which is failure). Sleep 1/4 - # second between polls. - @retrying.retry(retry_on_result=lambda result: result, wait_fixed=250, - stop_max_delay=scan_timeout * 1000) - def _poll_for_del(statpath): - try: - os.stat(statpath) - return True - except OSError: - # Device special file is absent, as expected - return False - try: - _poll_for_del(devpath) - except retrying.RetryError as re: - # stat just kept returning (dev file continued to exist). - raise npvmex.DeviceDeletionException( - devpath=devpath, polls=re.last_attempt.attempt_number, - timeout=scan_timeout) - # Else stat raised - the device disappeared - all done. 
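Both mgmt.py helpers above follow the same pattern: trigger an asynchronous sysfs action, then poll with the retrying decorator until the device appears (or disappears) or a timeout expires. Below is a minimal, self-contained sketch of that polling idiom, assuming only the third-party retrying package; the glob pattern and timeout are illustrative, not values from the driver:

    import glob

    import retrying

    SCAN_TIMEOUT = 10  # seconds; hypothetical value for this sketch


    @retrying.retry(retry_on_result=lambda result: not result,  # retry on empty
                    wait_fixed=250,                      # poll every 1/4 second
                    stop_max_delay=SCAN_TIMEOUT * 1000)  # give up after timeout
    def _poll_for_device(globpat):
        """Return a non-empty glob match list, retrying until one appears."""
        return glob.glob(globpat)


    try:
        disks = _poll_for_device('/dev/disk/by-id/*example-udid')
        print('Discovered: %s' % disks[0])
    except retrying.RetryError as re:
        print('No device after %d polls' % re.last_attempt.attempt_number)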
diff --git a/nova_powervm/virt/powervm/nvram/__init__.py b/nova_powervm/virt/powervm/nvram/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/virt/powervm/nvram/api.py b/nova_powervm/virt/powervm/nvram/api.py deleted file mode 100644 index 1e9c464f..00000000 --- a/nova_powervm/virt/powervm/nvram/api.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -from nova import exception as nex -import six - -from nova_powervm.virt.powervm.i18n import _ - - -class NVRAMUploadException(nex.NovaException): - msg_fmt = _("The NVRAM could not be stored for instance %(instance)s. " - "Reason: %(reason)s") - - -class NVRAMDownloadException(nex.NovaException): - msg_fmt = _("The NVRAM could not be fetched for instance %(instance)s. " - "Reason: %(reason)s") - - -class NVRAMDeleteException(nex.NovaException): - msg_fmt = _("The NVRAM could not be deleted for instance %(instance)s. " - "Reason: %(reason)s") - - -class NVRAMConfigOptionNotSet(nex.NovaException): - msg_fmt = _("The configuration option '%(option)s' must be set.") - - -@six.add_metaclass(abc.ABCMeta) -class NvramStore(object): - - @abc.abstractmethod - def store(self, instance, data, force=True): - """Store the NVRAM into the storage service. - - :param instance: The nova instance object OR instance UUID. - :param data: the NVRAM data base64 encoded string - :param force: boolean whether an update should always be saved, - otherwise, check to see if it's changed. - """ - - @abc.abstractmethod - def fetch(self, instance): - """Fetch the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - :returns: the NVRAM data base64 encoded string - """ - - @abc.abstractmethod - def delete(self, instance): - """Delete the NVRAM from the storage service. - - :param instance: The nova instance object OR instance UUID. - """ diff --git a/nova_powervm/virt/powervm/nvram/manager.py b/nova_powervm/virt/powervm/nvram/manager.py deleted file mode 100644 index 8c67b100..00000000 --- a/nova_powervm/virt/powervm/nvram/manager.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
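The NvramStore class above is the abstract contract (store/fetch/delete) that the Swift-backed store later in this patch implements. As a rough illustration of the contract only, a hypothetical dict-backed store could look like the following; this class never existed in the tree and skips the abc/six metaclass machinery for brevity:

    class InMemoryNvramStore(object):
        """Toy NVRAM store keeping base64-encoded NVRAM strings in a dict."""

        def __init__(self):
            self._data = {}

        def store(self, instance_uuid, data, force=True):
            # Skip the write when nothing changed and the caller did not force.
            if not force and self._data.get(instance_uuid) == data:
                return
            self._data[instance_uuid] = data

        def fetch(self, instance_uuid):
            return self._data.get(instance_uuid)

        def delete(self, instance_uuid):
            self._data.pop(instance_uuid, None)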
- -import eventlet -from nova import utils as n_utils -from oslo_concurrency import lockutils -from oslo_log import log as logging -from oslo_utils import uuidutils -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -import six -import time - -from nova_powervm.virt.powervm.nvram import api -from nova_powervm.virt.powervm import vm - -LOG = logging.getLogger(__name__) -LOCK_NVRAM_UPDT_SET = 'nvram_update_set' -LOCK_NVRAM_STORE = 'nvram_update' - - -class NvramManager(object): - """The manager of the NVRAM store and fetch process. - - This class uses two locks. One for controlling access to the set of - instance uuids to update the NVRAM for and another to control actually - updating the NVRAM for the instance itself. - - An update to the instance uuid store should always lock the update lock - first and then get the set lock. There should never be a case where the set - lock is acquired before the update lock. This can lead to deadlock cases. - - NVRAM events for an instance come in spurts primarily during power on and - off, from what has been observed so far. By using a set of instance uuids, - rapid requests to store the NVRAM can be collapsed down into a single - request (optimal). - """ - - def __init__(self, store_api, adapter, host_uuid): - """Create the manager. - - :param store_api: the NvramStore api to use. - :param adapter: pypowervm Adapter - :param host_uuid: powervm host uuid string - """ - super(NvramManager, self).__init__() - self._api = store_api - self._adapter = adapter - self._host_uuid = host_uuid - - self._update_set = set() - self._queue = eventlet.queue.LightQueue() - self._shutdown = False - self._update_thread = n_utils.spawn(self._update_thread) - LOG.debug('NVRAM store manager started.') - - def shutdown(self): - """Shutdown the NVRAM Manager.""" - - LOG.debug('NVRAM store manager shutting down.') - self._shutdown = True - # Remove all pending updates - self._clear_set() - # Signal the thread to stop - self._queue.put(None) - self._update_thread.wait() - - def store(self, instance, immediate=False): - """Store the NVRAM for an instance. - - :param instance: The instance UUID OR instance object of the instance - to store the NVRAM for. - :param immediate: Force the update to take place immediately. - Otherwise, the request is queued for asynchronous - update. - """ - inst_uuid = (instance if - uuidutils.is_uuid_like(instance) else instance.uuid) - if immediate: - self._update_nvram(instance_uuid=inst_uuid) - else: - # Add it to the list to update - self._add_to_set(inst_uuid) - # Trigger the thread - self._queue.put(inst_uuid, block=False) - # Sleep so the thread gets a chance to run - time.sleep(0) - - def fetch(self, instance): - """Fetch the NVRAM for an instance. - - :param instance: The instance UUID OR instance object of the instance - to fetch the NVRAM for. - :returns: The NVRAM data for the instance. - """ - inst_uuid = (instance if - uuidutils.is_uuid_like(instance) else instance.uuid) - try: - return self._api.fetch(inst_uuid) - except Exception as e: - LOG.exception(('Could not fetch NVRAM for instance with UUID %s.'), - inst_uuid) - raise api.NVRAMDownloadException(instance=inst_uuid, - reason=six.text_type(e)) - - @lockutils.synchronized(LOCK_NVRAM_STORE) - def remove(self, instance): - """Remove the stored NVRAM for an instance. - - :param instance: The nova instance object OR instance UUID. 
- """ - inst_uuid = (instance if - uuidutils.is_uuid_like(instance) else instance.uuid) - # Remove any pending updates - self._pop_from_set(uuid=inst_uuid) - # Remove it from the store - try: - self._api.delete(inst_uuid) - except Exception: - # Delete exceptions should not end the operation - LOG.exception(('Could not delete NVRAM for instance with UUID ' - '%s.'), inst_uuid) - - @lockutils.synchronized(LOCK_NVRAM_UPDT_SET) - def _add_to_set(self, instance_uuid): - """Add an instance uuid to the set of uuids to store the NVRAM.""" - self._update_set.add(instance_uuid) - - @lockutils.synchronized(LOCK_NVRAM_UPDT_SET) - def _pop_from_set(self, uuid=None): - """Pop an instance uuid off the set of instances to update. - - :param uuid: The uuid of the instance to update or if not specified - pull the next instance uuid off the set. - :returns: The instance uuid. - """ - try: - if uuid is None: - return self._update_set.pop() - else: - self._update_set.remove(uuid) - return uuid - except KeyError: - return None - - @lockutils.synchronized(LOCK_NVRAM_UPDT_SET) - def _clear_set(self): - """Clear the set of instance uuids to store NVRAM for.""" - self._update_set.clear() - - @lockutils.synchronized(LOCK_NVRAM_STORE) - def _update_nvram(self, instance_uuid=None): - """Perform an update of NVRAM for instance. - - :param instance_uuid: The instance uuid of the instance to update or if - not specified pull the next one off the set to - update. - """ - if instance_uuid is None: - instance_uuid = self._pop_from_set() - if instance_uuid is None: - return - else: - # Remove any pending updates - self._pop_from_set(uuid=instance_uuid) - - try: - LOG.debug('Updating NVRAM for instance with uuid: %s', - instance_uuid) - data = vm.get_instance_wrapper( - self._adapter, instance_uuid, xag=[pvm_const.XAG.NVRAM]).nvram - LOG.debug('NVRAM for instance with uuid %(uuid)s: %(data)s', - {'uuid': instance_uuid, 'data': data}) - if data is not None: - self._api.store(instance_uuid, data) - except pvm_exc.Error: - # Update exceptions should not end the operation. - LOG.exception('Could not update NVRAM for instance with uuid %s.', - instance_uuid) - - def _update_thread(self): - """The thread that is charged with updating the NVRAM store.""" - - LOG.debug('NVRAM store manager update thread started.') - # Loop until it's time to shut down - while not self._shutdown: - if self._queue.get(block=True) is None: - LOG.debug('NVRAM store manager update thread is ending.') - return - - self._update_nvram() - time.sleep(0) diff --git a/nova_powervm/virt/powervm/nvram/swift.py b/nova_powervm/virt/powervm/nvram/swift.py deleted file mode 100644 index 4d73d327..00000000 --- a/nova_powervm/virt/powervm/nvram/swift.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
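NvramManager above coalesces bursts of store requests by pairing a set of pending instance uuids with a wake-up queue: many rapid puts typically collapse into a single NVRAM write. The same idea re-sketched with stdlib primitives (threading/queue instead of eventlet), purely for illustration:

    import queue
    import threading

    pending = set()                  # instance uuids awaiting an NVRAM store
    pending_lock = threading.Lock()  # guards 'pending' (the "set lock")
    wake = queue.Queue()             # wakes the worker; None means shut down


    def request_store(inst_uuid):
        """Queue an NVRAM store; rapid duplicates collapse into one entry."""
        with pending_lock:
            pending.add(inst_uuid)
        wake.put(inst_uuid)


    def worker():
        while True:
            if wake.get() is None:   # shutdown signal
                return
            with pending_lock:
                uuid = pending.pop() if pending else None
            if uuid is not None:
                print('storing NVRAM for %s' % uuid)  # stand-in for the store


    t = threading.Thread(target=worker)
    t.start()
    for _ in range(3):
        request_store('inst-1')      # burst of requests for one instance
    wake.put(None)
    t.join()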
- -import copy -import hashlib -import os -import retrying -import six -import tempfile -import types - -from nova_powervm import conf as cfg -from nova_powervm.conf import powervm -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm.nvram import api - -from oslo_concurrency import lockutils -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import uuidutils -from swiftclient import exceptions as swft_exc -from swiftclient import service as swft_srv - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class SwiftNvramStore(api.NvramStore): - - def __init__(self): - super(SwiftNvramStore, self).__init__() - self.container = CONF.powervm.swift_container - # Build the swift service options - self.options = self._init_swift_options() - self.swift_service = swft_srv.SwiftService(options=self.options) - self._container_found = False - - @staticmethod - def _init_swift_options(): - """Initialize all the options needed to communicate with Swift.""" - - for opt in powervm.swift_opts: - if opt.required and getattr(CONF.powervm, opt.name) is None: - raise api.NVRAMConfigOptionNotSet(option=opt.name) - - options = { - 'auth_version': CONF.powervm.swift_auth_version, - 'os_username': CONF.powervm.swift_username, - 'os_user_domain_name': CONF.powervm.swift_user_domain_name, - 'os_password': CONF.powervm.swift_password, - 'os_project_name': CONF.powervm.swift_project_name, - 'os_project_domain_name': CONF.powervm.swift_project_domain_name, - 'os_auth_url': CONF.powervm.swift_auth_url, - 'os_cacert': CONF.powervm.swift_cacert, - 'os_endpoint_type': CONF.powervm.swift_endpoint_type, - } - - return options - - def _run_operation(self, f, *args, **kwargs): - """Convenience method to call the Swift client service.""" - - # Get the function to call - func = getattr(self.swift_service, f) - try: - result = func(*args, **kwargs) - # For generators we have to copy the results because the - # service is going out of scope. - if isinstance(result, types.GeneratorType): - results = [] - LOG.debug('SwiftOperation results:') - for r in result: - results.append(copy.deepcopy(r)) - LOG.debug(str(r)) - result = results - else: - LOG.debug('SwiftOperation result: %s', str(result)) - return result - except swft_srv.SwiftError: - with excutils.save_and_reraise_exception(logger=LOG): - LOG.exception("Error running swift operation.") - - @classmethod - def _get_name_from_listing(cls, results): - names = [] - for result in results: - if result['success']: - for obj in result['listing']: - names.append(obj['name']) - return names - - def _get_container_names(self): - results = self._run_operation('list', options={'long': True}) - return self._get_name_from_listing(results) - - def _get_object_names(self, container, prefix=None): - # If this is the first pass, the container may not exist yet. Check - # to make sure it does, otherwise the list of the object names will - # fail. - if not self._container_found: - container_names = self._get_container_names() - self._container_found = (container in container_names) - - # If the container was still not found, then just return an empty - # list. There are no objects. - if not self._container_found: - return [] - - results = self._run_operation( - 'list', options={'long': True, 'prefix': prefix}, - container=container) - return self._get_name_from_listing(results) - - def _store(self, inst_key, data, exists=None): - """Store the NVRAM into the storage service. 
-
-        :param inst_key: The key by which to store the data in the repository.
-        :param data: the NVRAM data base64 encoded string
-        :param exists: (Optional, Default: None) If specified, tells the upload
-                       whether or not the object exists. Should be a boolean
-                       or None. If left as None, the method will look up
-                       whether or not it exists.
-        """
-
-        # If the object doesn't exist, we tell it to 'leave_segments'. This
-        # prevents a lookup and saves the logs from an ERROR in the swift
-        # client (that really isn't an error...sigh). It should be empty
-        # if not the first upload (which defaults to leave_segments=False)
-        # so that it overrides the existing element on a subsequent upload.
-        if exists is None:
-            exists = self._exists(inst_key)
-        options = dict(leave_segments=True) if not exists else None
-
-        # The swift client already has a retry operation. The retry method
-        # takes a 'reset' function as a parameter. This parameter is 'None'
-        # for all operations except upload. For upload, it's set to a default
-        # method that throws a ClientException if the object to upload doesn't
-        # implement tell/seek/reset. If the authentication error occurs during
-        # upload, this ClientException is raised with no retry. For any other
-        # operation, swift client will retry and succeed.
-        @retrying.retry(retry_on_result=lambda result: result,
-                        wait_fixed=250, stop_max_attempt_number=2)
-        def _run_upload_operation():
-            """Run the upload operation.
-
-            Attempts retry for a maximum number of two times. The upload
-            operation will fail with ClientException, if there is an
-            authentication error. The second attempt only happens if the
-            first attempt failed with ClientException. A return value of
-            True means we should retry, and False means no failure during
-            upload, thus no retry is required.
-
-            Raises RetryError if the upload failed during the second attempt,
-            as the maximum number of retry attempts has been reached.
-
-            """
-            source = six.StringIO(data)
-            obj = swft_srv.SwiftUploadObject(source, object_name=inst_key)
-
-            results = self._run_operation('upload', self.container,
-                                          [obj], options=options)
-            for result in results:
-                if not result['success']:
-                    # TODO(arun-mani - Bug 1611011): Filed for updating swift
-                    # client to return http status code in case of failure
-                    if isinstance(result['error'], swft_exc.ClientException):
-                        # If upload failed during nvram/slot_map update due to
-                        # expired keystone token, retry swift-client operation
-                        # to allow regeneration of token
-                        LOG.warning('NVRAM upload failed due to invalid '
-                                    'token. Retrying upload.')
-                        return True
-                    # The upload failed.
-                    raise api.NVRAMUploadException(instance=inst_key,
-                                                   reason=result)
-            return False
-        try:
-            _run_upload_operation()
-        except retrying.RetryError as re:
-            # The upload failed.
-            reason = (_('Unable to store NVRAM after %d attempts') %
-                      re.last_attempt.attempt_number)
-            raise api.NVRAMUploadException(instance=inst_key, reason=reason)
-
-    @lockutils.synchronized('nvram')
-    def store(self, instance, data, force=True):
-        """Store the NVRAM into the storage service.
-
-        :param instance: The nova instance object OR instance UUID.
-        :param data: the NVRAM data base64 encoded string
-        :param force: boolean whether an update should always be saved,
-                      otherwise, check to see if it's changed.
-        """
-        inst_uuid = (instance if
-                     uuidutils.is_uuid_like(instance) else instance.uuid)
-        exists = self._exists(inst_uuid)
-        if not force and exists:
-            # See if the entry exists and has not changed.
-            results = self._run_operation('stat', options={'long': True},
-                                          container=self.container,
-                                          objects=[inst_uuid])
-            result = results[0]
-            if result['success']:
-                existing_hash = result['headers']['etag']
-                if six.PY3:
-                    data = data.encode('ascii')
-                md5 = hashlib.md5(data).hexdigest()
-                if existing_hash == md5:
-                    LOG.info(('NVRAM has not changed for instance with '
-                              'UUID %s.'), inst_uuid)
-                    return
-
-        self._store(inst_uuid, data, exists=exists)
-        LOG.debug('NVRAM updated for instance with UUID %s', inst_uuid)
-
-    def store_slot_map(self, inst_key, data):
-        """Store the Slot Map to Swift.
-
-        :param inst_key: The instance key to use for the storage operation.
-        :param data: The data of the object to store. This should be a string.
-        """
-        self._store(inst_key, data)
-
-    def fetch_slot_map(self, inst_key):
-        """Fetch the Slot Map object.
-
-        :param inst_key: The instance key to use for the storage operation.
-        :returns: The slot map (as a string)
-        """
-        return self._fetch(inst_key)[0]
-
-    def fetch(self, instance):
-        """Fetch the NVRAM from the storage service.
-
-        :param instance: The nova instance object or instance UUID.
-        :returns: the NVRAM data base64 encoded string
-        """
-        inst_uuid = (instance if
-                     uuidutils.is_uuid_like(instance) else instance.uuid)
-        data, result = self._fetch(inst_uuid)
-        if not data:
-            raise api.NVRAMDownloadException(instance=inst_uuid,
-                                             reason=result)
-        return data
-
-    def _exists(self, object_key):
-        # Search by prefix, but since this is just a prefix, we need to loop
-        # over the results and check each name for a full match.
-        obj_names = self._get_object_names(self.container, prefix=object_key)
-        for obj in obj_names:
-            if object_key == obj:
-                return True
-        return False
-
-    def _fetch(self, object_key):
-        # Check if the object exists. If not, return a result accordingly.
-        if not self._exists(object_key):
-            return None, _('Object does not exist in Swift.')
-
-        try:
-            # Create a temp file for download into
-            with tempfile.NamedTemporaryFile(delete=False) as f:
-                options = {
-                    'out_file': f.name
-                }
-            # The file is now created and closed for the swift client to use.
-            results = self._run_operation(
-                'download', container=self.container, objects=[object_key],
-                options=options)
-            for result in results:
-                if result['success']:
-                    with open(f.name, 'r') as f:
-                        return f.read(), result
-                else:
-                    return None, result
-        finally:
-            try:
-                os.remove(f.name)
-            except Exception:
-                LOG.warning('Could not remove temporary file: %s', f.name)
-
-    def delete_slot_map(self, inst_key):
-        """Delete the Slot Map from Swift.
-
-        :param inst_key: The instance key to use for the storage operation.
-        """
-        for result in self._run_operation('delete', container=self.container,
-                                          objects=[inst_key]):
-
-            LOG.debug('Delete slot map result: %s', str(result))
-            if not result['success']:
-                raise api.NVRAMDeleteException(reason=result,
-                                               instance=inst_key)
-
-    def delete(self, instance):
-        """Delete the NVRAM from the storage service.
-
-        :param instance: The nova instance object OR instance UUID.
- """ - inst_uuid = (instance if - uuidutils.is_uuid_like(instance) else instance.uuid) - for result in self._run_operation('delete', container=self.container, - objects=[inst_uuid]): - - LOG.debug('Delete result for instance with UUID %(inst_uuid)s: ' - '%(res)s', {'inst_uuid': inst_uuid, 'res': result}) - if not result['success']: - raise api.NVRAMDeleteException(instance=inst_uuid, - reason=result) diff --git a/nova_powervm/virt/powervm/slot.py b/nova_powervm/virt/powervm/slot.py deleted file mode 100644 index aed83224..00000000 --- a/nova_powervm/virt/powervm/slot.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from oslo_log import log as logging - -from pypowervm import exceptions as pvm_exc -from pypowervm.tasks import slot_map -from pypowervm.tasks import storage as pvm_tstor - -from nova_powervm.virt.powervm import exception as p_exc - - -LOG = logging.getLogger(__name__) - -_SLOT_KEY = "CLIENT_SLOT_DATA" -_SLOT_VOLUME_TYPES = ['vscsi', 'fileio', 'rbd', 'iscsi'] - - -def build_slot_mgr(instance, store_api, adapter=None, vol_drv_iter=None): - """Builds the NovaSlotManager - - A slot manager serves two purposes. First is to store the 'slots' in which - client adapters are created. These adapters host I/O to the VMs and - it is important to save which VM slots these adapters are created within. - - The second purpose for the slot mgr is to consume that data. When a VM - rebuild operation is kicked off, the client adapters must go into the - same exact slots. This slot mgr serves up that necessary metadata. - - :param instance: The nova instance to get the slot map for. - :param store_api: The Swift Storage API that will save the Slot Map data. - This may be None, indicating that the Slot Map data - should not be persisted. - :param adapter: pypowervm.adapter.Adapter for REST communication. Required - if rebuilding. May be omitted if the slot map instance - will only be used for saving source mappings. - :param vol_drv_iter: Iterator over volume drivers in this driver instance. - May be omitted if the slot map instance will only be - used for saving source mappings. - :return: The appropriate PowerVM SlotMapStore implementation. If the NVRAM - store is set up, a Swift-backed implementation is returned. If - there is no NVRAM set up, a no-op implementation is returned. - """ - if store_api is not None: - return SwiftSlotManager(store_api, instance=instance, adapter=adapter, - vol_drv_iter=vol_drv_iter) - return NoopSlotManager(instance=instance) - - -class NovaSlotManager(slot_map.SlotMapStore): - """Used to manage the slots for a PowerVM-based system. - - Slots are used by the Virtual Machine to place 'adapters' on the system. - This slot store serves two purposes. It will guide the spawn (or rebuild) - operation with what slots it should use. 
-
-    Second, it serves as storage for the client slot data (which can then
-    be saved to an external location for rebuild server scenarios).
-
-    This class extends the base pypowervm facilities for this, but adds the
-    context of the 'Nova' objects. It should be extended by the backing
-    storage implementations.
-    """
-
-    def __init__(self, instance=None, adapter=None, vol_drv_iter=None):
-        super(NovaSlotManager, self).__init__(
-            '%s_slot_map' % instance.uuid)
-        self.instance = instance
-        self.adapter = adapter
-        self.vol_drv_iter = vol_drv_iter if vol_drv_iter else ()
-        self._build_map = None
-        self._vios_wraps = []
-        self.is_rebuild = (self.adapter and vol_drv_iter)
-
-    @property
-    def build_map(self):
-        """Returns a 'BuildSlotMap' from pypowervm.
-
-        Identifies for build out of a VM what slots should be used for the
-        adapters.
-        """
-        if self._build_map is None:
-            if self.is_rebuild:
-                self.init_recreate_map(self.adapter, self.vol_drv_iter)
-            else:
-                self._build_map = slot_map.BuildSlotMap(self)
-        return self._build_map
-
-    def init_recreate_map(self, adapter, vol_drv_iter):
-        """To be used on a target system. Builds the 'slot recreate' map.
-
-        This initializes, on the target system, how the client slots should
-        be rebuilt on the client VM.
-
-        This should not be called unless it is a VM recreate.
-
-        :param adapter: The pypowervm adapter.
-        :param vol_drv_iter: An iterator of the volume drivers.
-        """
-        # This should only be called on a rebuild. Focus on being correct
-        # first. Performance is secondary.
-
-        # We need to scrub existing stale mappings, including those for the VM
-        # we're creating. It is critical that this happen *before* we create
-        # any of the mappings we actually want this VM to have.
-        scrub_ftsk = pvm_tstor.ComprehensiveScrub(adapter)
-        scrub_ftsk.execute()
-        self._vios_wraps = scrub_ftsk.feed
-
-        pv_vscsi_vol_to_vio = {}
-        fabric_names = []
-        for bdm, vol_drv in vol_drv_iter:
-            if vol_drv.vol_type() in _SLOT_VOLUME_TYPES:
-                self._pv_vscsi_vol_to_vio(vol_drv, pv_vscsi_vol_to_vio)
-            elif len(fabric_names) == 0 and vol_drv.vol_type() == 'npiv':
-                fabric_names = vol_drv._fabric_names()
-
-        # Run the full initialization now that we have the pre-requisite data
-        try:
-            self._build_map = slot_map.RebuildSlotMap(
-                self, self._vios_wraps, pv_vscsi_vol_to_vio, fabric_names)
-        except pvm_exc.InvalidHostForRebuild as e:
-            raise p_exc.InvalidRebuild(error=six.text_type(e))
-
-    def _pv_vscsi_vol_to_vio(self, vol_drv, vol_to_vio):
-        """Find which physical volumes are on what VIOSes.
-
-        Builds: { "udid" : [ "vios_uuid", "vios_uuid"], ...}
-        """
-        for vios_w in self._vios_wraps:
-            on_vio, udid = vol_drv.is_volume_on_vios(vios_w)
-            if not on_vio:
-                continue
-
-            if udid not in vol_to_vio:
-                vol_to_vio[udid] = []
-            vol_to_vio[udid].append(vios_w.uuid)
-
-
-class SwiftSlotManager(NovaSlotManager):
-    """Used to store the slot metadata for the VM.
-
-    When rebuilding a PowerVM virtual machine, the slots must line up in their
-    original location. This is so that the VM boots from the same location
-    and the boot order (as well as other data) is preserved.
-
-    This implementation stores the slot metadata in Swift. It is
-    only used if the operator has chosen to use Swift to store the NVRAM
-    metadata.
- """ - - def __init__(self, store_api, **kwargs): - self.store_api = store_api - super(SwiftSlotManager, self).__init__(**kwargs) - - def _load(self, key): - return self.store_api.fetch_slot_map(key) - - def _save(self, key, blob): - self.store_api.store_slot_map(key, blob) - - def _delete(self, key): - try: - self.store_api.delete_slot_map(key) - except Exception: - LOG.warning("Unable to delete the slot map from Swift backing " - "store with ID %(key)s. Will require manual cleanup.", - {'key': key}, instance=self.instance) - - -class NoopSlotManager(NovaSlotManager): - """No op Slot Map (for when Swift is not used - which is standard).""" - pass diff --git a/nova_powervm/virt/powervm/tasks/__init__.py b/nova_powervm/virt/powervm/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/nova_powervm/virt/powervm/tasks/base.py b/nova_powervm/virt/powervm/tasks/base.py deleted file mode 100644 index f4334508..00000000 --- a/nova_powervm/virt/powervm/tasks/base.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2016, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_log import log as logging -from taskflow import engines as tf_eng -from taskflow.listeners import timing as tf_tm - - -LOG = logging.getLogger(__name__) - - -def run(flow, instance=None): - """Run a TaskFlow Flow with task timing and logging with instance. - - :param flow: A taskflow.flow.Flow to run. - :param instance: A nova instance, for logging. - :return: The result of taskflow.engines.run(), a dictionary of named - results of the Flow's execution. - """ - def log_with_instance(*args, **kwargs): - """Wrapper for LOG.info(*args, **kwargs, instance=instance).""" - if instance is not None: - kwargs['instance'] = instance - LOG.info(*args, **kwargs) - - eng = tf_eng.load(flow) - with tf_tm.PrintingDurationListener(eng, printer=log_with_instance): - return eng.run() diff --git a/nova_powervm/virt/powervm/tasks/image.py b/nova_powervm/virt/powervm/tasks/image.py deleted file mode 100644 index 018b45f1..00000000 --- a/nova_powervm/virt/powervm/tasks/image.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
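The run() helper in tasks/base.py above is a thin wrapper around the stock taskflow engine that adds per-task duration logging. Stripped of the logging, flow execution looks roughly like the following sketch; the task and flow names are invented, not from the tree:

    from taskflow import engines as tf_eng
    from taskflow import task
    from taskflow.patterns import linear_flow


    class SayHello(task.Task):
        default_provides = 'greeting'

        def execute(self):
            return 'hello'


    flow = linear_flow.Flow('demo_flow')
    flow.add(SayHello())
    engine = tf_eng.load(flow)  # the same loading step base.py performs
    engine.run()
    print(engine.storage.fetch('greeting'))  # -> 'hello'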
- -from oslo_log import log as logging -from taskflow import task - -from nova_powervm.virt.powervm import image - - -LOG = logging.getLogger(__name__) - - -class UpdateTaskState(task.Task): - - def __init__(self, update_task_state, task_state, expected_state=None): - """Invoke the update_task_state callback with the desired arguments. - - :param update_task_state: update_task_state callable passed into - snapshot. - :param task_state: The new task state (from nova.compute.task_states) - to set. - :param expected_state: Optional. The expected state of the task prior - to this request. - """ - self.update_task_state = update_task_state - self.task_state = task_state - self.kwargs = {} - if expected_state is not None: - # We only want to pass expected state if it's not None! That's so - # we take the update_task_state method's default. - self.kwargs['expected_state'] = expected_state - super(UpdateTaskState, self).__init__( - name='update_task_state_%s' % task_state) - - def execute(self): - self.update_task_state(self.task_state, **self.kwargs) - - -class StreamToGlance(task.Task): - - """Task around streaming a block device to glance.""" - - def __init__(self, context, image_api, image_id, instance): - """Initialize the flow for streaming a block device to glance. - - Requires: disk_path: path to the block device file for the instance's - boot disk. - - :param context: Nova security context. - :param image_api: Handle to the glance API. - :param image_id: UUID of the prepared glance image. - :param instance: Instance whose backing device is being captured. - """ - self.context = context - self.image_api = image_api - self.image_id = image_id - self.instance = instance - super(StreamToGlance, self).__init__(name='stream_to_glance', - requires='disk_path') - - def execute(self, disk_path): - metadata = image.generate_snapshot_metadata( - self.context, self.image_api, self.image_id, self.instance) - LOG.info("Starting stream of boot device (local blockdev %(devpath)s) " - "to glance image %(img_id)s.", - {'devpath': disk_path, 'img_id': self.image_id}, - instance=self.instance) - image.stream_blockdev_to_glance(self.context, self.image_api, - self.image_id, metadata, disk_path) diff --git a/nova_powervm/virt/powervm/tasks/network.py b/nova_powervm/virt/powervm/tasks/network.py deleted file mode 100644 index e8918add..00000000 --- a/nova_powervm/virt/powervm/tasks/network.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
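StreamToGlance above declares requires='disk_path', so the taskflow engine injects the boot disk path produced by an earlier task in the same flow. A stripped-down sketch of that requires/provides wiring, with invented task names and a placeholder path:

    from taskflow import engines as tf_eng
    from taskflow import task
    from taskflow.patterns import linear_flow


    class FindDisk(task.Task):
        default_provides = 'disk_path'

        def execute(self):
            return '/dev/sdX'  # placeholder, not a real device


    class Capture(task.Task):
        def __init__(self):
            super(Capture, self).__init__(name='capture',
                                          requires=['disk_path'])

        def execute(self, disk_path):
            # The engine passes FindDisk's result in as 'disk_path'.
            print('capturing %s' % disk_path)


    flow = linear_flow.Flow('capture_flow')
    flow.add(FindDisk(), Capture())
    tf_eng.run(flow)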
-
-import eventlet
-
-from nova import exception
-
-from oslo_log import log as logging
-from pypowervm.wrappers import network as pvm_net
-from taskflow import task
-
-from nova_powervm import conf as cfg
-from nova_powervm.virt.powervm import vif
-from nova_powervm.virt.powervm import vm
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class UnplugVifs(task.Task):
-
-    """The task to unplug Virtual Network Interfaces from a VM."""
-
-    def __init__(self, adapter, instance, network_infos, host_uuid,
-                 slot_mgr):
-        """Create the task.
-
-        :param adapter: The pypowervm adapter.
-        :param instance: The nova instance.
-        :param network_infos: The network information containing the nova
-                              VIFs to unplug.
-        :param host_uuid: The host system's PowerVM UUID.
-        :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client
-                         slots used when a VIF is detached from the VM.
-        """
-        self.adapter = adapter
-        self.network_infos = network_infos or []
-        self.host_uuid = host_uuid
-        self.slot_mgr = slot_mgr
-        self.instance = instance
-
-        super(UnplugVifs, self).__init__(
-            name='unplug_vifs', requires=['lpar_wrap'])
-
-    def execute(self, lpar_wrap):
-        # If the VM is not in an OK state for deleting, then throw an
-        # error up front.
-        modifiable, reason = lpar_wrap.can_modify_io()
-        if not modifiable:
-            LOG.error("Unable to remove VIFs in the instance's current state. "
-                      "The reason reported by the system is: %(reason)s",
-                      {'reason': reason}, instance=self.instance)
-            raise exception.VirtualInterfaceUnplugException(reason=reason)
-
-        # Get all the current Client Network Adapters (CNA) on the VM itself.
-        cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
-        # Walk through the VIFs and delete the corresponding CNA on the VM.
-        for network_info in self.network_infos:
-            vif.unplug(self.adapter, self.host_uuid, self.instance,
-                       network_info, self.slot_mgr, cna_w_list=cna_w_list)
-
-        return cna_w_list
-
-
-class PlugVifs(task.Task):
-
-    """The task to plug the Virtual Network Interfaces to a VM."""
-
-    def __init__(self, virt_api, adapter, instance, network_infos, host_uuid,
-                 slot_mgr):
-        """Create the task.
-
-        Provides 'vm_cnas' - the list of the Virtual Machine's Client Network
-        Adapters as they stand after all VIFs are plugged. May be None, in
-        which case the Task requiring 'vm_cnas' should discover them afresh.
-
-        :param virt_api: The VirtAPI for the operation.
-        :param adapter: The pypowervm adapter.
-        :param instance: The nova instance.
-        :param network_infos: The network information containing the nova
-                              VIFs to create.
-        :param host_uuid: The host system's PowerVM UUID.
-        :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client
-                         slots used when a VIF is attached to the VM.
-        """
-        self.virt_api = virt_api
-        self.adapter = adapter
-        self.network_infos = network_infos or []
-        self.host_uuid = host_uuid
-        self.slot_mgr = slot_mgr
-        self.crt_network_infos, self.update_network_infos = [], []
-        self.cnas, self.vnics = None, None
-        self.instance = instance
-
-        super(PlugVifs, self).__init__(name='plug_vifs', provides='vm_cnas',
-                                       requires=['lpar_wrap'])
-
-    def _vif_exists(self, network_info):
-        """Does the instance have a CNA/VNIC (as appropriate) for a given net?
-
-        :param network_info: A network information dict. This method expects
-                             it to contain keys 'vnic_type' (value is 'direct'
-                             for VNIC; otherwise assume CNA); and 'address'
-                             (MAC address).
-        :return: True if a CNA/VNIC (as appropriate) with the network_info's
-                 MAC address exists on the instance.
False otherwise. - """ - # Are we looking for a VNIC or a CNA? - if network_info['vnic_type'] == 'direct': - if self.vnics is None: - self.vnics = vm.get_vnics(self.adapter, self.instance) - vifs = self.vnics - else: - if self.cnas is None: - self.cnas = vm.get_cnas(self.adapter, self.instance) - vifs = self.cnas - - return network_info['address'] in [vm.norm_mac(v.mac) for v in vifs] - - def execute(self, lpar_wrap): - # We will have two types of network infos. One is for newly created - # vifs. The others are those that exist, but should be re-'treated' - for network_info in self.network_infos: - if self._vif_exists(network_info): - self.update_network_infos.append(network_info) - else: - self.crt_network_infos.append(network_info) - - # If there are no vifs to create or update, then just exit immediately. - if not self.crt_network_infos and not self.update_network_infos: - return [] - - # Check to see if the LPAR is OK to add VIFs to. - modifiable, reason = lpar_wrap.can_modify_io() - if not modifiable and self.crt_network_infos: - LOG.error("Unable to create VIF(s) in the instance's current " - "state. The reason reported by the system is: " - "%(reason)s", {'reason': reason}, instance=self.instance) - raise exception.VirtualInterfaceCreateException() - - # TODO(KYLEH): We're setting up to wait for an instance event. The - # event needs to come back to our compute manager so we need to ensure - # the instance.host is set to our host. We shouldn't need to do this - # but in the evacuate/recreate case it may reflect the old host. - # See: https://bugs.launchpad.net/nova/+bug/1535918 - undo_host_change = False - if self.instance.host != CONF.host: - LOG.warning('Instance was not assigned to this host. ' - 'It was assigned to: %s', self.instance.host, - instance=self.instance) - # Update the instance... - old_host = self.instance.host - self.instance.host = CONF.host - self.instance.save() - undo_host_change = True - - # For existing VIFs that we just need to update, run the plug but do - # not wait for the neutron event as that likely won't be sent (it was - # already done). 
- for network_info in self.update_network_infos: - LOG.info("Updating VIF with mac %(mac)s", - {'mac': network_info['address']}, instance=self.instance) - vif.plug(self.adapter, self.host_uuid, self.instance, - network_info, self.slot_mgr, new_vif=False) - - # For the VIFs, run the creates (and wait for the events back) - try: - with self.virt_api.wait_for_instance_event( - self.instance, self._get_vif_events(), - deadline=CONF.vif_plugging_timeout, - error_callback=self._vif_callback_failed): - for network_info in self.crt_network_infos: - LOG.info('Creating VIF with mac %(mac)s.', - {'mac': network_info['address']}, - instance=self.instance) - new_vif = vif.plug( - self.adapter, self.host_uuid, self.instance, - network_info, self.slot_mgr, new_vif=True) - if self.cnas is not None and isinstance(new_vif, - pvm_net.CNA): - self.cnas.append(new_vif) - except eventlet.timeout.Timeout: - LOG.error('Error waiting for VIF to be created.', - instance=self.instance) - raise exception.VirtualInterfaceCreateException() - finally: - if undo_host_change: - LOG.info('Undoing temporary host assignment to instance.', - instance=self.instance) - self.instance.host = old_host - self.instance.save() - - return self.cnas - - def _vif_callback_failed(self, event_name, instance): - LOG.error('VIF plug failure for callback on event %(event)s.', - {'event': event_name}, instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - - def _get_vif_events(self): - """Returns the VIF events that need to be received for a VIF plug. - - In order for a VIF plug to be successful, certain events should be - received from other components within the OpenStack ecosystem. This - method returns the events neutron needs for a given deploy. - """ - # See libvirt's driver.py -> _get_neutron_events method for - # more information. - if CONF.vif_plugging_is_fatal and CONF.vif_plugging_timeout: - return [('network-vif-plugged', network_info['id']) - for network_info in self.crt_network_infos - if not network_info.get('active', True)] - - def revert(self, lpar_wrap, result, flow_failures): - if not self.network_infos: - return - - # The parameters have to match the execute method, plus the response + - # failures even if only a subset are used. - LOG.warning('VIF creation is being rolled back.', - instance=self.instance) - - # Get the current adapters on the system - cna_w_list = vm.get_cnas(self.adapter, self.instance) - for network_info in self.crt_network_infos: - try: - vif.unplug(self.adapter, self.host_uuid, self.instance, - network_info, self.slot_mgr, cna_w_list=cna_w_list) - except Exception: - LOG.exception("Error unplugging during vif rollback. " - "Ignoring.", instance=self.instance) - - -class PlugMgmtVif(task.Task): - - """The task to plug the Management VIF into a VM.""" - - def __init__(self, adapter, instance, host_uuid, slot_mgr): - """Create the task. - - Requires 'vm_cnas' from PlugVifs. If None, this Task will retrieve the - VM's list of CNAs. - - Provides the mgmt_cna. This may be none if no management device was - created. This is the CNA of the mgmt vif for the VM. - - :param adapter: The pypowervm adapter. - :param instance: The nova instance. - :param host_uuid: The host system's PowerVM UUID. - :param slot_mgr: A NovaSlotManager. 
Used to store/retrieve the client - slots used when a VIF is attached to the VM - """ - self.adapter = adapter - self.host_uuid = host_uuid - self.slot_mgr = slot_mgr - self.instance = instance - - super(PlugMgmtVif, self).__init__( - name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas']) - - def execute(self, vm_cnas): - # If configured to not use RMC mgmt vifs, then return None. Need to - # return None because the Config Drive step (which may be used...may - # not be) required the mgmt vif. - if not CONF.powervm.use_rmc_mgmt_vif: - LOG.debug('No management VIF created because ' - 'CONF.powervm.use_rmc_mgmt_vif is False', - instance=self.instance) - return None - - LOG.info('Plugging the management network interface.', - instance=self.instance) - # Determine if we need to create the secure RMC VIF. This should only - # be needed if there is not a VIF on the secure RMC vSwitch - vswitch = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid) - if vswitch is None: - LOG.warning('No management VIF created due to lack of management ' - 'virtual switch', instance=self.instance) - return None - - # This next check verifies that there are no existing NICs on the - # vSwitch, so that the VM does not end up with multiple RMC VIFs. - if vm_cnas is None: - has_mgmt_vif = vm.get_cnas(self.adapter, self.instance, - vswitch_uri=vswitch.href) - else: - has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas] - - if has_mgmt_vif: - LOG.debug('Management VIF already exists.', instance=self.instance) - return None - - # Return the created management CNA - return vif.plug_secure_rmc_vif( - self.adapter, self.instance, self.host_uuid, self.slot_mgr) diff --git a/nova_powervm/virt/powervm/tasks/slot.py b/nova_powervm/virt/powervm/tasks/slot.py deleted file mode 100644 index 22fa8478..00000000 --- a/nova_powervm/virt/powervm/tasks/slot.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2016, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_log import log as logging -from taskflow import task - - -LOG = logging.getLogger(__name__) - - -class SaveSlotStore(task.Task): - - """Will run the save of the slot store. - - This is typically done after some action (such as add nic, deploy, add - volume, etc...) has run and the slot map itself has been updated. One of - the last actions is to now save the slot map back to the storage system. - """ - - def __init__(self, instance, slot_mgr): - """Create the task. - - :param instance: The nova instance. - :param slot_mgr: A NovaSlotManager. Contains the object that will be - saved. - """ - self.slot_mgr = slot_mgr - self.instance = instance - super(SaveSlotStore, self).__init__(name='save_slot_store') - - def execute(self): - LOG.debug("Topology: %(topo)s", {'topo': self.slot_mgr.topology}, - instance=self.instance) - self.slot_mgr.save() - - -class DeleteSlotStore(task.Task): - - """Will run the delete of the slot store. - - This removes the slot store for an entire instance. 
Typically run when the - VM is destroyed. - """ - - def __init__(self, instance, slot_mgr): - """Create the task. - - :param instance: The nova instance. - :param slot_mgr: A NovaSlotManager. Contains the object that will be - deleted. - """ - self.slot_mgr = slot_mgr - self.instance = instance - super(DeleteSlotStore, self).__init__(name='delete_slot_store') - - def execute(self): - self.slot_mgr.delete() diff --git a/nova_powervm/virt/powervm/tasks/storage.py b/nova_powervm/virt/powervm/tasks/storage.py deleted file mode 100644 index 36d92e47..00000000 --- a/nova_powervm/virt/powervm/tasks/storage.py +++ /dev/null @@ -1,553 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from pypowervm import exceptions as pvm_exc -from pypowervm.tasks import scsi_mapper as pvm_smap -from taskflow import task -from taskflow.types import failure as task_fail - -from nova_powervm.virt.powervm.disk import driver as disk_driver -from nova_powervm.virt.powervm import exception as npvmex -from nova_powervm.virt.powervm import media -from nova_powervm.virt.powervm import mgmt -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) - - -class ConnectVolume(task.Task): - - """The task to connect a volume to an instance.""" - - def __init__(self, vol_drv, slot_mgr): - """Create the task. - - :param vol_drv: The volume driver (see volume folder). Ties the - storage to a connection type (ex. vSCSI or NPIV). - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is attached to the VM - """ - self.vol_drv = vol_drv - self.vol_id = self.vol_drv.connection_info['data']['volume_id'] - self.slot_mgr = slot_mgr - - super(ConnectVolume, self).__init__( - name='connect_vol_%s' % self.vol_id) - - def execute(self): - LOG.info('Connecting volume %(vol)s.', {'vol': self.vol_id}, - instance=self.vol_drv.instance) - self.vol_drv.connect_volume(self.slot_mgr) - - def revert(self, result, flow_failures): - LOG.warning('Rolling back connection for volume %(vol)s.', - {'vol': self.vol_id}, instance=self.vol_drv.instance) - - # Note that the rollback is *instant*. Resetting the FeedTask ensures - # immediate rollback. - self.vol_drv.reset_stg_ftsk() - try: - # We attempt to disconnect in case we 'partially connected'. In - # the connect scenario, perhaps one of the Virtual I/O Servers - # was connected. This attempts to clear anything out to make sure - # the terminate connection runs smoothly. - self.vol_drv.disconnect_volume(self.slot_mgr) - except npvmex.VolumeDetachFailed: - # Only log that the volume detach failed. Should not be blocking - # due to being in the revert flow. - LOG.exception("Unable to disconnect volume %s during rollback.", - self.vol_id, instance=self.vol_drv.instance) - - -class DisconnectVolume(task.Task): - - """The task to disconnect a volume from an instance.""" - - def __init__(self, vol_drv, slot_mgr): - """Create the task. 
- - :param vol_drv: The volume driver (see volume folder). Ties the - storage to a connection type (ex. vSCSI or NPIV). - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is detached from the VM - """ - self.vol_drv = vol_drv - self.vol_id = self.vol_drv.connection_info['data']['volume_id'] - self.slot_mgr = slot_mgr - - super(DisconnectVolume, self).__init__( - name='disconnect_vol_%s' % self.vol_id) - - def execute(self): - LOG.info('Disconnecting volume %(vol)s.', - {'vol': self.vol_id}, instance=self.vol_drv.instance) - self.vol_drv.disconnect_volume(self.slot_mgr) - - def revert(self, result, flow_failures): - LOG.warning('Reconnecting volume %(vol)s on disconnect rollback.', - {'vol': self.vol_id}, instance=self.vol_drv.instance) - - # Note that the rollback is *instant*. Resetting the FeedTask ensures - # immediate rollback. - self.vol_drv.reset_stg_ftsk() - try: - # We try to reconnect the volume here so that it maintains its - # linkage (in the hypervisor) to the VM. This makes it easier for - # operators to understand the linkage between the VMs and volumes - # in error scenarios. This is simply useful for debug purposes - # if there is an operational error. - self.vol_drv.connect_volume(self.slot_mgr) - except npvmex.VolumeAttachFailed: - # Only log that the volume attach failed. Should not be blocking - # due to being in the revert flow. See comment above. - LOG.exception("Unable to reconnect volume %s during rollback.", - self.vol_id, instance=self.vol_drv.instance) - - -class CreateDiskForImg(task.Task): - - """The Task to create the disk from an image in the storage.""" - - def __init__(self, disk_dvr, context, instance, image_meta, - image_type=disk_driver.DiskType.BOOT): - """Create the Task. - - Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr - create_disk_from_image method. - - :param disk_dvr: The storage driver. - :param context: The context passed into the driver method. - :param instance: The nova instance. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param image_type: The image type. See disk/driver.py - """ - super(CreateDiskForImg, self).__init__( - name='crt_disk_from_img', provides='disk_dev_info') - self.disk_dvr = disk_dvr - self.context = context - self.instance = instance - self.image_meta = image_meta - self.image_type = image_type - - def execute(self): - return self.disk_dvr.create_disk_from_image( - self.context, self.instance, self.image_meta, - image_type=self.image_type) - - def revert(self, result, flow_failures): - # If there is no result, or its a direct failure, then there isn't - # anything to delete. - if result is None or isinstance(result, task_fail.Failure): - return - - # Run the delete. The result is a single disk. Wrap into list - # as the method works with plural disks. - self.disk_dvr.delete_disks([result]) - - -class ConnectDisk(task.Task): - - """The task to connect the disk to the instance.""" - - def __init__(self, disk_dvr, instance, stg_ftsk=None): - """Create the Task for the connect disk to instance method. - - Requires disk info through requirement of disk_dev_info (provided by - crt_disk_from_img) - - :param disk_dvr: The disk driver. - :param instance: The nova instance. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. 
If - the FeedTask is not provided, the updates will be run - immediately when the respective method is executed. - """ - super(ConnectDisk, self).__init__(name='connect_disk', - requires=['disk_dev_info']) - self.disk_dvr = disk_dvr - self.instance = instance - self.stg_ftsk = stg_ftsk - - def execute(self, disk_dev_info): - self.disk_dvr.connect_disk(self.instance, disk_dev_info, - stg_ftsk=self.stg_ftsk) - - def revert(self, disk_dev_info, result, flow_failures): - # Note that the FeedTask is None - to force instant disconnect. - self.disk_dvr.disconnect_disk(self.instance) - - -class InstanceDiskToMgmt(task.Task): - - """The Task to connect an instance's disk to the management partition. - - This task will connect the instance's disk to the management partition and - discover it. We do these two pieces together because their reversion - happens in the same order. - """ - - def __init__(self, disk_dvr, instance): - """Create the Task for connecting boot disk to mgmt partition. - - Provides: - stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was - connected. - vios_wrap: The Virtual I/O Server wrapper - (pypowervm.wrappers.virtual_io_server.VIOS) from which the - storage element was mapped. - disk_path: The local path to the mapped-and-discovered device, e.g. - '/dev/sde' - - :param disk_dvr: The disk driver. - :param instance: The nova instance whose boot disk is to be connected. - """ - super(InstanceDiskToMgmt, self).__init__( - name='instance_disk_to_mgmt', - provides=['stg_elem', 'vios_wrap', 'disk_path']) - self.disk_dvr = disk_dvr - self.instance = instance - self.stg_elem = None - self.vios_wrap = None - self.disk_path = None - - def execute(self): - """Map the instance's boot disk and discover it.""" - - # Search for boot disk on the NovaLink partition - if self.disk_dvr.mp_uuid in self.disk_dvr.vios_uuids: - dev_name = self.disk_dvr.get_bootdisk_path( - self.instance, self.disk_dvr.mp_uuid) - if dev_name is not None: - return None, None, dev_name - - self.stg_elem, self.vios_wrap = ( - self.disk_dvr.connect_instance_disk_to_mgmt(self.instance)) - new_maps = pvm_smap.find_maps( - self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid, - stg_elem=self.stg_elem) - if not new_maps: - raise npvmex.NewMgmtMappingNotFoundException( - stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name) - - # new_maps should be length 1, but even if it's not - i.e. we somehow - # matched more than one mapping of the same dev to the management - # partition from the same VIOS - it is safe to use the first one. - the_map = new_maps[0] - # Scan the SCSI bus, discover the disk, find its canonical path. - LOG.info("Discovering device and path for mapping of %(dev_name)s " - "on the management partition.", - {'dev_name': self.stg_elem.name}, instance=self.instance) - self.disk_path = mgmt.discover_vscsi_disk(the_map) - return self.stg_elem, self.vios_wrap, self.disk_path - - def revert(self, result, flow_failures): - """Unmap the disk and then remove it from the management partition. - - We use this order to avoid rediscovering the device in case some other - thread scans the SCSI bus between when we remove and when we unmap. 
-
-        """
-        if self.vios_wrap is None or self.stg_elem is None:
-            # We never even got connected - nothing to do.
-            return
-        LOG.warning("Unmapping boot disk %(disk_name)s from the management "
-                    "partition via Virtual I/O Server %(vioname)s.",
-                    {'disk_name': self.stg_elem.name,
-                     'vioname': self.vios_wrap.name}, instance=self.instance)
-        self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
-                                                self.stg_elem.name)
-
-        if self.disk_path is None:
-            # We did not discover the disk - nothing else to do.
-            return
-        LOG.warning("Removing disk %(dpath)s from the management partition.",
-                    {'dpath': self.disk_path}, instance=self.instance)
-        try:
-            mgmt.remove_block_dev(self.disk_path)
-        except pvm_exc.Error:
-            # Don't allow revert exceptions to interrupt the revert flow.
-            LOG.exception("Remove disk failed during revert. Ignoring.",
-                          instance=self.instance)
-
-
-class RemoveInstanceDiskFromMgmt(task.Task):
-
-    """Unmap and remove an instance's boot disk from the mgmt partition."""
-
-    def __init__(self, disk_dvr, instance):
-        """Unmap and remove an instance's boot disk from the mgmt partition.
-
-        Requires (from InstanceDiskToMgmt):
-        stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
-                  connected.
-        vios_wrap: The Virtual I/O Server wrapper
-                   (pypowervm.wrappers.virtual_io_server.VIOS) from which the
-                   storage element was mapped.
-        disk_path: The local path to the mapped-and-discovered device, e.g.
-                   '/dev/sde'
-
-        :param disk_dvr: The disk driver.
-        :param instance: The nova instance whose boot disk is to be
-                         disconnected.
-        """
-        self.disk_dvr = disk_dvr
-        self.instance = instance
-        super(RemoveInstanceDiskFromMgmt, self).__init__(
-            name='remove_inst_disk_from_mgmt',
-            requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
-    def execute(self, stg_elem, vios_wrap, disk_path):
-        """Unmap and remove an instance's boot disk from the mgmt partition.
-
-        Input parameters ('requires') are provided by the InstanceDiskToMgmt
-        task.
-
-        :param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
-                         to be disconnected.
-        :param vios_wrap: The Virtual I/O Server wrapper from which the
-                          mapping is to be removed.
-        :param disk_path: The local path to the disk device to be removed,
-                          e.g. '/dev/sde'
-        """
-        # stg_elem is None if the boot disk was not mapped to the management
-        # partition.
-        if stg_elem is None:
-            return
-        LOG.info("Unmapping boot disk %(disk_name)s from the management "
-                 "partition via Virtual I/O Server %(vios_name)s.",
-                 {'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
-                 instance=self.instance)
-        self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
-        LOG.info("Removing disk %(disk_path)s from the management partition.",
-                 {'disk_path': disk_path}, instance=self.instance)
-        mgmt.remove_block_dev(disk_path)
-
-
-class CreateAndConnectCfgDrive(task.Task):
-
-    """The task to create the configuration drive."""
-
-    def __init__(self, adapter, instance, injected_files,
-                 network_info, admin_pass, stg_ftsk=None):
-        """Create the Task that creates and connects the config drive.
-
-        Requires the 'lpar_wrap' and 'mgmt_cna'.
-        Provides the 'cfg_drv_vscsi_map' which is an element to later map
-        the vscsi drive.
-
-        :param adapter: The adapter for the pypowervm API
-        :param instance: The nova instance
-        :param injected_files: A list of file paths that will be injected into
-                               the ISO.
-        :param network_info: The network_info from the nova spawn method.
-        :param admin_pass: Optional password to inject for the VM.
-        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
-                         I/O Operations. If provided, the Virtual I/O Server
-                         mapping updates will be added to the FeedTask. This
-                         defers the updates to some later point in time. If
-                         the FeedTask is not provided, the updates will be run
-                         immediately when the respective method is executed.
-        """
-        super(CreateAndConnectCfgDrive, self).__init__(
-            name='cfg_drive', requires=['lpar_wrap', 'mgmt_cna'])
-        self.adapter = adapter
-        self.instance = instance
-        self.injected_files = injected_files
-        self.network_info = network_info
-        self.ad_pass = admin_pass
-        self.mb = None
-        self.stg_ftsk = stg_ftsk
-
-    def execute(self, lpar_wrap, mgmt_cna):
-        self.mb = media.ConfigDrivePowerVM(self.adapter)
-        self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
-                                    self.network_info, lpar_wrap.uuid,
-                                    admin_pass=self.ad_pass,
-                                    mgmt_cna=mgmt_cna, stg_ftsk=self.stg_ftsk)
-
-    def revert(self, lpar_wrap, mgmt_cna, result, flow_failures):
-        # The parameters have to match the execute method, plus the response +
-        # failures, even if only a subset are used.
-
-        # No media builder, nothing to do.
-        if self.mb is None:
-            return
-
-        # Delete the virtual optical media. If it fails, we don't care.
-        try:
-            self.mb.dlt_vopt(lpar_wrap.uuid)
-        except Exception:
-            LOG.exception('VOpt removal (as part of reversion) failed.',
-                          instance=self.instance)
-
-
-class DeleteVOpt(task.Task):
-
-    """The task to delete the virtual optical."""
-
-    def __init__(self, adapter, instance, stg_ftsk=None):
-        """Creates the Task to delete the instance's virtual optical media.
-
-        :param adapter: The adapter for the pypowervm API
-        :param instance: The nova instance.
-        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
-                         I/O Operations. If provided, the Virtual I/O Server
-                         mapping updates will be added to the FeedTask. This
-                         defers the updates to some later point in time. If
-                         the FeedTask is not provided, the updates will be run
-                         immediately when the respective method is executed.
-        """
-        super(DeleteVOpt, self).__init__(name='vopt_delete')
-        self.adapter = adapter
-        self.instance = instance
-        self.stg_ftsk = stg_ftsk
-
-    def execute(self):
-        media_builder = media.ConfigDrivePowerVM(self.adapter)
-        media_builder.dlt_vopt(vm.get_pvm_uuid(self.instance),
-                               stg_ftsk=self.stg_ftsk)
-
-
-class DetachDisk(task.Task):
-
-    """The task to detach the disk storage from the instance."""
-
-    def __init__(self, disk_dvr, instance, stg_ftsk=None, disk_type=None):
-        """Creates the Task to detach the storage adapters.
-
-        Provides the stor_adpt_mappings: a list of pypowervm
-        VSCSIMappings or VFCMappings (depending on the storage adapter).
-
-        :param disk_dvr: The DiskAdapter for the VM.
-        :param instance: The nova instance.
-        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
-                         I/O Operations. If provided, the Virtual I/O Server
-                         mapping updates will be added to the FeedTask. This
-                         defers the updates to some later point in time. If
-                         the FeedTask is not provided, the updates will be run
-                         immediately when the respective method is executed.
-        :param disk_type: List of disk types to detach. None means detach all.
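
The stg_ftsk pattern repeated in these constructors batches VIOS mapping updates so many tasks can flush in one REST pass. A hedged sketch of the two modes, assuming an authenticated pypowervm Adapter in 'adapter' (the FeedTask name here is arbitrary):

    from pypowervm import const as pvm_const
    from pypowervm.tasks import partition as pvm_tpar

    # Deferred mode: several tasks share one FeedTask and only queue their
    # Virtual I/O Server mapping updates on it.
    stg_ftsk = pvm_tpar.build_active_vio_feed_task(
        adapter, name='spawn_io', xag={pvm_const.XAG.VIO_SMAP})
    # e.g. DetachDisk(disk_dvr, instance, stg_ftsk=stg_ftsk), ...
    stg_ftsk.execute()  # one pass writes every queued VIOS update

    # Immediate mode: passing stg_ftsk=None makes each task build and run
    # its own FeedTask as soon as its method executes.
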
-
-        """
-        super(DetachDisk, self).__init__(
-            name='detach_storage', provides='stor_adpt_mappings')
-        self.disk_dvr = disk_dvr
-        self.instance = instance
-        self.stg_ftsk = stg_ftsk
-        self.disk_type = disk_type
-
-    def execute(self):
-        return self.disk_dvr.disconnect_disk(
-            self.instance, stg_ftsk=self.stg_ftsk, disk_type=self.disk_type)
-
-
-class DeleteDisk(task.Task):
-
-    """The task to delete the backing storage."""
-
-    def __init__(self, disk_dvr, instance):
-        """Creates the Task to delete the disk storage from the system.
-
-        Requires the stor_adpt_mappings.
-
-        :param disk_dvr: The DiskAdapter for the VM.
-        :param instance: The nova instance.
-        """
-        super(DeleteDisk, self).__init__(
-            name='dlt_storage', requires=['stor_adpt_mappings'])
-        self.disk_dvr = disk_dvr
-
-    def execute(self, stor_adpt_mappings):
-        self.disk_dvr.delete_disks(stor_adpt_mappings)
-
-
-class SaveBDM(task.Task):
-
-    """Task to save an updated block device mapping."""
-
-    def __init__(self, bdm, instance):
-        """Creates the Task to save an updated block device mapping.
-
-        :param bdm: The updated bdm.
-        :param instance: The nova instance
-        """
-        self.bdm = bdm
-        self.instance = instance
-        super(SaveBDM, self).__init__(name='save_bdm_%s' % self.bdm.volume_id)
-
-    def execute(self):
-        LOG.info('Saving block device mapping for volume id %(vol_id)s.',
-                 {'vol_id': self.bdm.volume_id}, instance=self.instance)
-        self.bdm.save()
-
-
-class FindDisk(task.Task):
-
-    """The Task to find a disk and provide information to downstream tasks."""
-
-    def __init__(self, disk_dvr, context, instance, disk_type):
-        """Create the Task.
-
-        Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr
-        get_disk_ref method.
-
-        :param disk_dvr: The storage driver.
-        :param context: The context passed into the driver method.
-        :param instance: The nova instance.
-        :param disk_type: One of the DiskType enum values.
-        """
-        super(FindDisk, self).__init__(
-            name='find_disk', provides='disk_dev_info')
-        self.disk_dvr = disk_dvr
-        self.context = context
-        self.instance = instance
-        self.disk_type = disk_type
-
-    def execute(self):
-        disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
-        if not disk:
-            LOG.warning('Disk not found: %(disk_name)s',
-                        {'disk_name':
-                         self.disk_dvr._get_disk_name(self.disk_type,
-                                                      self.instance),
-                         }, instance=self.instance)
-        return disk
-
-
-class ExtendDisk(task.Task):
-
-    """Task to extend a disk."""
-
-    def __init__(self, disk_dvr, instance, disk_info, size):
-        """Creates the Task to extend a disk.
-
-        :param disk_dvr: The storage driver.
-        :param instance: The instance to extend the disk for.
-        :param disk_info: Dictionary with disk info.
-        :param size: The new size in GB.
-        """
-        self.disk_dvr = disk_dvr
-        self.instance = instance
-        self.disk_info = disk_info
-        self.size = size
-        super(ExtendDisk, self).__init__(
-            name='extend_disk_%s' % disk_info['type'])
-
-    def execute(self):
-        LOG.info('Extending %(disk_type)s disk to %(size)s GB.',
-                 {'disk_type': self.disk_info['type'], 'size': self.size},
-                 instance=self.instance)
-        self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)
diff --git a/nova_powervm/virt/powervm/tasks/vm.py b/nova_powervm/virt/powervm/tasks/vm.py
deleted file mode 100644
index 60ebeceb..00000000
--- a/nova_powervm/virt/powervm/tasks/vm.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from pypowervm import const as pvm_const -from pypowervm.tasks import partition as pvm_tpar -from pypowervm.tasks import storage as pvm_stg -from taskflow import task -from taskflow.types import failure as task_fail - -from nova_powervm.virt.powervm import vm - -from nova.compute import task_states - -LOG = logging.getLogger(__name__) - - -class Get(task.Task): - - """The task for getting a VM entry.""" - - def __init__(self, adapter, host_uuid, instance): - """Creates the Task for getting a VM entry. - - Provides the 'lpar_wrap' for other tasks. - - :param adapter: The adapter for the pypowervm API - :param host_uuid: The host UUID - :param instance: The nova instance. - """ - super(Get, self).__init__(name='get_vm', provides='lpar_wrap') - self.adapter = adapter - self.host_uuid = host_uuid - self.instance = instance - - def execute(self): - return vm.get_instance_wrapper(self.adapter, self.instance) - - -class Create(task.Task): - - """The task for creating a VM.""" - - def __init__(self, adapter, host_wrapper, instance, stg_ftsk=None, - nvram_mgr=None, slot_mgr=None): - """Creates the Task for creating a VM. - - The revert method only needs to do something for failed rebuilds. - Since the rebuild and build methods have different flows, it is - necessary to clean up the destination LPAR on fails during rebuild. - - The revert method is not implemented for build because the compute - manager calls the driver destroy operation for spawn errors. By - not deleting the lpar, it's a cleaner flow through the destroy - operation and accomplishes the same result. - - Any stale storage associated with the new VM's (possibly recycled) ID - will be cleaned up. The cleanup work will be delegated to the FeedTask - represented by the stg_ftsk parameter. - - Provides the 'lpar_wrap' for other tasks. - - :param adapter: The adapter for the pypowervm API - :param host_wrapper: The managed system wrapper - :param instance: The nova instance. - :param stg_ftsk: (Optional, Default: None) A FeedTask managing storage - I/O operations. If None, one will be built locally - and executed immediately. Otherwise it is the caller's - responsibility to execute the FeedTask. - :param nvram_mgr: The NVRAM manager to fetch the NVRAM from. If None, - the NVRAM will not be fetched. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the - maximum number of virtual slots for the VM. 
-
-        """
-        super(Create, self).__init__(name='crt_vm', provides='lpar_wrap')
-        self.adapter = adapter
-        self.host_wrapper = host_wrapper
-        self.instance = instance
-        self.stg_ftsk = stg_ftsk or pvm_tpar.build_active_vio_feed_task(
-            adapter, name='create_scrubber',
-            xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
-        self.nvram_mgr = nvram_mgr
-        self.slot_mgr = slot_mgr
-
-    def execute(self):
-        data = None
-        if self.nvram_mgr is not None:
-            LOG.info('Fetching NVRAM.', instance=self.instance)
-            data = self.nvram_mgr.fetch(self.instance)
-            LOG.debug('NVRAM data is: %s', data, instance=self.instance)
-
-        wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance,
-                              nvram=data, slot_mgr=self.slot_mgr)
-        pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
-                                             lpars_exist=True)
-        # If the stg_ftsk passed in was None and we initialized a
-        # 'create_scrubber' stg_ftsk, then run it immediately. We do
-        # this because we moved the LPAR storage scrub tasks out of the
-        # build_map initialization. This was so that we could construct the
-        # build map earlier in the spawn, just before the LPAR is created.
-        # Only rebuilds should be passing in None for stg_ftsk.
-        if self.stg_ftsk.name == 'create_scrubber':
-            LOG.info('Scrubbing storage as part of rebuild.',
-                     instance=self.instance)
-            self.stg_ftsk.execute()
-
-        return wrap
-
-    def revert(self, result, flow_failures, **kwargs):
-        # Only reverts failed rebuilds, because the revert
-        # for a failed build is handled in the manager.
-
-        if self.instance.task_state == task_states.REBUILD_SPAWNING:
-            LOG.info('Rebuild of instance failed. Deleting instance from '
-                     'destination.', instance=self.instance)
-            vm.delete_lpar(self.adapter, self.instance)
-
-
-class Resize(task.Task):
-
-    """The task for resizing an existing VM."""
-
-    def __init__(self, adapter, host_wrapper, instance, name=None):
-        """Creates the Task to resize a VM.
-
-        Provides the 'lpar_wrap' for other tasks.
-
-        :param adapter: The adapter for the pypowervm API
-        :param host_wrapper: The managed system wrapper
-        :param instance: The nova instance.
-        :param name: VM name to use for the update. Used on resize when we
-                     want to rename it but not use the instance name.
-        """
-        super(Resize, self).__init__(name='resize_vm', provides='lpar_wrap')
-        self.adapter = adapter
-        self.host_wrapper = host_wrapper
-        self.instance = instance
-        self.vm_name = name
-
-    def execute(self):
-        return vm.update(self.adapter, self.host_wrapper,
-                         self.instance, entry=None, name=self.vm_name)
-
-
-class Rename(task.Task):
-
-    """The task for renaming an existing VM."""
-
-    def __init__(self, adapter, instance, name):
-        """Creates the Task to rename a VM.
-
-        Provides the 'lpar_wrap' for other tasks.
-
-        :param adapter: The adapter for the pypowervm API
-        :param instance: The nova instance.
-        :param name: The new VM name.
-        """
-        super(Rename, self).__init__(name='rename_vm_%s' % name,
-                                     provides='lpar_wrap')
-        self.adapter = adapter
-        self.instance = instance
-        self.vm_name = name
-
-    def execute(self):
-        LOG.info('Renaming instance to name: %s', self.vm_name,
-                 instance=self.instance)
-        return vm.rename(self.adapter, self.instance, self.vm_name)
-
-
-class PowerOn(task.Task):
-
-    """The task to power on the instance."""
-
-    def __init__(self, adapter, instance, pwr_opts=None):
-        """Create the Task for the power on of the LPAR.
-
-        :param adapter: The pypowervm adapter.
-        :param instance: The nova instance.
-        :param pwr_opts: Additional parameters for the pypowervm PowerOn Job.
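
Taken together, these tasks are meant to be chained into a flow; a hypothetical spawn-style wiring (adapter, host_wrapper, instance, and slot_mgr assumed to exist) might look like the sketch below. Note that Create.revert only deletes the LPAR for failed rebuilds; for normal spawn failures the compute manager's destroy path does the cleanup.

    from taskflow import engines
    from taskflow.patterns import linear_flow

    flow = linear_flow.Flow('spawn_vm')
    # Create provides 'lpar_wrap', which downstream tasks can require.
    flow.add(Create(adapter, host_wrapper, instance, slot_mgr=slot_mgr))
    flow.add(PowerOn(adapter, instance))
    # If PowerOn or anything after it fails, the flow unwinds: PowerOn.revert
    # force-stops the LPAR, then Create.revert runs its rebuild-only check.
    engines.run(flow)
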
- """ - super(PowerOn, self).__init__(name='pwr_vm') - self.adapter = adapter - self.instance = instance - self.pwr_opts = pwr_opts - - def execute(self): - vm.power_on(self.adapter, self.instance, opts=self.pwr_opts) - - def revert(self, result, flow_failures): - LOG.warning('Rolling back power-on.', instance=self.instance) - - if isinstance(result, task_fail.Failure): - # The power on itself failed...can't power off. - LOG.debug('Power on failed. Not performing power off.', - instance=self.instance) - return - - vm.power_off(self.adapter, self.instance, force_immediate=True) - - -class PowerOff(task.Task): - - """The task to power off a VM.""" - - def __init__(self, adapter, instance, force_immediate=False): - """Creates the Task to power off an LPAR. - - :param adapter: The adapter for the pypowervm API - :param instance: The nova instance. - :param force_immediate: Boolean. Perform a VSP hard power off. - """ - super(PowerOff, self).__init__(name='pwr_off_vm') - self.adapter = adapter - self.instance = instance - self.force_immediate = force_immediate - - def execute(self): - vm.power_off(self.adapter, self.instance, - force_immediate=self.force_immediate) - - -class StoreNvram(task.Task): - - """Store the NVRAM for an instance.""" - - def __init__(self, nvram_mgr, instance, immediate=False): - """Creates a task to store the NVRAM of an instance. - - :param nvram_mgr: The NVRAM manager. - :param instance: The nova instance. - :param immediate: boolean whether to update the NVRAM immediately - """ - super(StoreNvram, self).__init__(name='store_nvram') - self.nvram_mgr = nvram_mgr - self.instance = instance - self.immediate = immediate - - def execute(self): - if self.nvram_mgr is None: - return - - try: - self.nvram_mgr.store(self.instance, immediate=self.immediate) - except Exception: - LOG.exception('Unable to store NVRAM.', instance=self.instance) - - -class DeleteNvram(task.Task): - - """Delete the NVRAM for an instance from the store.""" - - def __init__(self, nvram_mgr, instance): - """Creates a task to delete the NVRAM of an instance. - - :param nvram_mgr: The NVRAM manager. - :param instance: The nova instance. - """ - super(DeleteNvram, self).__init__(name='delete_nvram') - self.nvram_mgr = nvram_mgr - self.instance = instance - - def execute(self): - if self.nvram_mgr is None: - LOG.info("No op for NVRAM delete.", instance=self.instance) - return - - LOG.info('Deleting NVRAM', instance=self.instance) - try: - self.nvram_mgr.remove(self.instance) - except Exception: - LOG.exception('Unable to delete NVRAM.', instance=self.instance) - - -class Delete(task.Task): - - """The task to delete the instance from the system.""" - - def __init__(self, adapter, instance): - """Create the Task to delete the VM from the system. - - :param adapter: The adapter for the pypowervm API. - :param instance: The nova instance. - """ - super(Delete, self).__init__(name='dlt_vm') - self.adapter = adapter - self.instance = instance - - def execute(self): - vm.delete_lpar(self.adapter, self.instance) - - -class UpdateIBMiSettings(task.Task): - - """The task to update settings of an ibmi instance.""" - - def __init__(self, adapter, instance, boot_type): - """Create the Task to update settings of the IBMi VM. - - :param adapter: The adapter for the pypowervm API. - :param instance: The nova instance. - :param boot_type: The boot type of the instance. 
-
-        """
-        super(UpdateIBMiSettings, self).__init__(name='update_ibmi_settings')
-        self.adapter = adapter
-        self.instance = instance
-        self.boot_type = boot_type
-
-    def execute(self):
-        vm.update_ibmi_settings(self.adapter, self.instance, self.boot_type)
diff --git a/nova_powervm/virt/powervm/vif.py b/nova_powervm/virt/powervm/vif.py
deleted file mode 100644
index 86bedc6c..00000000
--- a/nova_powervm/virt/powervm/vif.py
+++ /dev/null
@@ -1,845 +0,0 @@
-# Copyright 2016, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import six
-
-from nova import context as ctx
-from nova import exception
-from nova import network as net_api
-from nova.network import model as network_model
-from oslo_config import cfg
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import importutils
-import pypowervm.const as pvm_c
-from pypowervm import exceptions as pvm_ex
-from pypowervm.tasks import cna as pvm_cna
-from pypowervm.tasks import partition as pvm_par
-from pypowervm.tasks import sriov as sriovtask
-from pypowervm import util as pvm_util
-from pypowervm.wrappers import event as pvm_evt
-from pypowervm.wrappers import iocard as pvm_card
-from pypowervm.wrappers import logical_partition as pvm_lpar
-from pypowervm.wrappers import managed_system as pvm_ms
-from pypowervm.wrappers import network as pvm_net
-
-from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm import vm
-
-
-LOG = log.getLogger(__name__)
-
-SECURE_RMC_VSWITCH = 'MGMTSWITCH'
-SECURE_RMC_VLAN = 4094
-
-# Provider tag for custom events from this module
-EVENT_PROVIDER_ID = 'NOVA_PVM_VIF'
-
-VIF_TYPE_PVM_SEA = 'pvm_sea'
-VIF_TYPE_PVM_OVS = 'ovs'
-VIF_TYPE_PVM_SRIOV = 'pvm_sriov'
-
-VIF_MAPPING = {VIF_TYPE_PVM_SEA:
-               'nova_powervm.virt.powervm.vif.PvmSeaVifDriver',
-               VIF_TYPE_PVM_OVS:
-               'nova_powervm.virt.powervm.vif.PvmOvsVifDriver',
-               VIF_TYPE_PVM_SRIOV:
-               'nova_powervm.virt.powervm.vif.PvmVnicSriovVifDriver'}
-
-# NOTE(svenkat): Manually adjust CNA child ordering to work around bug 1731657
-# TODO(svenkat) Remove workaround when pypowervm is fixed
-child_order = list(pvm_net.CNA._child_order)
-child_order.remove('VirtualNetworks')
-child_order.append('VirtualNetworks')
-pvm_net.CNA._child_order = tuple(child_order)
-
-CONF = cfg.CONF
-
-
-def _build_vif_driver(adapter, host_uuid, instance, vif):
-    """Returns the appropriate VIF Driver for the given VIF.
-
-    :param adapter: The pypowervm adapter API interface.
-    :param host_uuid: The host system UUID.
-    :param instance: The nova instance.
-    :param vif: The virtual interface from Nova.
-    :return: The appropriate PvmVifDriver for the VIF.
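
The VIF_MAPPING dispatch above reduces to a small table-plus-import pattern; a trimmed sketch using oslo.utils, with a stand-in table that borrows one real entry from the mapping:

    from oslo_utils import importutils

    DRIVERS = {'ovs': 'nova_powervm.virt.powervm.vif.PvmOvsVifDriver'}

    def build_vif_driver(vif_type, adapter, host_uuid, instance):
        path = DRIVERS.get(vif_type)
        if path is None:
            raise ValueError('no VIF driver for type %s' % vif_type)
        # import_object imports the class by dotted path and instantiates it
        # with the given arguments.
        return importutils.import_object(path, adapter, host_uuid, instance)
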
- """ - if vif.get('type') is None: - raise exception.VirtualInterfacePlugException( - _("vif_type parameter must be present for this vif_driver " - "implementation")) - - # Check the type to the implementations - if VIF_MAPPING.get(vif['type']): - return importutils.import_object( - VIF_MAPPING.get(vif['type']), adapter, host_uuid, instance) - - # No matching implementation, raise error. - raise exception.VirtualInterfacePlugException( - _("Unable to find appropriate PowerVM VIF Driver for VIF type " - "%(vif_type)s on instance %(instance)s") % - {'vif_type': vif['type'], 'instance': instance.name}) - - -def _push_vif_event(adapter, action, vif_w, instance, vif_type): - """Push a custom event to the REST server for a vif action (plug/unplug). - - This event prompts the neutron agent to mark the port up or down. - - :param adapter: The pypowervm adapter. - :param action: The action taken on the vif - either 'plug' or 'unplug' - :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.) - :param instance: The nova instance for the event - :param vif_type: The type of event source (pvm_sea, ovs, bridge, - pvm_sriov etc) - """ - data = vif_w.href - detail = jsonutils.dumps(dict(provider=EVENT_PROVIDER_ID, action=action, - mac=vif_w.mac, type=vif_type)) - event = pvm_evt.Event.bld(adapter, data, detail) - try: - event = event.create() - LOG.debug('Pushed custom event for consumption by neutron agent: %s', - str(event), instance=instance) - except Exception: - with excutils.save_and_reraise_exception(logger=LOG): - LOG.exception('Custom VIF event push failed. %s', str(event), - instance=instance) - - -def plug(adapter, host_uuid, instance, vif, slot_mgr, new_vif=True): - """Plugs a virtual interface (network) into a VM. - - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. - :param vif: The virtual interface to plug into the instance. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a VIF is attached to the VM. - :param new_vif: (Optional, Default: True) If set, indicates that it is - a brand new VIF. If False, it indicates that the VIF - is already on the client but should be treated on the - bridge. - :return: The wrapper (CNA or VNIC) representing the plugged virtual - network. None if the vnet was not created. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - - # Get the slot number to use for the VIF creation. May be None - # indicating usage of the next highest available. - slot_num = slot_mgr.build_map.get_vnet_slot(vif['address']) - - # Invoke the plug - try: - vnet_w = vif_drv.plug(vif, slot_num, new_vif=new_vif) - except pvm_ex.HttpError as he: - # Log the message constructed by HttpError - LOG.exception("HttpError during vif plug operation.", - instance=instance) - raise exception.VirtualInterfacePlugException(message=he.args[0]) - # Other exceptions are (hopefully) custom VirtualInterfacePlugException - # generated lower in the call stack. - - # If the slot number hadn't been provided initially, save it for the - # next rebuild - if not slot_num and new_vif: - slot_mgr.register_vnet(vnet_w) - - # Push a custom event if we really plugged the vif - if vnet_w is not None: - _push_vif_event(adapter, 'plug', vnet_w, instance, vif['type']) - - return vnet_w - - -def unplug(adapter, host_uuid, instance, vif, slot_mgr, cna_w_list=None): - """Unplugs a virtual interface (network) from a VM. 
- - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. - :param vif: The virtual interface to plug into the instance. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a VIF is detached from the VM. - :param cna_w_list: (Optional, Default: None) The list of Client Network - Adapters from pypowervm. Providing this input - allows for an improvement in operation speed. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - try: - vnet_w = vif_drv.unplug(vif, cna_w_list=cna_w_list) - # Push a custom event, but only if the vif existed in the first place - if vnet_w: - _push_vif_event(adapter, 'unplug', vnet_w, instance, vif['type']) - except pvm_ex.HttpError as he: - # Log the message constructed by HttpError - LOG.exception("HttpError during vif unplug operation.", - instance=instance) - raise exception.VirtualInterfaceUnplugException(reason=he.args[0]) - - if vnet_w: - slot_mgr.drop_vnet(vnet_w) - - -def pre_live_migrate_at_destination(adapter, host_uuid, instance, vif, - vea_vlan_mappings): - """Performs the pre live migrate on the destination host. - - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. - :param vif: The virtual interface that will be migrated. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac - address, value is the destination's target - hypervisor VLAN. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - vif_drv.pre_live_migrate_at_destination(vif, vea_vlan_mappings) - - -def rollback_live_migration_at_destination(adapter, host_uuid, instance, vif, - vea_vlan_mappings): - """Performs the rollback of the live migrate on the destination host. - - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. - :param vif: The virtual interface that is being rolled back. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif mac - address, value is the destination's target - hypervisor VLAN. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - vif_drv.rollback_live_migration_at_destination(vif, vea_vlan_mappings) - - -def pre_live_migrate_at_source(adapter, host_uuid, instance, vif): - """Performs the pre live migrate on the source host. - - This is executed directly before the migration is started on the source - host. - - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. - :param vif: The virtual interface that will be migrated. This may be - called network_info in other portions of the code. - :return: The list of TrunkAdapter's on the source that are hosting the - VM's vif. Should only return data if those trunks should be - deleted after the migration. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - return vif_drv.pre_live_migrate_at_source(vif) - - -def post_live_migrate_at_source(adapter, host_uuid, instance, vif): - """Performs the post live migrate on the source host. - - :param adapter: The pypowervm adapter. - :param host_uuid: The host UUID for the PowerVM API. - :param instance: The nova instance object. 
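
The plug/unplug paths above notify the neutron agent through a custom pypowervm Event; condensed from the _push_vif_event helper shown earlier (assumes an Adapter in 'adapter' and a plugged vif wrapper in 'vnet_w'):

    from oslo_serialization import jsonutils
    from pypowervm.wrappers import event as pvm_evt

    detail = jsonutils.dumps(dict(provider='NOVA_PVM_VIF', action='plug',
                                  mac=vnet_w.mac, type='ovs'))
    # The event payload is the vif's REST href plus the JSON detail blob;
    # the agent uses it to mark the port up or down.
    pvm_evt.Event.bld(adapter, vnet_w.href, detail).create()
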
- :param vif: The virtual interface of the instance. This may be - called network_info in other portions of the code. - """ - vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) - return vif_drv.post_live_migrate_at_source(vif) - - -def get_secure_rmc_vswitch(adapter, host_uuid): - """Returns the vSwitch that is used for secure RMC. - - :param adapter: The pypowervm adapter API interface. - :param host_uuid: The host system UUID. - :return: The wrapper for the secure RMC vSwitch. If it does not exist - on the system, None is returned. - """ - vswitches = pvm_net.VSwitch.search( - adapter, parent_type=pvm_ms.System.schema_type, - parent_uuid=host_uuid, name=SECURE_RMC_VSWITCH) - if len(vswitches) == 1: - return vswitches[0] - return None - - -def plug_secure_rmc_vif(adapter, instance, host_uuid, slot_mgr): - """Creates the Secure RMC Network Adapter on the VM. - - :param adapter: The pypowervm adapter API interface. - :param instance: The nova instance to create the VIF against. - :param host_uuid: The host system UUID. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a VIF is attached to the VM - :return: The created network adapter wrapper. - """ - # Gather the mac and slot number for the mgmt vif - mac, slot_num = slot_mgr.build_map.get_mgmt_vea_slot() - if not mac: - # This is either a deploy case or rebuild case. For remote restart, - # mac will not be none, as it will be available from slot data. - # Deploy case - mac is None at both slot and instance_system_metadata - # and crt_cna will auto-generate it. - # Rebuild case - mac is none from slot data but is available - # at instance system_metadata. - mac = instance.system_metadata.get('mgmt_interface_mac') - - # Create the adapter. - lpar_uuid = vm.get_pvm_uuid(instance) - cna_w = pvm_cna.crt_cna(adapter, host_uuid, lpar_uuid, SECURE_RMC_VLAN, - vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True, - slot_num=slot_num, mac_addr=mac) - - # Save the mgmt vif to the slot map. - # For the rebuild case, mac will be present but not slot_num. - # For deploy case, both will be none. We want to register cna in both cases - if not slot_num: - slot_mgr.register_cna(cna_w) - if cna_w.mac != mac: - # Update instance system metadata to store instance management - # interface mac address. - instance.system_metadata.update({'mgmt_interface_mac': cna_w.mac}) - - return cna_w - - -def _get_trunk_dev_name(vif): - """Returns the device name for the trunk adapter. - - A given VIF will have a trunk adapter and a - client adapter. This will return the trunk adapter's name as it - will appear on the management VM. - - :param vif: The nova network interface - :return: The device name. - """ - if 'devname' in vif: - return vif['devname'] - return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN] - - -@six.add_metaclass(abc.ABCMeta) -class PvmVifDriver(object): - """Represents an abstract class for a PowerVM Vif Driver. - - A VIF Driver understands a given virtual interface type (network). It - understands how to plug and unplug a given VIF for a virtual machine. - """ - - def __init__(self, adapter, host_uuid, instance): - """Initializes a VIF Driver. - - :param adapter: The pypowervm adapter API interface. - :param host_uuid: The host system UUID. - :param instance: The nova instance that the vif action will be run - against. 
- """ - self.adapter = adapter - self.host_uuid = host_uuid - self.instance = instance - - @abc.abstractmethod - def plug(self, vif, slot_num, new_vif=True): - """Plugs a virtual interface (network) into a VM. - - :param vif: The virtual interface to plug into the instance. - :param slot_num: Which slot number to plug the VIF into. May be None. - :param new_vif: (Optional, Default: True) If set, indicates that it is - a brand new VIF. If False, it indicates that the VIF - is already on the client but should be treated on the - bridge. - :return: The new vif that was created. Only returned if new_vif is - set to True. Otherwise None is expected. - """ - pass - - def unplug(self, vif, cna_w_list=None): - """Unplugs a virtual interface (network) from a VM. - - :param vif: The virtual interface to plug into the instance. - :param cna_w_list: (Optional, Default: None) The list of Client Network - Adapters from pypowervm. Providing this input - allows for an improvement in operation speed. - :return cna_w: The deleted Client Network Adapter. - """ - # This is a default implementation that most implementations will - # require. - - # Need to find the adapters if they were not provided - if not cna_w_list: - cna_w_list = vm.get_cnas(self.adapter, self.instance) - - cna_w = self._find_cna_for_vif(cna_w_list, vif) - if not cna_w: - LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was ' - 'not found on the instance.', - {'mac': vif['address']}, instance=self.instance) - return None - - LOG.info('Deleting VIF with mac %(mac)s.', - {'mac': vif['address']}, instance=self.instance) - try: - cna_w.delete() - except Exception as e: - LOG.exception('Unable to unplug VIF with mac %(mac)s.', - {'mac': vif['address']}, instance=self.instance) - raise exception.VirtualInterfaceUnplugException( - reason=six.text_type(e)) - return cna_w - - @staticmethod - def _find_cna_for_vif(cna_w_list, vif): - """Finds the PowerVM CNA for a given Nova VIF. - - :param cna_w_list: The list of Client Network Adapter wrappers from - pypowervm. - :param vif: The Nova Virtual Interface (virtual network interface). - :return: The CNA that corresponds to the VIF. None if one is not - part of the cna_w_list. - """ - for cna_w in cna_w_list: - # If the MAC address matched, attempt the delete. - if vm.norm_mac(cna_w.mac) == vif['address']: - return cna_w - return None - - def pre_live_migrate_at_destination(self, vif, vea_vlan_mappings): - """Performs the pre live migrate on the destination host. - - Pre live migrate at destination is invoked before - pre_live_migrate_at_source. - - :param vif: The virtual interface that will be migrated. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif - mac address, value is the destination's - target hypervisor VLAN. - """ - pass - - def rollback_live_migration_at_destination(self, vif, vea_vlan_mappings): - """Rolls back the pre live migrate on the destination host. - - :param vif: The virtual interface that was being migrated. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif - mac address, value is the destination's - target hypervisor VLAN. - """ - pass - - def pre_live_migrate_at_source(self, vif): - """Performs the pre live migrate on the source host. - - This is executed directly before the migration is started on the source - host. - - :param vif: The virtual interface that will be migrated. 
This may be - called network_info in other portions of the code. - :return: The list of TrunkAdapter's on the source that are hosting the - VM's vif. Should only return data if those trunks should be - deleted after the migration. - """ - return [] - - def post_live_migrate_at_source(self, vif): - """Performs the post live migrate on the source host. - - :param vif: The virtual interface of an instance. This may be - called network_info in other portions of the code. - """ - pass - - -class PvmSeaVifDriver(PvmVifDriver): - """The PowerVM Shared Ethernet Adapter VIF Driver.""" - - def plug(self, vif, slot_num, new_vif=True): - """Plugs a virtual interface (network) into a VM. - - This method simply creates the client network adapter into the VM. - - :param vif: The virtual interface to plug into the instance. - :param slot_num: Which slot number to plug the VIF into. May be None. - :param new_vif: (Optional, Default: True) If set, indicates that it is - a brand new VIF. If False, it indicates that the VIF - is already on the client but should be treated on the - bridge. - :return: The new vif that was created. Only returned if new_vif is - set to True. Otherwise None is expected. - """ - # Do nothing if not a new VIF - if not new_vif: - return None - - lpar_uuid = vm.get_pvm_uuid(self.instance) - - # CNA's require a VLAN. Nova network puts it in network-meta. - # The networking-powervm neutron agent will also send it, if so via - # the vif details. - vlan = vif['network']['meta'].get('vlan', None) - if not vlan: - vlan = int(vif['details']['vlan']) - - LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan), - instance=self.instance) - cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan, - mac_addr=vif['address'], slot_num=slot_num) - - return cna_w - - -class PvmVnicSriovVifDriver(PvmVifDriver): - """The SR-IOV VIF driver for PowerVM.""" - - def plug(self, vif, slot_num, new_vif=True): - if not new_vif: - return None - - physnet = vif.get_physical_network() - if not physnet: - # Get physnet from neutron network if not present in vif - # TODO(svenkat): This section of code will be eliminated in - # pike release. Design will be in place to fix any vif - # that has physical_network missing. The fix will be in - # compute startup code. 
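
    # (Illustrative aside on the SEA plug shown above; 'vif' here is a
    # hypothetical dict shaped like Nova's network model, not the one in
    # scope in this method.)
    vif = {'network': {'meta': {}}, 'details': {'vlan': '42'},
           'address': 'fa:16:3e:00:00:01'}
    # VLAN resolution order: network metadata first, then the vif details
    # sent by the networking-powervm neutron agent.
    vlan = vif['network']['meta'].get('vlan') or int(vif['details']['vlan'])
    assert vlan == 42
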
- net_id = vif['network']['id'] - admin_context = ctx.get_admin_context() - napi = net_api.API() - network = napi.get(admin_context, net_id) - physnet = network.physical_network - - LOG.debug("Plugging vNIC SR-IOV vif for physical network %(physnet)s.", - {'physnet': physnet}, instance=self.instance) - - # Get the msys - msys = pvm_ms.System.get(self.adapter)[0] - # Physical ports for the given port label - pports_w = sriovtask.find_pports_for_portlabel(physnet, self.adapter, - msys) - pports = [pport.loc_code for pport in pports_w] - - if not pports: - raise exception.VirtualInterfacePlugException( - _("Unable to find acceptable Ethernet ports on physical " - "network '%(physnet)s' for instance %(inst)s for SRIOV " - "based VIF with MAC address %(vif_mac)s.") % - {'physnet': physnet, 'inst': self.instance.name, - 'vif_mac': vif['address']}) - - # MAC - mac_address = pvm_util.sanitize_mac_for_api(vif['address']) - - # vlan id - vlan_id = int(vif['details']['vlan']) - - # Redundancy: plugin sets from binding:profile, then conf, then default - redundancy = int(vif['details']['redundancy']) - - # Capacity: plugin sets from binding:profile, then conf, then default - capacity = vif['details']['capacity'] - maxcapacity = vif['details'].get('maxcapacity') - - vnic = pvm_card.VNIC.bld( - self.adapter, vlan_id, slot_num=slot_num, mac_addr=mac_address, - allowed_vlans=pvm_util.VLANList.NONE, - allowed_macs=pvm_util.MACList.NONE) - - try: - sriovtask.set_vnic_back_devs(vnic, pports, sys_w=msys, - redundancy=redundancy, - capacity=capacity, - max_capacity=maxcapacity, - check_port_status=True) - except ValueError as ve: - LOG.exception("Failed to set vNIC backing devices") - msg = '' - if ve.args: - msg = ve.args[0] - raise exception.VirtualInterfacePlugException(message=msg) - - return vnic.create(parent_type=pvm_lpar.LPAR, - parent_uuid=vm.get_pvm_uuid(self.instance)) - - def unplug(self, vif, cna_w_list=None): - mac = pvm_util.sanitize_mac_for_api(vif['address']) - vnic = vm.get_vnics( - self.adapter, self.instance, mac=mac, one_result=True) - if not vnic: - LOG.warning('Unable to unplug VIF with mac %(mac)s. No matching ' - 'vNIC was found on the instance. VIF: %(vif)s', - {'mac': mac, 'vif': vif}, instance=self.instance) - return None - vnic.delete() - return vnic - - -class PvmMetaAttrs(list): - """Represents meta attributes for a PowerVM Vif Driver. - - """ - - def __init__(self, vif, instance): - """Initializes meta attributes. - - :param vif: The virtual interface for the instance - :param instance: The nova instance that the vif action will be run - against. - """ - self.append('iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id'])) - self.append('iface-status=active') - self.append('attached-mac=%s' % vif['address']) - self.append('vm-uuid=%s' % instance.uuid) - - def __str__(self): - return ','.join(self) - - -class PvmOvsVifDriver(PvmVifDriver): - """The Open vSwitch VIF driver for PowerVM.""" - - def plug(self, vif, slot_num, new_vif=True): - """Plugs a virtual interface (network) into a VM. - - Extends the Lio implementation. Will make sure that the trunk device - has the appropriate metadata (ex. port id) set on it so that the - Open vSwitch agent picks it up properly. - - :param vif: The virtual interface to plug into the instance. - :param slot_num: Which slot number to plug the VIF into. May be None. - :param new_vif: (Optional, Default: True) If set, indicates that it is - a brand new VIF. 
If False, it indicates that the VIF - is already on the client but should be treated on the - bridge. - :return: The new vif that was created. Only returned if new_vif is - set to True. Otherwise None is expected. - """ - lpar_uuid = vm.get_pvm_uuid(self.instance) - mgmt_uuid = pvm_par.get_mgmt_partition(self.adapter).uuid - - # There will only be one trunk wrap, as we have created with just - # the mgmt lpar. Next step is to connect to the OVS. - mtu = vif['network'].get_meta('mtu') - dev_name = _get_trunk_dev_name(vif) - - meta_attrs = PvmMetaAttrs(vif, self.instance) - - if new_vif: - # Create the trunk and client adapter. - return pvm_cna.crt_p2p_cna( - self.adapter, self.host_uuid, lpar_uuid, [mgmt_uuid], - CONF.powervm.pvm_vswitch_for_novalink_io, crt_vswitch=True, - mac_addr=vif['address'], dev_name=dev_name, - slot_num=slot_num, ovs_bridge=vif['network']['bridge'], - ovs_ext_ids=str(meta_attrs), configured_mtu=mtu)[0] - else: - # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548 - # When a host is rebooted, something is discarding tap devices for - # VMs deployed with OVS vif. To prevent VMs losing network - # connectivity, this is fixed by recreating the tap devices during - # init of the nova compute service, which will call vif plug with - # new_vif==False. - - # Find the CNA for this vif. - # TODO(svenkat) improve performance by caching VIOS wrapper(s) and - # CNA lists (in case >1 vif per VM). - cna_w_list = vm.get_cnas(self.adapter, self.instance) - cna_w = self._find_cna_for_vif(cna_w_list, vif) - # Find the corresponding trunk adapter - trunks = pvm_cna.find_trunks(self.adapter, cna_w) - for trunk in trunks: - # Set MTU, OVS external ids, and OVS bridge metadata - # TODO(svenkat) set_parm_value calls should be replaced once - # pypowervm supports setting these values directly. - trunk.set_parm_value('ConfiguredMTU', - mtu, attrib=pvm_c.ATTR_KSV160) - trunk.set_parm_value('OvsPortExternalIds', - meta_attrs, attrib=pvm_c.ATTR_KSV160) - trunk.set_parm_value('OvsBridge', - vif['network']['bridge'], - attrib=pvm_c.ATTR_KSV160) - # Updating the trunk adapter will cause NovaLink to reassociate - # the tap device. - trunk.update() - - @staticmethod - def get_ovs_interfaceid(vif): - """Returns the interface id to set for a given VIF. - - When a VIF is plugged for an Open vSwitch, it needs to have the - interface ID set in the OVS metadata. This returns what the - appropriate interface id is. - - :param vif: The Nova network interface. - """ - return vif.get('ovs_interfaceid') or vif['id'] - - def unplug(self, vif, cna_w_list=None): - """Unplugs a virtual interface (network) from a VM. - - Extends the base implementation, but before calling it will remove - the adapter from the Open vSwitch and delete the trunk. - - :param vif: The virtual interface to plug into the instance. - :param cna_w_list: (Optional, Default: None) The list of Client Network - Adapters from pypowervm. Providing this input - allows for an improvement in operation speed. - :return cna_w: The deleted Client Network Adapter. - """ - # Need to find the adapters if they were not provided - if not cna_w_list: - cna_w_list = vm.get_cnas(self.adapter, self.instance) - - # Find the CNA for this vif. - cna_w = self._find_cna_for_vif(cna_w_list, vif) - if not cna_w: - LOG.warning('Unable to unplug VIF with mac %(mac)s for. 
The VIF ' - 'was not found on the instance.', - {'mac': vif['address']}, instance=self.instance) - return None - - # Find and delete the trunk adapters - trunks = pvm_cna.find_trunks(self.adapter, cna_w) - for trunk in trunks: - trunk.delete() - - # Now delete the client CNA - return super(PvmOvsVifDriver, self).unplug(vif, cna_w_list=cna_w_list) - - def pre_live_migrate_at_destination(self, vif, vea_vlan_mappings): - """Performs the pre live migrate on the destination host. - - This method will create the trunk adapter on the destination host, - set its link state up, and attach it to the integration OVS switch. - It also updates the vea_vlan_mappings to indicate which unique - hypervisor VLAN should be used for this VIF for the migration operation - to complete properly. - - :param vif: The virtual interface that will be migrated. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif - mac address, value is the destination's - target hypervisor VLAN. - """ - self._cleanup_orphan_adapters(vif, - CONF.powervm.pvm_vswitch_for_novalink_io) - mgmt_wrap = pvm_par.get_mgmt_partition(self.adapter) - dev = _get_trunk_dev_name(vif) - - meta_attrs = PvmMetaAttrs(vif, self.instance) - - mtu = vif['network'].get_meta('mtu') - - # Find a specific free VLAN and create the Trunk in a single atomic - # action. - cna_w = pvm_cna.crt_trunk_with_free_vlan( - self.adapter, self.host_uuid, [mgmt_wrap.uuid], - CONF.powervm.pvm_vswitch_for_novalink_io, dev_name=dev, - ovs_bridge=vif['network']['bridge'], - ovs_ext_ids=str(meta_attrs), configured_mtu=mtu)[0] - - # Save this data for the migration command. - vea_vlan_mappings[vif['address']] = cna_w.pvid - LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with PVID " - "%(pvid)s", - {'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid}, - instance=self.instance) - - def rollback_live_migration_at_destination(self, vif, vea_vlan_mappings): - """Rolls back the pre live migrate on the destination host. - - Will delete the TrunkAdapter that pre_live_migrate_at_destination - created with its unique hypervisor VLAN. This uses the - vea_vlan_mappings to provide the information as to what TrunkAdapter - it should remove. - - :param vif: The virtual interface that was being migrated. This may be - called network_info in other portions of the code. - :param vea_vlan_mappings: The VEA VLAN mappings. Key is the vif - mac address, value is the destination's - target hypervisor VLAN. - """ - LOG.warning("Rolling back the live migrate of VIF with mac %(mac)s.", - {'mac': vif['address']}, instance=self.instance) - - # We know that we just attached the VIF to the NovaLink VM. Search - # for a trunk adapter with the PVID and vSwitch that we specified - # above. This is guaranteed to be unique. - vlan = int(vea_vlan_mappings[vif['address']]) - vswitch_id = pvm_net.VSwitch.search( - self.adapter, parent_type=pvm_ms.System, one_result=True, - name=CONF.powervm.pvm_vswitch_for_novalink_io).switch_id - - # Find the trunk - mgmt_wrap = pvm_par.get_mgmt_partition(self.adapter) - child_adpts = pvm_net.CNA.get(self.adapter, parent=mgmt_wrap) - trunk = None - for adpt in child_adpts: - # We need a trunk adapter (so check trunk_pri). Then the trunk - # is unique by PVID and PowerVM vSwitch ID. - if (adpt.pvid == vlan and adpt.vswitch_id == vswitch_id): - if adpt.trunk_pri: - trunk = adpt - break - - if trunk: - # Delete the peer'd trunk adapter. 
- LOG.warning("Deleting target side trunk adapter %(dev)s for " - "rollback operation", {'dev': trunk.dev_name}, - instance=self.instance) - trunk.delete() - - def pre_live_migrate_at_source(self, vif): - """Performs the pre live migrate on the source host. - - This is executed directly before the migration is started on the source - host. - - :param vif: The virtual interface that will be migrated. This may be - called network_info in other portions of the code. - :return: The list of TrunkAdapter's on the source that are hosting the - VM's vif. Should only return data if those trunks should be - deleted after the migration. - """ - # Right before the migration, we need to find the trunk on the source - # host. - mac = pvm_util.sanitize_mac_for_api(vif['address']) - cna_w = pvm_net.CNA.search( - self.adapter, parent_type=pvm_lpar.LPAR.schema_type, - parent_uuid=vm.get_pvm_uuid(self.instance), one_result=True, - mac=mac) - - return pvm_cna.find_trunks(self.adapter, cna_w) - - def post_live_migrate_at_source(self, vif): - """Performs the post live migrate on the source host. - - :param vif: The virtual interface of an instance. This may be - called network_info in other portions of the code. - """ - self._cleanup_orphan_adapters(vif, - CONF.powervm.pvm_vswitch_for_novalink_io) - - def _cleanup_orphan_adapters(self, vif, vswitch_name): - """Finds and removes trunk VEAs that have no corresponding CNA.""" - # Find and delete orphan adapters with macs matching our vif - orphans = pvm_cna.find_orphaned_trunks(self.adapter, vswitch_name) - for orphan in orphans: - if vm.norm_mac(orphan.mac) == vif['address']: - orphan.delete() diff --git a/nova_powervm/virt/powervm/vm.py b/nova_powervm/virt/powervm/vm.py deleted file mode 100644 index 1fdd2fed..00000000 --- a/nova_powervm/virt/powervm/vm.py +++ /dev/null @@ -1,833 +0,0 @@ -# Copyright 2014, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import re
-import six
-
-from nova.compute import power_state
-from nova.compute import task_states
-from nova import exception
-from nova import objects
-from nova.virt import event
-from nova.virt import hardware
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tasks import ibmi
-from pypowervm.tasks import power
-from pypowervm.tasks import power_opts as popts
-from pypowervm.tasks import vterm
-from pypowervm import util as pvm_util
-from pypowervm.utils import lpar_builder as lpar_bldr
-from pypowervm.utils import transaction as pvm_trans
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.utils import validation as vldn
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import iocard as pvm_card
-from pypowervm.wrappers import logical_partition as pvm_lpar
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import shared_proc_pool as pvm_spp
-
-from nova_powervm import conf as cfg
-from nova_powervm.virt.powervm import exception as nvex
-from nova_powervm.virt.powervm.i18n import _
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-POWERVM_TO_NOVA_STATE = {
-    pvm_bp.LPARState.MIGRATING_RUNNING: power_state.RUNNING,
-    pvm_bp.LPARState.RUNNING: power_state.RUNNING,
-    pvm_bp.LPARState.STARTING: power_state.RUNNING,
-    # map open firmware state to active since it can be shut down
-    pvm_bp.LPARState.OPEN_FIRMWARE: power_state.RUNNING,
-    # It is running until it is off.
-    pvm_bp.LPARState.SHUTTING_DOWN: power_state.RUNNING,
-    # It is running until the suspend completes
-    pvm_bp.LPARState.SUSPENDING: power_state.RUNNING,
-
-    pvm_bp.LPARState.MIGRATING_NOT_ACTIVE: power_state.SHUTDOWN,
-    pvm_bp.LPARState.NOT_ACTIVATED: power_state.SHUTDOWN,
-
-    pvm_bp.LPARState.UNKNOWN: power_state.NOSTATE,
-    pvm_bp.LPARState.HARDWARE_DISCOVERY: power_state.NOSTATE,
-    pvm_bp.LPARState.NOT_AVAILBLE: power_state.NOSTATE,
-
-    # While resuming, we should be considered suspended still. Only once
-    # resumed will we be active (which is represented by the RUNNING state)
-    pvm_bp.LPARState.RESUMING: power_state.SUSPENDED,
-    pvm_bp.LPARState.SUSPENDED: power_state.SUSPENDED,
-
-    pvm_bp.LPARState.ERROR: power_state.CRASHED
-}
-
-# Groupings of PowerVM events used when considering if a state transition
-# has taken place.
-RUNNING_EVENTS = [
-    pvm_bp.LPARState.MIGRATING_RUNNING,
-    pvm_bp.LPARState.RUNNING,
-    pvm_bp.LPARState.STARTING,
-    pvm_bp.LPARState.OPEN_FIRMWARE,
-]
-STOPPED_EVENTS = [
-    pvm_bp.LPARState.NOT_ACTIVATED,
-    pvm_bp.LPARState.ERROR,
-    pvm_bp.LPARState.UNKNOWN,
-]
-SUSPENDED_EVENTS = [
-    pvm_bp.LPARState.SUSPENDING,
-]
-RESUMING_EVENTS = [
-    pvm_bp.LPARState.RESUMING,
-]
-
-POWERVM_STARTABLE_STATE = (pvm_bp.LPARState.NOT_ACTIVATED, )
-POWERVM_STOPABLE_STATE = (
-    pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
-    pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
-    pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
-    pvm_bp.LPARState.SUSPENDING)
-
-
-def translate_event(pvm_state, pwr_state):
-    """Translate the PowerVM state and see if it has changed.
-
-    Compare the state from PowerVM to the state from OpenStack and see if
-    a life cycle event should be sent up to OpenStack.
- - :param pvm_state: VM state from PowerVM - :param pwr_state: Instance power state from OpenStack - :returns: life cycle event to send. - """ - trans = None - if pvm_state in RUNNING_EVENTS and pwr_state != power_state.RUNNING: - trans = event.EVENT_LIFECYCLE_STARTED - elif pvm_state in STOPPED_EVENTS and pwr_state != power_state.SHUTDOWN: - trans = event.EVENT_LIFECYCLE_STOPPED - elif pvm_state in SUSPENDED_EVENTS: - if pwr_state != power_state.SUSPENDED: - trans = event.EVENT_LIFECYCLE_SUSPENDED - elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING: - trans = event.EVENT_LIFECYCLE_RESUMED - - LOG.debug('Translated {PowerVM state %s; power state %s} to %s', - pvm_state, pwr_state, trans) - return trans - - -def _translate_vm_state(pvm_state): - """Find the current state of the lpar. - - State is converted to the appropriate nova.compute.power_state - - :return: The appropriate integer state value from power_state - """ - - if pvm_state is None: - return power_state.NOSTATE - - try: - nova_state = POWERVM_TO_NOVA_STATE[pvm_state.lower()] - except KeyError: - nova_state = power_state.NOSTATE - - return nova_state - - -class VMBuilder(object): - """Converts a Nova Instance/Flavor into a pypowervm LPARBuilder.""" - - _PVM_PROC_COMPAT = 'powervm:processor_compatibility' - _PVM_UNCAPPED = 'powervm:uncapped' - _PVM_DED_SHAR_MODE = 'powervm:dedicated_sharing_mode' - _PVM_SHAR_PROC_POOL = 'powervm:shared_proc_pool_name' - _PVM_SRR_CAPABILITY = 'powervm:srr_capability' - _PVM_PPT_RATIO = 'powervm:ppt_ratio' - _PVM_ENFORCE_AFFINITY_CHECK = 'powervm:enforce_affinity_check' - _PVM_SECURE_BOOT = 'powervm:secure_boot' - - # Map of PowerVM extra specs to the lpar builder attributes. - # '' is used for attributes that are not implemented yet. - # None means there is no direct attribute mapping and must - # be handled individually - _ATTRS_MAP = { - 'powervm:min_mem': lpar_bldr.MIN_MEM, - 'powervm:max_mem': lpar_bldr.MAX_MEM, - 'powervm:min_vcpu': lpar_bldr.MIN_VCPU, - 'powervm:max_vcpu': lpar_bldr.MAX_VCPU, - 'powervm:proc_units': lpar_bldr.PROC_UNITS, - 'powervm:min_proc_units': lpar_bldr.MIN_PROC_U, - 'powervm:max_proc_units': lpar_bldr.MAX_PROC_U, - 'powervm:dedicated_proc': lpar_bldr.DED_PROCS, - 'powervm:shared_weight': lpar_bldr.UNCAPPED_WEIGHT, - 'powervm:availability_priority': lpar_bldr.AVAIL_PRIORITY, - 'powervm:enable_lpar_metric': lpar_bldr.ENABLE_LPAR_METRIC, - _PVM_PPT_RATIO: lpar_bldr.PPT_RATIO, - _PVM_ENFORCE_AFFINITY_CHECK: lpar_bldr.ENFORCE_AFFINITY_CHECK, - _PVM_SECURE_BOOT: lpar_bldr.SECURE_BOOT, - _PVM_UNCAPPED: None, - _PVM_DED_SHAR_MODE: None, - _PVM_PROC_COMPAT: None, - _PVM_SHAR_PROC_POOL: None, - _PVM_SRR_CAPABILITY: None, - } - - _DED_SHARING_MODES_MAP = { - 'share_idle_procs': pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS, - 'keep_idle_procs': pvm_bp.DedicatedSharingMode.KEEP_IDLE_PROCS, - 'share_idle_procs_active': - pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ACTIVE, - 'share_idle_procs_always': - pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ALWAYS - } - - def __init__(self, host_w, adapter, slot_mgr=None, cur_lpar_w=None): - """Initialize the converter. - - :param host_w: The host system wrapper. - :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API. - :param slot_mgr: NovaSlotManager for setting/saving the maximum number - of virtual slots on the VM. - :param cur_lpar_w: The LPAR wrapper of the instance. Passing in this - parameter signifies a resize operation. 
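
A worked example of the translation above, using the real enum values imported in this module (translate_event is the function just shown):

    from nova.compute import power_state
    from nova.virt import event
    from pypowervm.wrappers import base_partition as pvm_bp

    # PowerVM reports the LPAR as starting while Nova still records it as
    # shut down, so a STARTED lifecycle event should be emitted.
    ev = translate_event(pvm_bp.LPARState.STARTING, power_state.SHUTDOWN)
    assert ev == event.EVENT_LIFECYCLE_STARTED
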
- """ - self.adapter = adapter - self.host_w = host_w - kwargs = dict(uncapped_weight=CONF.powervm.uncapped_proc_weight, - proc_units_factor=CONF.powervm.proc_units_factor) - if cur_lpar_w: - # Maintain the existing attributes in DefaultStandardize - kwargs['max_slots'] = cur_lpar_w.io_config.max_virtual_slots - kwargs['spp'] = cur_lpar_w.proc_config.shared_proc_cfg.pool_id - kwargs['avail_priority'] = cur_lpar_w.avail_priority - kwargs['srr'] = cur_lpar_w.srr_enabled - kwargs['proc_compat'] = cur_lpar_w.proc_compat_mode - kwargs['enable_lpar_metric'] = ( - cur_lpar_w.allow_perf_data_collection) - if slot_mgr is not None: - # This will already default if not set - max_vslots = slot_mgr.build_map.get_max_vslots() - if max_vslots > kwargs.get('max_slots', 0): - kwargs['max_slots'] = max_vslots - self.stdz = lpar_bldr.DefaultStandardize(self.host_w, **kwargs) - - def lpar_builder(self, instance): - """Returns the pypowervm LPARBuilder for a given Nova flavor. - - :param instance: the VM instance - """ - attrs = self._format_flavor(instance) - self._add_IBMi_attrs(instance, attrs) - return lpar_bldr.LPARBuilder(self.adapter, attrs, self.stdz) - - def _add_IBMi_attrs(self, instance, attrs): - distro = instance.system_metadata.get('image_os_distro', '') - if distro.lower() == 'ibmi': - attrs[lpar_bldr.ENV] = pvm_bp.LPARType.OS400 - # Add other attributes in the future - - def _format_flavor(self, instance): - """Returns the pypowervm format of the flavor. - - :param instance: the VM instance - :return: a dict that can be used by the LPAR builder - """ - # The attrs are what is sent to pypowervm to convert the lpar. - attrs = {} - - attrs[lpar_bldr.NAME] = pvm_util.sanitize_partition_name_for_api( - instance.name) - # The uuid is only actually set on a create of an LPAR - attrs[lpar_bldr.UUID] = pvm_uuid.convert_uuid_to_pvm(instance.uuid) - attrs[lpar_bldr.MEM] = instance.flavor.memory_mb - attrs[lpar_bldr.VCPU] = instance.flavor.vcpus - # Set the srr capability to True by default - attrs[lpar_bldr.SRR_CAPABLE] = True - - # Loop through the extra specs and process powervm keys - for key in instance.flavor.extra_specs.keys(): - # If it is not a valid key, then can skip. - if not self._is_pvm_valid_key(key): - continue - - # Look for the mapping to the lpar builder - bldr_key = self._ATTRS_MAP.get(key) - - # Check for no direct mapping, if the value is none, need to - # derive the complex type - if bldr_key is None: - self._build_complex_type(key, attrs, instance.flavor) - elif bldr_key == lpar_bldr.ENABLE_LPAR_METRIC: - lpar_metric = self._flavor_bool( - instance.flavor.extra_specs[key], key) - attrs[bldr_key] = lpar_metric - elif bldr_key == lpar_bldr.PPT_RATIO: - if (instance.task_state == task_states.REBUILD_SPAWNING and not - self.host_w.get_capability( - 'physical_page_table_ratio_capable')): - # We still want to be able to rebuild from hosts that - # support setting the PPT ratio to hosts that don't support - # setting the PPT ratio. - LOG.info("Ignoring PPT ratio on rebuild to PPT ratio " - "unsupported host.", instance=instance) - else: - attrs[bldr_key] = instance.flavor.extra_specs[key] - elif bldr_key == lpar_bldr.ENFORCE_AFFINITY_CHECK: - if (instance.task_state == task_states.REBUILD_SPAWNING and not - self.host_w.get_capability('affinity_check_capable')): - # We still want to be able to rebuild from hosts that - # support affinity score check to hosts that don't support - # affinity score check. 
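As a rough, standalone illustration of the extra-specs loop above: only powervm: keys are considered, unknown powervm: keys raise, a mapped key copies straight across, and a None mapping routes to the complex-type handler. The map below is a toy, not the real _ATTRS_MAP.

TOY_ATTRS_MAP = {'powervm:max_mem': 'max_mem', 'powervm:uncapped': None}


def format_flavor(extra_specs):
    attrs = {}
    for key, val in extra_specs.items():
        if not key.startswith('powervm:'):
            continue                       # non-PowerVM keys are skipped
        if key not in TOY_ATTRS_MAP:
            raise ValueError(key)          # stands in for InvalidAttribute
        bldr_key = TOY_ATTRS_MAP[key]
        if bldr_key is None:               # complex type: derive the value
            attrs['sharing_mode'] = (
                'uncapped' if val.lower() in ('true', 't', 'yes', 'y')
                else 'capped')
        else:                              # direct mapping
            attrs[bldr_key] = val
    return attrs


assert format_flavor({'powervm:max_mem': '4096',
                      'hw:cpu_policy': 'shared'}) == {'max_mem': '4096'}
assert format_flavor({'powervm:uncapped': 'true'}) == {'sharing_mode': 'uncapped'}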
-                LOG.info("Skipping affinity check attribute processing "
-                         "on rebuild to host which does not support "
-                         "affinity checks.", instance=instance)
-            else:
-                attrs[bldr_key] = self._flavor_bool(
-                    instance.flavor.extra_specs[key], key)
-        else:
-            # We found a direct mapping
-            attrs[bldr_key] = instance.flavor.extra_specs[key]
-
-        return attrs
-
-    def _is_pvm_valid_key(self, key):
-        """Return whether this is a valid PowerVM key.
-
-        :param key: The powervm key.
-        :return: True if the key is valid. False if it is a non-powervm key
-                 and should be skipped. Raises an InvalidAttribute exception
-                 if it is an unknown PowerVM key.
-        """
-        # If not a powervm key, then it is not 'pvm_valid'
-        if not key.startswith('powervm:'):
-            return False
-
-        # Check if this is a valid attribute
-        if key not in self._ATTRS_MAP.keys():
-            exc = exception.InvalidAttribute(attr=key)
-            raise exc
-
-        return True
-
-    def _build_complex_type(self, key, attrs, flavor):
-        """If a key does not directly map, this method derives the right value.
-
-        Some types are complex, in that the flavor may have one key that maps
-        to several different attributes in the lpar builder. This method
-        handles the complex types.
-
-        :param key: The flavor's key.
-        :param attrs: The attribute map to put the derived value into.
-        :param flavor: The Nova instance flavor.
-        """
-        # Map uncapped to sharing mode
-        if key == self._PVM_UNCAPPED:
-            is_uncapped = self._flavor_bool(flavor.extra_specs[key], key)
-            shar_mode = (pvm_bp.SharingMode.UNCAPPED if is_uncapped
-                         else pvm_bp.SharingMode.CAPPED)
-            attrs[lpar_bldr.SHARING_MODE] = shar_mode
-        elif key == self._PVM_DED_SHAR_MODE:
-            # Dedicated sharing modes...map directly
-            mode = self._DED_SHARING_MODES_MAP.get(
-                flavor.extra_specs[key])
-            if mode is not None:
-                attrs[lpar_bldr.SHARING_MODE] = mode
-            else:
-                attr = key + '=' + flavor.extra_specs[key]
-                exc = exception.InvalidAttribute(attr=attr)
-                LOG.error(exc)
-                raise exc
-        elif key == self._PVM_SHAR_PROC_POOL:
-            pool_name = flavor.extra_specs[key]
-            attrs[lpar_bldr.SPP] = self._spp_pool_id(pool_name)
-        elif key == self._PVM_PROC_COMPAT:
-            # Handle variants of the supported values
-            attrs[lpar_bldr.PROC_COMPAT] = re.sub(
-                r'\+', '_Plus', flavor.extra_specs[key])
-        elif key == self._PVM_SRR_CAPABILITY:
-            srr_cap = self._flavor_bool(flavor.extra_specs[key], key)
-            attrs[lpar_bldr.SRR_CAPABLE] = srr_cap
-        else:
-            # There was no mapping or we didn't handle it.
-            exc = exception.InvalidAttribute(attr=key)
-            LOG.error(exc)
-            raise exc
-
-    def _spp_pool_id(self, pool_name):
-        """Returns the shared proc pool id for a given pool name.
-
-        :param pool_name: The shared proc pool name.
-        :return: The internal API id for the shared proc pool.
-        """
-        default_pool_name = pvm_spp.DEFAULT_POOL_DISPLAY_NAME
-        if (pool_name is None or pool_name == default_pool_name):
-            # The default pool is 0
-            return 0
-
-        # Search for the pool with this name
-        pool_wraps = pvm_spp.SharedProcPool.search(
-            self.adapter, name=pool_name, parent=self.host_w)
-
-        # Check to make sure there is a pool with the name, and only one pool.
-        if len(pool_wraps) > 1:
-            msg = (_('Multiple Shared Processing Pools with name %(pool)s.') %
-                   {'pool': pool_name})
-            raise exception.ValidationError(msg)
-        elif len(pool_wraps) == 0:
-            msg = (_('Unable to find Shared Processing Pool %(pool)s') %
-                   {'pool': pool_name})
-            raise exception.ValidationError(msg)
-
-        # Return the singular pool id.
- return pool_wraps[0].id - - def _flavor_bool(self, val, key): - """Will validate and return the boolean for a given value. - - :param val: The value to parse into a boolean. - :param key: The flavor key. - :return: The boolean value for the attribute. If is not well formed - will raise an ValidationError. - """ - trues = ['true', 't', 'yes', 'y'] - falses = ['false', 'f', 'no', 'n'] - if val.lower() in trues: - return True - elif val.lower() in falses: - return False - else: - msg = (_('Flavor attribute %(attr)s must be either True or ' - 'False. Current value %(val)s is not allowed.') % - {'attr': key, 'val': val}) - raise exception.ValidationError(msg) - - -def get_lpars(adapter): - """Get a list of the LPAR wrappers.""" - return pvm_lpar.LPAR.search(adapter, is_mgmt_partition=False) - - -def get_lpar_names(adapter): - """Get a list of the LPAR names.""" - return [x.name for x in get_lpars(adapter)] - - -def get_instance_wrapper(adapter, instance, xag=None): - """Get the LPAR wrapper for a given Nova instance. - - :param adapter: The adapter for the pypowervm API - :param instance: The nova instance OR its instance uuid. - :param xag: The pypowervm XAG to be used on the read request - :return: The pypowervm logical_partition wrapper. - """ - pvm_inst_uuid = get_pvm_uuid(instance) - try: - return pvm_lpar.LPAR.get(adapter, uuid=pvm_inst_uuid, xag=xag) - except pvm_exc.HttpNotFound: - raise exception.InstanceNotFound(instance_id=pvm_inst_uuid) - - -def instance_exists(adapter, instance, log_errors=False): - """Determine if an instance exists on the host. - - :param adapter: The adapter for the pypowervm API - :param instance: The nova instance. - :param log_errors: Indicator whether to log REST data after an exception - :return: boolean, whether the instance exists. - """ - try: - # If we're able to get the property, then it exists. - get_vm_id(adapter, get_pvm_uuid(instance), log_errors=log_errors) - return True - except exception.InstanceNotFound: - return False - - -def get_vm_id(adapter, lpar_uuid, log_errors=True): - """Returns the client LPAR ID for a given UUID. - - :param adapter: The pypowervm adapter. - :param lpar_uuid: The UUID for the LPAR. - :param log_errors: Indicator whether to log REST data after an exception - :return: The system id (an integer value). - """ - return get_vm_qp(adapter, lpar_uuid, qprop='PartitionID', - log_errors=log_errors) - - -def get_vm_qp(adapter, lpar_uuid, qprop=None, log_errors=True): - """Returns one or all quick properties of an LPAR. - - :param adapter: The pypowervm adapter. - :param lpar_uuid: The (powervm) UUID for the LPAR. - :param qprop: The quick property key to return. If specified, that single - property value is returned. If None/unspecified, all quick - properties are returned in a dictionary. - :param log_errors: Indicator whether to log REST data after an exception - :return: Either a single quick property value or a dictionary of all quick - properties. - """ - try: - kwds = dict(root_id=lpar_uuid, suffix_type='quick', suffix_parm=qprop) - if not log_errors: - # Remove the log helper from the list of helpers - helpers = adapter.helpers - try: - helpers.remove(pvm_log.log_helper) - except ValueError: - # It's not an error if we didn't find it. 
-                pass
-            kwds['helpers'] = helpers
-        resp = adapter.read(pvm_lpar.LPAR.schema_type, **kwds)
-    except pvm_exc.HttpNotFound:
-        # 404 error indicates the LPAR has been deleted (or moved to a
-        # different host)
-        raise exception.InstanceNotFound(instance_id=lpar_uuid)
-
-    return jsonutils.loads(resp.body)
-
-
-def get_vm_info(adapter, instance):
-    """Get the InstanceInfo for an instance.
-
-    :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API.
-    :param instance: nova.objects.instance.Instance object
-    :returns: An InstanceInfo object.
-    """
-    pvm_uuid = get_pvm_uuid(instance)
-    pvm_state = get_vm_qp(adapter, pvm_uuid, 'PartitionState')
-    nova_state = _translate_vm_state(pvm_state)
-    return hardware.InstanceInfo(nova_state)
-
-
-def create_lpar(adapter, host_wrapper, instance, nvram=None, slot_mgr=None):
-    """Create an LPAR on the host based on the instance.
-
-    :param adapter: The adapter for the pypowervm API
-    :param host_wrapper: The host wrapper
-    :param instance: The nova instance.
-    :param nvram: The NVRAM to set on the LPAR.
-    :param slot_mgr: NovaSlotManager to restore/save the maximum number of
-                     virtual slots. If omitted, the default is used.
-    :return: The LPAR response from the API.
-    """
-    try:
-        lpar_b = VMBuilder(
-            host_wrapper, adapter, slot_mgr=slot_mgr).lpar_builder(instance)
-        pending_lpar_w = lpar_b.build()
-        vldn.LPARWrapperValidator(pending_lpar_w, host_wrapper).validate_all()
-        if nvram is not None:
-            pending_lpar_w.nvram = nvram
-        lpar_w = pending_lpar_w.create(parent=host_wrapper)
-        if slot_mgr is not None:
-            slot_mgr.register_max_vslots(lpar_w.io_config.max_virtual_slots)
-        return lpar_w
-    except lpar_bldr.LPARBuilderException as e:
-        # Raise the BuildAbortException since LPAR failed to build
-        raise exception.BuildAbortException(instance_uuid=instance.uuid,
-                                            reason=e)
-    except pvm_exc.HttpError as he:
-        # Raise the API exception
-        LOG.exception("PowerVM HttpError creating LPAR.", instance=instance)
-        raise nvex.PowerVMAPIFailed(inst_name=instance.name, reason=he)
-
-
-def update(adapter, host_wrapper, instance, entry=None, name=None):
-    """Update an LPAR on the host based on the instance.
-
-    :param adapter: The adapter for the pypowervm API
-    :param host_wrapper: The host wrapper
-    :param instance: The nova instance.
-    :param entry: The instance pvm entry, if available, otherwise it will
-                  be fetched.
-    :param name: VM name to use for the update. Used on resize when we want
-                 to rename it but not use the instance name.
-    :returns: The updated LPAR wrapper.
-    """
-
-    if not entry:
-        entry = get_instance_wrapper(adapter, instance)
-
-    lpar_b = VMBuilder(host_wrapper, adapter, cur_lpar_w=entry).lpar_builder(
-        instance)
-    lpar_b.rebuild(entry)
-
-    # Set the new name if the instance name is not desired.
-    if name:
-        entry.name = pvm_util.sanitize_partition_name_for_api(name)
-    # Write out the new specs, return the updated version
-    return entry.update()
-
-
-def rename(adapter, instance, name, entry=None):
-    """Rename a VM.
-
-    :param adapter: The adapter for the pypowervm API
-    :param instance: The nova instance.
-    :param name: The new name.
-    :param entry: The instance pvm entry, if available, otherwise it will
-                  be fetched.
-    :returns: The updated LPAR wrapper.
- """ - if not entry: - entry = get_instance_wrapper(adapter, instance) - - hyp_name = pvm_util.sanitize_partition_name_for_api(name) - - @pvm_trans.entry_transaction - def _rename(entry): - entry.name = hyp_name - return entry.update() - - return _rename(entry) - - -def delete_lpar(adapter, instance): - """Delete an LPAR - - :param adapter: The adapter for the pypowervm API. - :param instance: The nova instance whose LPAR is to be deleted. - """ - lpar_uuid = get_pvm_uuid(instance) - # Attempt to delete the VM. - try: - LOG.info('Deleting LPAR', instance=instance) - - # Ensure any vterms are closed. Will no-op otherwise. - vterm.close_vterm(adapter, lpar_uuid) - - # Run the LPAR delete - resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid) - LOG.info('LPAR delete status: %d', resp.status, instance=instance) - return resp - except pvm_exc.HttpNotFound: - LOG.info('LPAR not found (already deleted).', instance=instance) - - -def power_on(adapter, instance, opts=None): - """Powers on a VM. - - :param adapter: A pypowervm.adapter.Adapter. - :param instance: The nova instance to power on. - :param opts: (Optional) Additional parameters to the pypowervm power_on - method. See that method's docstring for details. - :return: True if the instance was powered on. False if it was not in a - startable state. - :raises: InstancePowerOnFailure - """ - # Synchronize power-on and power-off ops on a given instance - with lockutils.lock('power_%s' % instance.uuid): - entry = get_instance_wrapper(adapter, instance) - - # Get the current state and see if we can start the VM - if entry.state in POWERVM_STARTABLE_STATE: - # Now start the lpar - power.power_on(entry, None, add_parms=opts) - return True - - return False - - -def power_off(adapter, instance, force_immediate=False, timeout=None): - """Powers off a VM. - - :param adapter: A pypowervm.adapter.Adapter. - :param instance: The nova instance to power off. - :param force_immediate: (Optional, Default False) Should it be immediately - shut down. - :param timeout: (Optional, Default None) How long to wait for the job - to complete. By default, is None which indicates it should - use the default from pypowervm's power off method. - :return: True if the instance was stopped. False if it was not in a - stoppable state. - :raises: InstancePowerOffFailure - """ - # Synchronize power-on and power-off ops on a given instance - with lockutils.lock('power_%s' % instance.uuid): - entry = get_instance_wrapper(adapter, instance) - - # Get the current state and see if we can stop the VM - LOG.debug("Power off requested for instance in state %(state)s. Force " - "Immediate Flag: %(force)s.", - {'state': entry.state, 'force': force_immediate}, - instance=instance) - if entry.state in POWERVM_STOPABLE_STATE: - # Now stop the lpar - try: - LOG.debug("Power off executing.", instance=instance) - kwargs = {'timeout': timeout} if timeout else {} - if force_immediate: - power.PowerOp.stop( - entry, opts=popts.PowerOffOpts().vsp_hard(), **kwargs) - else: - power.power_off_progressive(entry, **kwargs) - except Exception as e: - LOG.exception("Failed to power off instance.", - instance=instance) - raise exception.InstancePowerOffFailure( - reason=six.text_type(e)) - return True - else: - LOG.debug("Power off not required.", instance=instance) - - return False - - -def reboot(adapter, instance, hard): - """Reboots a VM. - - :param adapter: A pypowervm.adapter.Adapter. - :param instance: The nova instance to reboot. 
- :param hard: Boolean True if hard reboot, False otherwise. - :raises: InstanceRebootFailure - """ - # Synchronize power-on and power-off ops on a given instance - with lockutils.lock('power_%s' % instance.uuid): - try: - entry = get_instance_wrapper(adapter, instance) - if entry.state != pvm_bp.LPARState.NOT_ACTIVATED: - if hard: - power.PowerOp.stop( - entry, opts=popts.PowerOffOpts().vsp_hard().restart()) - else: - power.power_off_progressive(entry, restart=True) - else: - # pypowervm does NOT throw an exception if "already down". - # Any other exception from pypowervm is a legitimate failure; - # let it raise up. - # If we get here, pypowervm thinks the instance is down. - power.power_on(entry, None) - except Exception as e: - LOG.exception("Failed to reboot instance.", instance=instance) - raise exception.InstanceRebootFailure(reason=six.text_type(e)) - - -def get_pvm_uuid(instance): - """Get the corresponding PowerVM VM uuid of an instance uuid - - Maps a OpenStack instance uuid to a PowerVM uuid. The UUID between the - Nova instance and PowerVM will be 1 to 1 mapped. This method runs the - algorithm against the instance's uuid to convert it to the PowerVM - UUID. - - :param instance: nova.objects.instance.Instance OR the OpenStack instance - uuid. - :return: pvm_uuid. - """ - inst_uuid = instance if uuidutils.is_uuid_like(instance) else instance.uuid - return pvm_uuid.convert_uuid_to_pvm(inst_uuid).upper() - - -def _uuid_set_high_bit(pvm_uuid): - """Turns on the high bit of a uuid - - PowerVM uuids always set the byte 0, bit 0 to 0. - So to convert it to an OpenStack uuid we may have to set the high bit. - - :param uuid: A PowerVM compliant uuid - :returns: A standard format uuid string - """ - return "%x%s" % (int(pvm_uuid[0], 16) | 8, pvm_uuid[1:]) - - -def get_instance(context, pvm_uuid): - """Get an instance, if there is one, that corresponds to the PVM UUID - - Not finding the instance can be a pretty normal case when handling events. - Don't log exceptions for those cases. - - :param pvm_uuid: PowerVM UUID - :return: OpenStack instance or None - """ - uuid = pvm_uuid.lower() - - def get_inst(): - try: - return objects.Instance.get_by_uuid(context, uuid) - except exception.InstanceNotFound: - return objects.Instance.get_by_uuid(context, - _uuid_set_high_bit(uuid)) - - try: - return get_inst() - except exception.InstanceNotFound: - pass - except Exception as e: - LOG.debug('Instance with PowerVM UUID %s not found: %s', pvm_uuid, e) - return None - - -def get_cnas(adapter, instance, **search): - """Returns the (possibly filtered) current CNAs on the instance. - - The Client Network Adapters are the Ethernet adapters for a VM. - :param adapter: The pypowervm adapter. - :param instance: The nova instance. - :param search: Keyword arguments for CNA.search. If omitted, all CNAs are - returned. - :return: The CNA wrappers that represent the ClientNetworkAdapters - on the VM - """ - meth = pvm_net.CNA.search if search else pvm_net.CNA.get - - return meth(adapter, parent_type=pvm_lpar.LPAR, - parent_uuid=get_pvm_uuid(instance), **search) - - -def get_vnics(adapter, instance, **search): - """Returns the (possibly filtered) current vNICs on the instance. - - :param adapter: The pypowervm adapter. - :param instance: The nova instance. - :param search: Keyword arguments for VNIC.search. If omitted, all VNICs - are returned. - :return: The VNIC wrappers that represent the virtual NICs on the VM. 
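The bit arithmetic in _uuid_set_high_bit above can be verified standalone; the UUID below is made up, with its leading nibble's high bit cleared the way PowerVM reports it.

def uuid_set_high_bit(pvm_uuid):
    # OR 0x8 into the first hex digit to restore the high bit.
    return '%x%s' % (int(pvm_uuid[0], 16) | 8, pvm_uuid[1:])


pvm = '1c35fea7-2c42-48cb-9d82-1e28a06f7a4c'  # hypothetical PowerVM UUID
assert uuid_set_high_bit(pvm) == '9c35fea7-2c42-48cb-9d82-1e28a06f7a4c'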
- """ - meth = pvm_card.VNIC.search if search else pvm_card.VNIC.get - - return meth(adapter, parent_type=pvm_lpar.LPAR, - parent_uuid=get_pvm_uuid(instance), **search) - - -def norm_mac(mac): - """Normalizes a MAC address from pypowervm format to OpenStack. - - That means that the format will be converted to lower case and will - have colons added. - - :param mac: A pypowervm mac address. Ex. 1234567890AB - :return: A mac that matches the standard neutron format. - Ex. 12:34:56:78:90:ab - """ - mac = mac.lower().replace(':', '') - return ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2)) - - -def update_ibmi_settings(adapter, instance, boot_type): - """Update settings of IBMi VMs on the instance. - - :param adapter: The pypowervm adapter. - :param instance: The nova instance. - :param boot_type: The boot connectivity type of the instance. - """ - lpar_wrap = get_instance_wrapper(adapter, instance) - entry = ibmi.update_ibmi_settings(adapter, lpar_wrap, boot_type) - entry.update() diff --git a/nova_powervm/virt/powervm/volume/__init__.py b/nova_powervm/virt/powervm/volume/__init__.py deleted file mode 100644 index fa5bc251..00000000 --- a/nova_powervm/virt/powervm/volume/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# Defines the various volume connectors that can be used. -from nova import exception -from oslo_utils import importutils - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm.i18n import _ - -CONF = cfg.CONF - -FC_STRATEGY_MAPPING = { - 'npiv': CONF.powervm.fc_npiv_adapter_api, - 'vscsi': CONF.powervm.fc_vscsi_adapter_api -} - -_STATIC_VOLUME_MAPPINGS = { - 'iscsi': 'nova_powervm.virt.powervm.volume.iscsi.' - 'IscsiVolumeAdapter', - 'iser': 'nova_powervm.virt.powervm.volume.iscsi.' - 'IscsiVolumeAdapter', - 'local': 'nova_powervm.virt.powervm.volume.local.' - 'LocalVolumeAdapter', - 'nfs': 'nova_powervm.virt.powervm.volume.nfs.NFSVolumeAdapter', - 'gpfs': 'nova_powervm.virt.powervm.volume.gpfs.GPFSVolumeAdapter', - 'rbd': 'nova_powervm.virt.powervm.volume.rbd.RBDVolumeAdapter', -} - - -def build_volume_driver(adapter, host_uuid, instance, conn_info, - stg_ftsk=None): - vol_cls = get_volume_class(conn_info.get('driver_volume_type')) - - return vol_cls(adapter, host_uuid, instance, conn_info, - stg_ftsk=stg_ftsk) - - -def get_volume_class(drv_type): - if drv_type in _STATIC_VOLUME_MAPPINGS: - class_type = _STATIC_VOLUME_MAPPINGS[drv_type] - elif drv_type == 'fibre_channel': - class_type = (FC_STRATEGY_MAPPING[ - CONF.powervm.fc_attach_strategy.lower()]) - else: - failure_reason = _("Invalid connection type of %s") % drv_type - raise exception.InvalidVolume(reason=failure_reason) - - return importutils.import_class(class_type) - - -def get_hostname_for_volume(instance): - if CONF.powervm.fc_attach_strategy.lower() == 'npiv': - # Tie the host name to the instance, as it will be represented in - # the backend as a full server. 
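get_volume_class above is a dict lookup followed by oslo's dynamic class import. A self-contained sketch of the same dispatch, using only the standard library and a stand-in mapping in place of _STATIC_VOLUME_MAPPINGS:

import collections
import importlib

TOY_MAPPINGS = {'local': 'collections.OrderedDict'}  # stand-in class path


def get_class(drv_type):
    try:
        path = TOY_MAPPINGS[drv_type]
    except KeyError:
        raise ValueError('Invalid connection type of %s' % drv_type)
    module, _, name = path.rpartition('.')
    return getattr(importlib.import_module(module), name)


assert get_class('local') is collections.OrderedDict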
- host = CONF.host if len(CONF.host) < 20 else CONF.host[:20] - return host + '_' + instance.name - else: - return CONF.host - - -def get_wwpns_for_volume_connector(adapter, host_uuid, instance): - # WWPNs are derived from the FC connector. Pass in a fake connection info - # to trick it into thinking it FC - fake_fc_conn_info = {'driver_volume_type': 'fibre_channel'} - fc_vol_drv = build_volume_driver(adapter, host_uuid, instance, - fake_fc_conn_info) - return fc_vol_drv.wwpns() diff --git a/nova_powervm/virt/powervm/volume/driver.py b/nova_powervm/virt/powervm/volume/driver.py deleted file mode 100644 index 25edbb68..00000000 --- a/nova_powervm/virt/powervm/volume/driver.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright 2015, 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - -from oslo_log import log as logging -from pypowervm import exceptions as pvm_exc -from pypowervm.tasks import partition as pvm_partition -from pypowervm.tasks import storage as tsk_stg -from pypowervm.utils import transaction as pvm_tx -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from nova_powervm.virt.powervm import exception as exc -from nova_powervm.virt.powervm import vm - - -LOG = logging.getLogger(__name__) -LOCAL_FEED_TASK = 'local_feed_task' - - -@six.add_metaclass(abc.ABCMeta) -class PowerVMVolumeAdapter(object): - """The volume adapter connects a Cinder volume to a VM. - - The role of the volume driver is to perform the connection between the - compute node and the backing physical fabric. - - This volume adapter is a generic adapter for all volume types to extend. - - This is built similarly to the LibvirtBaseVolumeDriver. - """ - def __init__(self, adapter, host_uuid, instance, connection_info, - stg_ftsk=None): - """Initialize the PowerVMVolumeAdapter - - :param adapter: The pypowervm adapter. - :param host_uuid: The pypowervm UUID of the host. - :param instance: The nova instance that the volume should connect to. - :param connection_info: The volume connection info generated from the - BDM. Used to determine how to connect the - volume to the VM. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when the respective method is executed. - """ - self.adapter = adapter - self.host_uuid = host_uuid - self.instance = instance - self.connection_info = connection_info - self.vm_uuid = vm.get_pvm_uuid(instance) - # Lazy-set this - self._vm_id = None - - self.reset_stg_ftsk(stg_ftsk=stg_ftsk) - - @property - def vm_id(self): - """Return the short ID (not UUID) of the LPAR for our instance. - - This method is unavailable during a pre live migration call since - there is no instance of the VM on the destination host at the time. 
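The vm_id property above is a fetch-once cache around a REST call. A standalone equivalent of the pattern, with a lambda standing in for vm.get_vm_id:

class LazyVmId(object):
    def __init__(self, fetch):
        self._fetch = fetch    # stands in for vm.get_vm_id(adapter, uuid)
        self._vm_id = None

    @property
    def vm_id(self):
        if self._vm_id is None:        # only one REST round trip, ever
            self._vm_id = self._fetch()
        return self._vm_id


calls = []
obj = LazyVmId(lambda: calls.append(1) or 7)
assert (obj.vm_id, obj.vm_id, len(calls)) == (7, 7, 1)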
- """ - if self._vm_id is None: - self._vm_id = vm.get_vm_id(self.adapter, self.vm_uuid) - return self._vm_id - - @property - def volume_id(self): - """Method to return the volume id. - - Every driver must implement this method if the default impl will - not work for their data. - """ - return self.connection_info['serial'] - - def reset_stg_ftsk(self, stg_ftsk=None): - """Resets the pypowervm transaction FeedTask to a new value. - - The previous updates from the original FeedTask WILL NOT be migrated - to this new FeedTask. - - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when this method is executed. - """ - if stg_ftsk is None: - getter = pvm_vios.VIOS.getter(self.adapter, xag=self.min_xags()) - self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter) - else: - self.stg_ftsk = stg_ftsk - - @classmethod - def min_xags(cls): - """List of pypowervm XAGs needed to support this adapter.""" - raise NotImplementedError() - - @property - def vios_uuids(self): - """List the UUIDs of the Virtual I/O Servers hosting the storage.""" - vios_wraps = pvm_partition.get_active_vioses(self.adapter) - return [wrap.uuid for wrap in vios_wraps] - - @classmethod - def vol_type(cls): - """The type of volume supported by this driver.""" - raise NotImplementedError() - - def is_volume_on_vios(self, vios_w): - """Returns whether or not the volume is on VIOS. - - This method is used in the NovaSlotManager to build slot map. - Needs to be implemented within the subclass to support rebuild. - - :param vios_w: The Virtual I/O Server wrapper - :return: True if the volume is available on the VIOS. False - otherwise. - :return: The unique identification of the volume. - """ - raise NotImplementedError() - - def pre_live_migration_on_destination(self, mig_data): - """Perform pre live migration steps for the volume on the target host. - - This method performs any pre live migration that is needed. - - Certain volume connectors may need to pass data from the source host - to the target. This may be required to determine how volumes connect - through the Virtual I/O Servers. - - This method will be called after the pre_live_migration_on_source - method. The data from the pre_live call will be passed in via the - mig_data. This method should put its output into the dest_mig_data. - - :param mig_data: Dict of migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. - """ - raise NotImplementedError() - - def pre_live_migration_on_source(self, mig_data): - """Performs pre live migration steps for the volume on the source host. - - Certain volume connectors may need to pass data from the source host - to the target. This may be required to determine how volumes connect - through the Virtual I/O Servers. - - This method gives the volume connector an opportunity to update the - mig_data (a dictionary) with any data that is needed for the target - host during the pre-live migration step. - - Since the source host has no native pre_live_migration step, this is - invoked from check_can_live_migrate_source in the overall live - migration flow. 
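A hedged sketch of the immediate execution mode that reset_stg_ftsk sets up when no FeedTask is supplied: queue a subtask against the VIOS feed, then flush it right away. Only pypowervm calls already used in this module appear here, and the snippet assumes a live, authenticated adapter.

from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import virtual_io_server as pvm_vios


def run_now(adapter, xags, subtask):
    # Local FeedTask: queue one subtask, then flush immediately.
    getter = pvm_vios.VIOS.getter(adapter, xag=xags)
    ftsk = pvm_tx.FeedTask('local_feed_task', getter)
    ftsk.add_functor_subtask(subtask)  # queued against every VIOS in the feed
    ftsk.execute()                     # all queued updates run here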
- - :param mig_data: A dictionary that the method can update to include - data needed by the pre_live_migration_at_destination - method. - """ - pass - - def post_live_migration_at_source(self, migrate_data): - """Performs post live migration for the volume on the source host. - - This method can be used to handle any steps that need to taken on - the source host after the VM is on the destination. - - :param migrate_data: volume migration data - """ - pass - - def post_live_migration_at_destination(self, mig_vol_stor): - """Perform post live migration steps for the volume on the target host. - - This method performs any post live migration that is needed. Is not - required to be implemented. - - :param mig_vol_stor: An unbounded dictionary that will be passed to - each volume adapter during the post live migration - call. Adapters can store data in here that may - be used by subsequent volume adapters. - """ - pass - - def cleanup_volume_at_destination(self, migrate_data): - """Performs volume cleanup after LPM failure on the dest host. - - This method can be used to handle any steps that need to taken on - the destination host after the migration has failed. - - :param migrate_data: migration data - """ - pass - - def connect_volume(self, slot_mgr): - """Connects the volume. - - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is attached to the VM - """ - # Check if the VM is in a state where the attach is acceptable. - lpar_w = vm.get_instance_wrapper(self.adapter, self.instance) - capable, reason = lpar_w.can_modify_io() - if not capable: - raise exc.VolumeAttachFailed( - volume_id=self.volume_id, instance_name=self.instance.name, - reason=reason) - - # Run the connect - self._connect_volume(slot_mgr) - - if self.stg_ftsk.name == LOCAL_FEED_TASK: - self.stg_ftsk.execute() - - def extend_volume(self): - raise NotImplementedError() - - def _extend_volume(self, udid): - """Rescan virtual disk so client VM can see extended size""" - resized = False - error = False - for vios_uuid in self.vios_uuids: - try: - LOG.debug("Rescanning volume %(vol)s for vios uuid %(uuid)s", - dict(vol=self.volume_id, uuid=vios_uuid), - instance=self.instance) - tsk_stg.rescan_vstor(vios_uuid, udid, adapter=self.adapter) - resized = True - except pvm_exc.VstorNotFound: - LOG.info("Failed to find volume %(vol)s for VIOS " - "UUID %(uuid)s during extend operation.", - {'vol': self.volume_id, 'uuid': vios_uuid}, - instance=self.instance) - except pvm_exc.JobRequestFailed as e: - error = True - LOG.error("Failed to rescan volume %(vol)s for VIOS " - "UUID %(uuid)s. %(reason)s", - {'vol': self.volume_id, 'uuid': vios_uuid, - 'reason': six.text_type(e)}, instance=self.instance) - if not resized or error: - raise exc.VolumeExtendFailed(volume_id=self.volume_id, - instance_name=self.instance.name) - - def disconnect_volume(self, slot_mgr): - """Disconnect the volume. - - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is detached from the VM. - """ - # Check if the VM is in a state where the detach is acceptable. 
- lpar_w = vm.get_instance_wrapper(self.adapter, self.instance) - capable, reason = lpar_w.can_modify_io() - if not capable: - raise exc.VolumeDetachFailed( - volume_id=self.volume_id, instance_name=self.instance.name, - reason=reason) - - # Run the disconnect - self._disconnect_volume(slot_mgr) - - if self.stg_ftsk.name == LOCAL_FEED_TASK: - self.stg_ftsk.execute() - - def _connect_volume(self, slot_mgr): - """Connects the volume. - - This is the actual method to implement within the subclass. Some - transaction maintenance is done by the parent class. - - :param slot_mgr: A NovaSlotStore. Used to store/retrieve the client - slots used when a volume is attached to the VM. - """ - raise NotImplementedError() - - def _disconnect_volume(self, slot_mgr): - """Disconnect the volume. - - This is the actual method to implement within the subclass. Some - transaction maintenance is done by the parent class. - - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM - """ - raise NotImplementedError() - - -@six.add_metaclass(abc.ABCMeta) -class FibreChannelVolumeAdapter(PowerVMVolumeAdapter): - """Defines a Fibre Channel specific volume adapter. - - Fibre Channel has a few additional attributes for the volume adapter. - This class defines the additional attributes so that the multiple FC - sub classes can support them. - """ - - def wwpns(self): - """Builds the WWPNs of the adapters that will connect the ports. - - :return: The list of WWPNs that need to be included in the zone set. - """ - raise NotImplementedError() diff --git a/nova_powervm/virt/powervm/volume/fileio.py b/nova_powervm/virt/powervm/volume/fileio.py deleted file mode 100644 index 2c31fc1f..00000000 --- a/nova_powervm/virt/powervm/volume/fileio.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
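To make the abstract adapter surface above concrete, here is a skeletal, non-functional subclass showing the minimum a new volume type implements; the class name and behavior are illustrative only, and it assumes the PowerVMVolumeAdapter base class above is importable.

class NoopVolumeAdapter(PowerVMVolumeAdapter):
    """Illustrative stub that satisfies the abstract API but wires nothing."""

    @classmethod
    def min_xags(cls):
        return []                # no extended attribute groups needed

    @classmethod
    def vol_type(cls):
        return 'noop'

    def is_volume_on_vios(self, vios_w):
        return False, None

    def _connect_volume(self, slot_mgr):
        pass                     # real adapters add VIOS mappings here

    def _disconnect_volume(self, slot_mgr):
        pass                     # ...and remove them here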
- -import abc -import os -import six -from taskflow import task - -from nova import exception as nova_exc -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import vm -from nova_powervm.virt.powervm.volume import driver as v_driver -from oslo_log import log as logging -from pypowervm import const as pvm_const -from pypowervm.tasks import client_storage as pvm_c_stor -from pypowervm.tasks import partition -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.wrappers import storage as pvm_stg - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter): - """Base class for connecting file based Cinder Volumes to PowerVM VMs.""" - - def __init__(self, adapter, host_uuid, instance, connection_info, - stg_ftsk=None): - super(FileIOVolumeAdapter, self).__init__( - adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk) - self._nl_vios_ids = None - - @classmethod - def min_xags(cls): - return [pvm_const.XAG.VIO_SMAP] - - @classmethod - def vol_type(cls): - """The type of volume supported by this type.""" - return 'fileio' - - @abc.abstractmethod - def _get_path(self): - """Return the path to the file to connect.""" - pass - - @property - def vios_uuids(self): - """List the UUIDs of the Virtual I/O Servers hosting the storage.""" - # Get the hosting UUID - if self._nl_vios_ids is None: - nl_vios_wrap = partition.get_mgmt_partition(self.adapter) - self._nl_vios_ids = [nl_vios_wrap.uuid] - return self._nl_vios_ids - - def pre_live_migration_on_destination(self, mig_data): - """Perform pre live migration steps for the volume on the target host. - - This method performs any pre live migration that is needed. - - This method will be called after the pre_live_migration_on_source - method. The data from the pre_live call will be passed in via the - mig_data. This method should put its output into the dest_mig_data. - - :param mig_data: Dict of migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. - """ - LOG.debug("Incoming mig_data=%s", mig_data, instance=self.instance) - # Check if volume is available in destination. - vol_path = self._get_path() - if not os.path.exists(vol_path): - LOG.warning("File not found at path %s", vol_path, - instance=self.instance) - raise p_exc.VolumePreMigrationFailed( - volume_id=self.volume_id, instance_name=self.instance.name) - - def _connect_volume(self, slot_mgr): - path = self._get_path() - volid = self.connection_info['data']['volume_id'] - fio = pvm_stg.FileIO.bld( - self.adapter, path, - backstore_type=pvm_stg.BackStoreType.LOOP, tag=volid) - - def add_func(vios_w): - # If the vios doesn't match, just return - if vios_w.uuid not in self.vios_uuids: - return None - - LOG.info("Adding logical volume disk connection to VIOS %(vios)s.", - {'vios': vios_w.name}, instance=self.instance) - slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path) - if slot_mgr.is_rebuild and not slot: - LOG.debug('Detected a device with path %(path)s on VIOS ' - '%(vios)s on the rebuild that did not exist on the ' - 'source. 
Ignoring.', - {'path': path, 'vios': vios_w.uuid}, - instance=self.instance) - return None - - mapping = tsk_map.build_vscsi_mapping( - self.host_uuid, vios_w, self.vm_uuid, fio, lpar_slot_num=slot, - lua=lua) - return tsk_map.add_map(vios_w, mapping) - - self.stg_ftsk.add_functor_subtask(add_func) - - # Run after all the deferred tasks the query to save the slots in the - # slot map. - def set_slot_info(): - vios_wraps = self.stg_ftsk.feed - partition_id = vm.get_vm_id(self.adapter, self.vm_uuid) - for vios_w in vios_wraps: - scsi_map = pvm_c_stor.udid_to_scsi_mapping( - vios_w, path, partition_id) - if not scsi_map: - continue - slot_mgr.register_vscsi_mapping(scsi_map) - - self.stg_ftsk.add_post_execute(task.FunctorTask( - set_slot_info, name='file_io_slot_%s' % path)) - - def extend_volume(self): - path = self._get_path() - if path is None: - raise nova_exc.InvalidBDM() - self._extend_volume(path) - - def _disconnect_volume(self, slot_mgr): - # Build the match function - match_func = tsk_map.gen_match_func(pvm_stg.VDisk, - names=[self._get_path()]) - - # Make sure the remove function will run within the transaction manager - def rm_func(vios_w): - # If the vios doesn't match, just return - if vios_w.uuid not in self.vios_uuids: - return None - - LOG.info("Disconnecting storage disks.", instance=self.instance) - removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid, - match_func=match_func) - for rm_map in removed_maps: - slot_mgr.drop_vscsi_mapping(rm_map) - return removed_maps - - self.stg_ftsk.add_functor_subtask(rm_func) - # Find the disk directly. - vios_w = self.stg_ftsk.wrapper_tasks[self.vios_uuids[0]].wrapper - mappings = tsk_map.find_maps(vios_w.scsi_mappings, - client_lpar_id=self.vm_uuid, - match_func=match_func) - - return [x.backing_storage for x in mappings] - - def is_volume_on_vios(self, vios_w): - """Returns whether or not the volume file is on a VIOS. - - This method is used during live-migration and rebuild to - check if the volume is available on the target host. - - :param vios_w: The Virtual I/O Server wrapper. - :return: True if the file is on the VIOS. False - otherwise. - :return: The file path. - """ - if vios_w.uuid not in self.vios_uuids: - return False, None - - vol_path = self._get_path() - vol_found = os.path.exists(vol_path) - return vol_found, vol_path diff --git a/nova_powervm/virt/powervm/volume/gpfs.py b/nova_powervm/virt/powervm/volume/gpfs.py deleted file mode 100644 index 79067a6c..00000000 --- a/nova_powervm/virt/powervm/volume/gpfs.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
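The add_func/set_slot_info pairing above is a recurring shape in this driver: per-VIOS changes are queued as functor subtasks, and a single post-execute task records the resulting slot data. A hedged skeleton of that shape, using taskflow's FunctorTask as the code above does:

from taskflow import task


def queue_with_bookkeeping(stg_ftsk, change_func, record_func, name):
    # change_func runs once per VIOS wrapper in the feed; record_func runs
    # exactly once, after every queued subtask has completed.
    stg_ftsk.add_functor_subtask(change_func)
    stg_ftsk.add_post_execute(task.FunctorTask(record_func, name=name))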
- -from nova_powervm.virt.powervm.volume import fileio - - -class GPFSVolumeAdapter(fileio.FileIOVolumeAdapter): - """Connects GPFS Cinder Volumes to PowerVM VMs.""" - - def _get_path(self): - return self.connection_info.get("data")['device_path'] diff --git a/nova_powervm/virt/powervm/volume/iscsi.py b/nova_powervm/virt/powervm/volume/iscsi.py deleted file mode 100644 index f6e05704..00000000 --- a/nova_powervm/virt/powervm/volume/iscsi.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import collections -import copy -from oslo_concurrency import lockutils -from oslo_log import log as logging - -from nova import exception as nova_exc -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import vm -from nova_powervm.virt.powervm.volume import driver as v_driver -from nova_powervm.virt.powervm.volume import volume -from pypowervm import const as pvm_const -from pypowervm import exceptions as pvm_exc -from pypowervm.tasks import hdisk -from pypowervm.tasks import partition as pvm_partition -from pypowervm.utils import transaction as tx -from pypowervm.wrappers import virtual_io_server as pvm_vios - -from taskflow import task - -import six - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -DEVNAME_KEY = 'target_devname' -_ISCSI_INITIATORS = collections.OrderedDict() - - -def get_iscsi_initiators(adapter, vios_ids=None): - """Gets the VIOS iSCSI initiators. - - For the first time invocation of this method after process start up, - it populates initiators data for VIOSes (if specified, otherwise it - gets active VIOSes from the host) and stores in memory for futher - lookup. - - :param adapter: The pypowervm adapter - :param vios_ids: List of VIOS ids to get the initiators. If not - specified, a list of active VIOSes for the - host is fetched (but only for the first time) - through the pypowervm adapter. 
-    :return: A dict of the form {<vios_id>: <initiator>}
-    """
-
-    global _ISCSI_INITIATORS
-
-    def discover_initiator(vios_id):
-
-        # Get the VIOS id lock for initiator lookup
-        @lockutils.synchronized('initiator-lookup-' + vios_id)
-        def _discover_initiator():
-            if vios_id in _ISCSI_INITIATORS and _ISCSI_INITIATORS[vios_id]:
-                return
-            else:
-                try:
-                    initiator = hdisk.discover_iscsi_initiator(
-                        adapter, vios_id)
-                    _ISCSI_INITIATORS[vios_id] = initiator
-                except (pvm_exc.ISCSIDiscoveryFailed,
-                        pvm_exc.JobRequestFailed) as e:
-                    # TODO(chhagarw): handle differently based on
-                    # error codes
-                    LOG.error(e)
-
-        _discover_initiator()
-
-    if vios_ids is None and not _ISCSI_INITIATORS:
-        vios_list = pvm_partition.get_active_vioses(adapter)
-        vios_ids = [vios.uuid for vios in vios_list]
-
-    for vios_id in vios_ids or []:
-        discover_initiator(vios_id)
-
-    LOG.debug("iSCSI initiator info: %s", _ISCSI_INITIATORS)
-    return _ISCSI_INITIATORS
-
-
-class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
-                         v_driver.PowerVMVolumeAdapter):
-    """The iSCSI implementation of the Volume Adapter.
-
-    This driver connects a volume to a VM: first it uses iSCSI to connect
-    the volume to the I/O Host (NovaLink partition), then it uses the
-    PowerVM vSCSI technology to present the volume to the VM itself.
-    """
-    def __init__(self, adapter, host_uuid, instance, connection_info,
-                 stg_ftsk=None):
-        super(IscsiVolumeAdapter, self).__init__(
-            adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk)
-        if connection_info['driver_volume_type'] == 'iser':
-            self.iface_name = 'iser'
-        else:
-            self.iface_name = CONF.powervm.iscsi_iface
-
-    @classmethod
-    def vol_type(cls):
-        """The type of volume supported by this driver."""
-        return 'iscsi'
-
-    @classmethod
-    def min_xags(cls):
-        """List of pypowervm XAGs needed to support this adapter."""
-        return [pvm_const.XAG.VIO_SMAP]
-
-    def pre_live_migration_on_destination(self, mig_data):
-        """Perform pre live migration steps for the volume on the target host.
-
-        This method performs any pre live migration that is needed.
-
-        Certain volume connectors may need to pass data from the source host
-        to the target. This may be required to determine how volumes connect
-        through the Virtual I/O Servers.
-
-        This method will be called after the pre_live_migration_on_source
-        method. The data from the pre_live call will be passed in via the
-        mig_data. This method should put its output into the dest_mig_data.
-
-        :param mig_data: Dict of migration data for the destination server.
-                         If the volume connector needs to provide
-                         information to the live_migration command, it
-                         should be added to this dictionary.
-        """
-
-        # See the connect_volume for why this is a direct call instead of
-        # using the tx_mgr.feed
-        vios_wraps = pvm_vios.VIOS.get(self.adapter,
-                                       xag=[pvm_const.XAG.VIO_STOR])
-
-        volume_key = 'vscsi-' + self.volume_id
-        for vios_w in vios_wraps:
-            if vios_w.uuid not in self.vios_uuids:
-                continue
-            # Discover the volume on all VIOS's to trigger the
-            # device configuration, which in turn will discover the
-            # LUN associated with the volume. This needs to be
-            # attempted on all VIOS's to determine the VIOS's that
-            # will be servicing IO for this volume.
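A standalone analogue of the caching in get_iscsi_initiators above: discover each initiator at most once and remember it for the life of the process. The real code takes a lock per VIOS id via oslo lockutils; this sketch simplifies to one lock.

import threading

_cache = {}
_lock = threading.Lock()


def get_initiator(vios_id, discover):
    # 'discover' stands in for hdisk.discover_iscsi_initiator(adapter, id).
    with _lock:
        if not _cache.get(vios_id):
            _cache[vios_id] = discover(vios_id)
    return _cache[vios_id]


assert get_initiator('vios1', lambda v: 'iqn.2024-01.example:' + v) == \
    'iqn.2024-01.example:vios1'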
- udid = self._discover_volume_on_vios(vios_w)[1] - if udid: - LOG.debug("Discovered volume udid %(udid)s on vios %(name)s", - dict(udid=udid, name=vios_w.name)) - mig_data[volume_key] = udid - - if volume_key not in mig_data: - LOG.debug("Failed to discover the volume") - ex_args = dict(volume_id=self.volume_id, - instance_name=self.instance.name) - raise p_exc.VolumePreMigrationFailed(**ex_args) - - def post_live_migration_at_destination(self, mig_data): - """This method will update the connection info with the volume udid.""" - - volume_key = 'vscsi-' + self.volume_id - if volume_key in mig_data: - self._set_udid(mig_data[volume_key]) - - def post_live_migration_at_source(self, migrate_data): - """Performs post live migration for the volume on the source host. - - This method can be used to handle any steps that need to taken on - the source host after the VM is on the destination. - - :param migrate_data: volume migration data - """ - # Get the udid of the volume to remove the hdisk for. We can't - # use the connection information because LPM 'refreshes' it, which - # wipes out our data, so we use the data from the destination host - # to avoid having to discover the hdisk to get the udid. - udid = migrate_data.get('vscsi-' + self.volume_id) - self._cleanup_volume(udid) - - def is_volume_on_vios(self, vios_w): - """Returns whether or not the volume is on a VIOS. - - :param vios_w: The Virtual I/O Server wrapper. - :return: True if the volume driver's volume is on the VIOS. False - otherwise. - :return: The udid of the device. - """ - if vios_w.uuid not in self.vios_uuids: - return False, None - device_name, udid = self._discover_volume_on_vios(vios_w) - return (device_name and udid) is not None, udid - - def _is_multipath(self): - return self.connection_info["connector"].get("multipath", False) - - def _get_iscsi_conn_props(self, vios_w, auth=False): - """Returns the required iSCSI connection properties.""" - props = dict() - try: - data = self.connection_info['data'] - # For multipath target properties should exist - if all([key in data for key in ('target_portals', - 'target_iqns', - 'target_luns')]): - props['target_portals'] = data['target_portals'] - props['target_iqns'] = data['target_iqns'] - props['target_luns'] = data['target_luns'] - - if auth and 'discovery_auth_method' in data: - for s in ('method', 'username', 'password'): - k = 'discovery_auth_' + s - props[k] = data[k] - - props['target_portal'] = data['target_portal'] - props['target_iqn'] = data['target_iqn'] - props['target_lun'] = data['target_lun'] - - # if auth_method is set look for username, password - if auth and 'auth_method' in data: - props['auth_method'] = data['auth_method'] - props['auth_username'] = data['auth_username'] - props['auth_password'] = data['auth_password'] - - return props - - except (KeyError, ValueError): - # Missing information in the connection info - LOG.warning('Failed to retrieve iSCSI connection properties ' - 'for vios %(vios)s, connection_info=%(cinfo)s', - dict(vios=vios_w.uuid, cinfo=self.connection_info)) - return None - - def _discover_vol(self, vios_w, props): - portal = props.get("target_portals", props.get("target_portal")) - iqn = props.get("target_iqns", props.get("target_iqn")) - lun = props.get("target_luns", props.get("target_lun")) - auth = props.get("auth_method") - user = props.get("auth_username") - password = props.get("auth_password") - discovery_auth = props.get("discovery_auth_method") - discovery_username = props.get("discovery_auth_username") - 
discovery_password = props.get("discovery_auth_password") - try: - return hdisk.discover_iscsi( - self.adapter, portal, user, password, iqn, vios_w.uuid, - lunid=lun, iface_name=self.iface_name, auth=auth, - discovery_auth=discovery_auth, - discovery_username=discovery_username, - discovery_password=discovery_password, - multipath=self._is_multipath()) - except (pvm_exc.ISCSIDiscoveryFailed, pvm_exc.JobRequestFailed) as e: - msg_args = {'vios': vios_w.uuid, 'err': six.text_type(e)} - LOG.warning("iSCSI discovery on VIOS %(vios)s failed with " - "error: %(err)s", msg_args) - return None, None - - def _discover_volume_on_vios(self, vios_w): - """Discovers an hdisk on a single vios for the volume. - - :param vios_w: VIOS wrapper to process - :returns: Device name or None - :returns: LUN or None - """ - device_name = udid = None - conn_props = self._get_iscsi_conn_props(vios_w, auth=True) - if conn_props is None: - return None, None - - # Check if multipath, we can directly pass the IQN list to - # to the low level driver for volume discovery, else iterate - # over the IQN and get the list for discovery. - if self._is_multipath(): - device_name, udid = self._discover_vol(vios_w, conn_props) - else: - for props in self._iterate_all_targets(conn_props): - device_name, udid = self._discover_vol(vios_w, props) - return device_name, udid - - def _connect_volume_to_vio(self, vios_w, slot_mgr): - """Attempts to connect a volume to a given VIO. - - :param vios_w: The Virtual I/O Server wrapper to connect to. - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM - - :return: True if the volume was connected. False if the volume was - not (could be the Virtual I/O Server does not have - connectivity to the hdisk). - """ - # check if the vios uuid exist in the expected vios list - if vios_w.uuid not in self.vios_uuids: - LOG.debug("Skipping connect volume %(vol)s from " - "inactive vios uuid %(uuid)s.", - dict(vol=self.volume_id, uuid=vios_w.uuid)) - return False - - device_name, udid = self._discover_volume_on_vios(vios_w) - if device_name is not None and udid is not None: - slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, device_name) - volume_id = self.connection_info["data"]["volume_id"] - # Found a hdisk on this Virtual I/O Server. Add the action to - # map it to the VM when the stg_ftsk is executed. - with lockutils.lock(hash(self)): - self._add_append_mapping( - vios_w.uuid, device_name, lpar_slot_num=slot, lua=lua, - udid=udid, tag=volume_id) - - # Save the udid for the disk in the connection info. It is - # used for the detach. - self._set_udid(udid) - - LOG.debug('Device attached: %s', device_name, - instance=self.instance) - - # Valid attachment - return True - - return False - - def extend_volume(self): - """Rescan virtual disk so client VM can see extended size.""" - udid = self._get_udid() - if udid is None: - raise nova_exc.InvalidBDM() - self._extend_volume(udid) - - def _disconnect_volume(self, slot_mgr): - """Disconnect the volume. - - This is the actual method to implement within the subclass. Some - transaction maintenance is done by the parent class. - - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM - """ - - def discon_vol_for_vio(vios_w): - """Removes the volume from a specific Virtual I/O Server. - - :param vios_w: The VIOS wrapper. - :return: True if a remove action was done against this VIOS. False - otherwise. 
- """ - # Check if the vios uuid exist in the list - if vios_w.uuid not in self.vios_uuids: - LOG.debug("Skipping disconnect of volume %(vol)s from " - "inactive vios uuid %(uuid)s.", - dict(vol=self.volume_id, uuid=vios_w.uuid)) - return False - - LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s", - dict(vol=self.volume_id, uuid=vios_w.uuid), - instance=self.instance) - - device_name = None - try: - udid = self._get_udid() - if udid: - # Get the device name using UniqueDeviceID Identifier. - device_name = vios_w.hdisk_from_uuid(udid) - - if not udid or not device_name: - # If we have no device name, at this point - # we should not continue. Subsequent scrub code on - # future deploys will clean this up. - LOG.warning( - "Disconnect Volume: The backing hdisk for volume " - "%(volume_id)s on Virtual I/O Server %(vios)s is " - "not in a valid state. No disconnect " - "actions to be taken as volume is not healthy.", - {'volume_id': self.volume_id, 'vios': vios_w.name}, - instance=self.instance) - return False - - except Exception: - LOG.exception( - "Disconnect Volume: Failed to find device on Virtual I/O " - "Server %(vios_name)s for volume %(volume_id)s.", - {'vios_name': vios_w.name, 'volume_id': self.volume_id}, - instance=self.instance) - return False - - # We have found the device name - LOG.info("Disconnect Volume: Discovered the device %(hdisk)s " - "on Virtual I/O Server %(vios_name)s for volume " - "%(volume_id)s.", - {'volume_id': self.volume_id, - 'vios_name': vios_w.name, 'hdisk': device_name}, - instance=self.instance) - - # Add the action to remove the mapping when the stg_ftsk is run. - partition_id = vm.get_vm_id(self.adapter, self.vm_uuid) - - with lockutils.lock(hash(self)): - self._add_remove_mapping(partition_id, vios_w.uuid, - device_name, slot_mgr) - conn_data = self._get_iscsi_conn_props(vios_w) - if not conn_data: - return False - iqn = conn_data.get("target_iqns", conn_data.get("target_iqn")) - portal = conn_data.get("target_portals", - conn_data.get("target_portal")) - lun = conn_data.get("target_luns", - conn_data.get("target_lun")) - - def remove(): - try: - hdisk.remove_iscsi( - self.adapter, iqn, vios_w.uuid, lun=lun, - iface_name=self.iface_name, portal=portal, - multipath=self._is_multipath()) - except (pvm_exc.ISCSIRemoveFailed, - pvm_exc.JobRequestFailed) as e: - LOG.warning(e) - - self.stg_ftsk.add_post_execute(task.FunctorTask( - remove, name='remove_%s_from_vios_%s' % (device_name, - vios_w.uuid))) - - # Found a valid element to remove - return True - - try: - # See logic in _connect_volume for why this new FeedTask is here. - discon_ftsk = tx.FeedTask( - 'discon_volume_from_vio', pvm_vios.VIOS.getter( - self.adapter, xag=[pvm_const.XAG.VIO_STOR])) - # Find hdisks to disconnect - discon_ftsk.add_functor_subtask( - discon_vol_for_vio, provides='vio_modified', flag_update=False) - ret = discon_ftsk.execute() - - # Warn if no hdisks disconnected. 
- if not any([result['vio_modified'] - for result in ret['wrapper_task_rets'].values()]): - LOG.warning( - "Disconnect Volume: Failed to disconnect the volume " - "%(volume_id)s on ANY of the Virtual I/O Servers.", - {'volume_id': self.volume_id}, instance=self.instance) - - except Exception as e: - LOG.exception('PowerVM error detaching volume from virtual ' - 'machine.', instance=self.instance) - ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e), - 'instance_name': self.instance.name} - raise p_exc.VolumeDetachFailed(**ex_args) - - # Taken from os_brick.initiator.connectors.base_iscsi.py - def _iterate_all_targets(self, connection_properties): - for portal, iqn, lun in self._get_all_targets(connection_properties): - props = copy.deepcopy(connection_properties) - props['target_portal'] = portal - props['target_iqn'] = iqn - props['target_lun'] = lun - for key in ('target_portals', 'target_iqns', 'target_luns'): - props.pop(key, None) - yield props - - def _get_all_targets(self, connection_properties): - if all([key in connection_properties for key in ('target_portals', - 'target_iqns', - 'target_luns')]): - return zip(connection_properties['target_portals'], - connection_properties['target_iqns'], - connection_properties['target_luns']) - - return [(connection_properties['target_portal'], - connection_properties['target_iqn'], - connection_properties.get('target_lun', 0))] diff --git a/nova_powervm/virt/powervm/volume/local.py b/nova_powervm/virt/powervm/volume/local.py deleted file mode 100644 index 12f0fad6..00000000 --- a/nova_powervm/virt/powervm/volume/local.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova_powervm.virt.powervm.volume import fileio - - -class LocalVolumeAdapter(fileio.FileIOVolumeAdapter): - """Connects Local Cinder Volumes to PowerVM VMs.""" - - def _get_path(self): - return self.connection_info['data']['device_path'] diff --git a/nova_powervm/virt/powervm/volume/nfs.py b/nova_powervm/virt/powervm/volume/nfs.py deleted file mode 100644 index 082e0f23..00000000 --- a/nova_powervm/virt/powervm/volume/nfs.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
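Looking back at the iSCSI target helpers just above (_get_all_targets and _iterate_all_targets, adapted from os-brick): the sketch below shows, with made-up portals and IQNs, how multi-target connection properties collapse into per-target dicts when multipath is not in play:

    import copy

    def get_all_targets(props):
        # Multi-target properties zip into (portal, iqn, lun) triples;
        # otherwise fall back to the single-target keys (lun defaults to 0).
        if all(k in props for k in ('target_portals', 'target_iqns',
                                    'target_luns')):
            return list(zip(props['target_portals'], props['target_iqns'],
                            props['target_luns']))
        return [(props['target_portal'], props['target_iqn'],
                 props.get('target_lun', 0))]

    multi = {'target_portals': ['10.0.0.1:3260', '10.0.0.2:3260'],
             'target_iqns': ['iqn.2024-01.org.example:a',
                             'iqn.2024-01.org.example:b'],
             'target_luns': [1, 1]}
    for portal, iqn, lun in get_all_targets(multi):
        props = copy.deepcopy(multi)
        props.update(target_portal=portal, target_iqn=iqn, target_lun=lun)
        for key in ('target_portals', 'target_iqns', 'target_luns'):
            props.pop(key, None)
        print(props['target_portal'], props['target_iqn'],
              props['target_lun'])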
- -from nova_powervm.virt.powervm.volume import fileio -import os - - -class NFSVolumeAdapter(fileio.FileIOVolumeAdapter): - """Connects NFS Cinder Volumes to PowerVM VMs.""" - - def _get_path(self): - return os.path.join(self.connection_info['data']['export'], - self.connection_info['data']['name']) diff --git a/nova_powervm/virt/powervm/volume/npiv.py b/nova_powervm/virt/powervm/volume/npiv.py deleted file mode 100644 index 94ef2749..00000000 --- a/nova_powervm/virt/powervm/volume/npiv.py +++ /dev/null @@ -1,744 +0,0 @@ -# Copyright 2015, 2018 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from oslo_concurrency import lockutils -from oslo_log import log as logging -from taskflow import task - -from nova.compute import task_states -from oslo_serialization import jsonutils -from pypowervm import const as pvm_const -from pypowervm.tasks import client_storage as pvm_c_stor -from pypowervm.tasks import vfc_mapper as pvm_vfcm - -from nova_powervm import conf as cfg -from nova_powervm.conf import powervm as pvm_cfg -from nova_powervm.virt.powervm import exception as exc -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm.volume import driver as v_driver - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -WWPN_SYSTEM_METADATA_KEY = 'npiv_adpt_wwpns' -FABRIC_STATE_METADATA_KEY = 'fabric_state' -FS_UNMAPPED = 'unmapped' -FS_MIGRATING = 'migrating' -FS_INST_MAPPED = 'inst_mapped' -TASK_STATES_FOR_DISCONNECT = [task_states.DELETING, task_states.SPAWNING] - - -class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): - """The NPIV implementation of the Volume Adapter. - - NPIV stands for N_Port ID Virtualization. It is a means of providing - more efficient connections between virtual machines and Fibre Channel - backed SAN fabrics. - - From a management level, the main difference is that the Virtual Machine - will have its own WWPNs and own Virtual FC adapter. The Virtual I/O - Server only passes through communication directly to the VM itself. - """ - - @classmethod - def min_xags(cls): - """List of pypowervm XAGs needed to support this adapter.""" - # Storage are so physical FC ports are available - # FC mapping is for the connections between VIOS and client VM - return [pvm_const.XAG.VIO_FMAP, pvm_const.XAG.VIO_STOR] - - @classmethod - def vol_type(cls): - """The type of volume supported by this type.""" - return 'npiv' - - def _connect_volume(self, slot_mgr): - """Connects the volume. - - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is attached to the VM - """ - # Run the add for each fabric. - for fabric in self._fabric_names(): - self._add_maps_for_fabric(fabric, slot_mgr) - - def extend_volume(self): - # The compute node does not need to take any additional steps for the - # client to see the extended volume. - pass - - def _disconnect_volume(self, slot_mgr): - """Disconnect the volume. - - :param slot_mgr: A NovaSlotManager. 
Used to delete the client slots - used when a volume is detached from the VM - """ - # We should only delete the NPIV mappings if we are running through a - # VM deletion. VM deletion occurs when the task state is deleting. - # However, it can also occur during a 'roll-back' of the spawn. - # Disconnect of the volumes will only be called during a roll back - # of the spawn. We also want to check that the instance is on this - # host. If it isn't then we can remove the mappings because this is - # being called as the result of an evacuation clean up. - if (self.instance.task_state not in TASK_STATES_FOR_DISCONNECT): - if (self.instance.host in [None, CONF.host]): - # NPIV should only remove the VFC mapping upon a destroy of - # the VM - return - - # Run the disconnect for each fabric - for fabric in self._fabric_names(): - self._remove_maps_for_fabric(fabric) - - def pre_live_migration_on_source(self, mig_data): - """Performs pre live migration steps for the volume on the source host. - - Certain volume connectors may need to pass data from the source host - to the target. This may be required to determine how volumes connect - through the Virtual I/O Servers. - - This method gives the volume connector an opportunity to update the - mig_data (a dictionary) with any data that is needed for the target - host during the pre-live migration step. - - Since the source host has no native pre_live_migration step, this is - invoked from check_can_live_migrate_source in the overall live - migration flow. - - :param mig_data: A dictionary that the method can update to include - data needed by the pre_live_migration_at_destination - method. - """ - fabrics = self._fabric_names() - vios_wraps = self.stg_ftsk.feed - # This mapping contains the client slots used on a given vios. - # { vios_uuid: [slot_num, ...], vios2_uuid: [slot_num2,..] } - slot_peer_dict = dict() - for fabric in fabrics: - npiv_port_maps = self._get_fabric_meta(fabric) - if not npiv_port_maps: - continue - - client_slots = [] - for port_map in npiv_port_maps: - vios_w, vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns( - vios_wraps, port_map[1].split()) - slot_num = vfc_map.client_adapter.lpar_slot_num - vios_uuid = vios_w.partition_uuid - if vios_uuid not in slot_peer_dict: - slot_peer_dict[vios_uuid] = [] - slot_peer_dict[vios_uuid].append(slot_num) - client_slots.append(slot_num) - - # Set the client slots into the fabric data to pass to the - # destination. Only strings can be stored. - mig_data['src_npiv_fabric_slots_%s' % fabric] = ( - jsonutils.dumps(client_slots)) - # The target really doesn't care what the UUID is of the source VIOS - # it is on a different server. So let's strip that out and just - # get the values. - mig_data['src_vios_peer_slots'] = ( - jsonutils.dumps(list(slot_peer_dict.values()))) - - def pre_live_migration_on_destination(self, mig_data): - """Perform pre live migration steps for the volume on the target host. - - This method performs any pre live migration that is needed. - - Certain volume connectors may need to pass data from the source host - to the target. This may be required to determine how volumes connect - through the Virtual I/O Servers. - - This method will be called after the pre_live_migration_on_source - method. The data from the pre_live call will be passed in via the - mig_data. This method should put its output into the dest_mig_data. - - :param mig_data: Dict of migration data for the destination server. 
- If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. - """ - vios_wraps = self.stg_ftsk.feed - if 'src_vios_peer_slots' in mig_data: - self._pre_live_migration_on_dest_new(vios_wraps, mig_data) - else: - self._pre_live_migration_on_dest_legacy(vios_wraps, mig_data) - - def _pre_live_migration_on_dest_new(self, vios_wraps, mig_data): - # Need to first derive the port mappings that can be passed back - # to the source system for the live migration call. This tells - # the source system what 'vfc mappings' to pass in on the live - # migration command. - fabric_data = dict() - for fabric in self._fabric_names(): - fab_slots = jsonutils.loads( - mig_data['src_npiv_fabric_slots_%s' % fabric]) - ports = self._fabric_ports(fabric) - fabric_data[fabric] = {'slots': fab_slots, - 'p_port_wwpns': ports} - - slot_peers = jsonutils.loads( - mig_data['src_vios_peer_slots']) - fabric_mapping = pvm_vfcm.build_migration_mappings( - vios_wraps, fabric_data, slot_peers) - mig_data['vfc_lpm_mappings'] = jsonutils.dumps(fabric_mapping) - - def _pre_live_migration_on_dest_legacy(self, vios_wraps, mig_data): - # Used in case the source server is running an old nova-compute (ex. - # Mitaka). To be removed in Ocata or Pike. - # - # Need to first derive the port mappings that can be passed back - # to the source system for the live migration call. This tells - # the source system what 'vfc mappings' to pass in on the live - # migration command. - for fabric in self._fabric_names(): - slots = jsonutils.loads( - mig_data['src_npiv_fabric_slots_%s' % fabric]) - fabric_mapping = pvm_vfcm.build_migration_mappings_for_fabric( - vios_wraps, self._fabric_ports(fabric), slots) - mig_data['dest_npiv_fabric_mapping_%s' % fabric] = ( - jsonutils.dumps(fabric_mapping)) - # Reverse the vios wrapper so that the other fabric will get the - # on the second vios. - vios_wraps.reverse() - - # Collate all of the individual fabric mappings into a single element. - full_map = [] - for key, value in mig_data.items(): - if key.startswith('dest_npiv_fabric_mapping_'): - full_map.extend(jsonutils.loads(value)) - mig_data['vfc_lpm_mappings'] = jsonutils.dumps(full_map) - - def post_live_migration_at_destination(self, mig_vol_stor): - """Perform post live migration steps for the volume on the target host. - - This method performs any post live migration that is needed. Is not - required to be implemented. - - :param mig_vol_stor: An unbounded dictionary that will be passed to - each volume adapter during the post live migration - call. Adapters can store data in here that may - be used by subsequent volume adapters. - """ - vios_wraps = self.stg_ftsk.feed - - # This method will run on the target host after the migration is - # completed. Right after this the instance.save is invoked from the - # manager. Given that, we need to update the order of the WWPNs. - # The first WWPN is the one that is logged into the fabric and this - # will now indicate that our WWPN is logged in. - LOG.debug('Post live migrate volume store: %s', mig_vol_stor, - instance=self.instance) - for fabric in self._fabric_names(): - # We check the mig_vol_stor to see if this fabric has already been - # flipped. If so, we can continue. 
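The slot handoff described above reduces to a string round trip, since migration data can only carry strings. A minimal sketch with invented fabric names and slot numbers, using the stdlib json module where the driver uses oslo's jsonutils:

    import json  # stands in for oslo_serialization.jsonutils

    # Source side: serialize per-fabric client slots and the VIOS peer
    # slot lists before the migration starts.
    mig_data = {}
    client_slots = {'A': [2, 3], 'B': [4, 5]}
    slot_peer_dict = {'vios-uuid-1': [2, 4], 'vios-uuid-2': [3, 5]}
    for fabric, slots in client_slots.items():
        mig_data['src_npiv_fabric_slots_%s' % fabric] = json.dumps(slots)
    mig_data['src_vios_peer_slots'] = json.dumps(
        list(slot_peer_dict.values()))

    # Destination side: decode the strings back into structures.
    fab_slots = json.loads(mig_data['src_npiv_fabric_slots_A'])
    slot_peers = json.loads(mig_data['src_vios_peer_slots'])
    print(fab_slots, slot_peers)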
- fabric_key = '%s_flipped' % fabric - if mig_vol_stor.get(fabric_key, False): - continue - - # Must not be flipped, so execute the flip - npiv_port_maps = self._get_fabric_meta(fabric) - new_port_maps = [] - for port_map in npiv_port_maps: - # Flip the WPWNs - c_wwpns = port_map[1].split() - c_wwpns.reverse() - LOG.debug('Flipping WWPNs, ports: %s wwpns: %s', - port_map, c_wwpns, instance=self.instance) - # Get the new physical WWPN. - vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(vios_wraps, - c_wwpns)[1] - p_wwpn = vfc_map.backing_port.wwpn - - # Build the new map. - new_map = (p_wwpn, " ".join(c_wwpns)) - new_port_maps.append(new_map) - self._set_fabric_meta(fabric, new_port_maps) - self._set_fabric_state(fabric, FS_INST_MAPPED) - - # Store that this fabric is now flipped. - mig_vol_stor[fabric_key] = True - - def _is_initial_wwpn(self, fc_state, fabric): - """Determines if the invocation to wwpns is for a general method. - - A 'general' method would be a spawn (with a volume) or a volume attach - or detach. - - :param fc_state: The state of the fabric. - :param fabric: The name of the fabric. - :return: True if the invocation appears to be for a spawn/volume - action. False otherwise. - """ - # Easy fabric state check. If its a state other than unmapped, it - # can't be an initial WWPN - if fc_state != FS_UNMAPPED: - return False - - # Easy state check. This is important in case of a rollback failure. - # If it is deleting or migrating, it definitely is not an initial WWPN - if self.instance.task_state in [task_states.DELETING, - task_states.MIGRATING]: - return False - - # Next, we have to check the fabric metadata. Having metadata - # indicates that we have been on at least a single host. However, - # a VM could be rescheduled. In that case, the 'physical WWPNs' won't - # match. So if any of the physical WWPNs are not supported by this - # host, we know that it is 'initial' for this host. - port_maps = self._get_fabric_meta(fabric) - if len(port_maps) > 0 and self._hosts_wwpn(port_maps): - return False - - # At this point, it should be correct. - LOG.info("Instance has not yet defined a WWPN on fabric %(fabric)s. " - "Appropriate WWPNs will be generated.", - {'fabric': fabric}, instance=self.instance) - return True - - def _hosts_wwpn(self, port_maps): - """Determines if this system hosts the port maps. - - Hosting the port map will be determined if one of the physical WWPNs - is hosted by one of the VIOSes. - - :param port_maps: The list of port mappings for the given fabric. - """ - vios_wraps = self.stg_ftsk.feed - if port_maps: - for port_map in port_maps: - for vios_w in vios_wraps: - for pfc_port in vios_w.pfc_ports: - if pfc_port.wwpn == port_map[0]: - return True - return False - - def _is_migration_wwpn(self, fc_state): - """Determines if the WWPN call is occurring during a migration. - - This determines if it is on the target host. - - :param fc_state: The fabrics state. - :return: True if the instance appears to be migrating to this host. - False otherwise. - """ - return fc_state == FS_INST_MAPPED and self.instance.host != CONF.host - - def _configure_wwpns_for_migration(self, fabric): - """Configures the WWPNs for a migration. - - During a NPIV migration, the WWPNs need to be flipped. This is because - the second WWPN is what will be logged in on the source system. So by - flipping them, we indicate that the 'second' wwpn is the new one to - log in. 
- - Another way to think of it is, this code should always return the - correct WWPNs for the system that the workload will be running on. - - This WWPNs invocation is done on the target server prior to the - actual migration call. It is used to build the volume connector. - Therefore this code simply flips the ports around. - - :param fabric: The fabric to configure. - :return: An updated port mapping. - """ - if self._get_fabric_state(fabric) == FS_MIGRATING: - # If the fabric is migrating, just return the existing port maps. - # They've already been flipped. - return self._get_fabric_meta(fabric) - - # When we migrate...flip the WWPNs around. This is so the other - # WWPN logs in on the target fabric. If this code is hit, the flip - # hasn't yet occurred (read as first volume on the instance). - port_maps = self._get_fabric_meta(fabric) - client_wwpns = [] - for port_map in port_maps: - c_wwpns = port_map[1].split() - c_wwpns.reverse() - client_wwpns.extend(c_wwpns) - - # Now derive the mapping to the VIOS physical ports on this system - # (the destination) - port_mappings = pvm_vfcm.derive_npiv_map( - self.stg_ftsk.feed, self._fabric_ports(fabric), client_wwpns) - - # This won't actually get saved by the process. The instance save will - # only occur after the 'post migration'. But if there are multiple - # volumes, their WWPNs calls will subsequently see the data saved - # temporarily here, and therefore won't "double flip" the wwpns back - # to the original. - self._set_fabric_meta(fabric, port_mappings) - self._set_fabric_state(fabric, FS_MIGRATING) - - # Return the mappings - return port_mappings - - @lockutils.synchronized('npiv_wwpns') - def wwpns(self): - """Builds the WWPNs of the adapters that will connect the ports.""" - # Refresh the instance. It could have been updated by a concurrent - # call from another thread to get the wwpns. - self.instance.refresh() - vios_wraps = self.stg_ftsk.feed - resp_wwpns = [] - - # If this is the first time to query the WWPNs for the instance, we - # need to generate a set of valid WWPNs. Loop through the configured - # FC fabrics and determine if these are new, part of a migration, or - # were already configured. - for fabric in self._fabric_names(): - fc_state = self._get_fabric_state(fabric) - LOG.info("NPIV wwpns fabric state=%(st)s.", - {'st': fc_state}, instance=self.instance) - - if self._is_initial_wwpn(fc_state, fabric): - # Get a set of WWPNs that are globally unique from the system. - v_wwpns = pvm_vfcm.build_wwpn_pair( - self.adapter, self.host_uuid, - pair_count=self._ports_per_fabric()) - - # Derive the virtual to physical port mapping - port_maps = pvm_vfcm.derive_npiv_map( - vios_wraps, self._fabric_ports(fabric), v_wwpns) - - # the fabric is mapped to the physical port) and the fabric - # state. - self._set_fabric_meta(fabric, port_maps) - self._set_fabric_state(fabric, FS_UNMAPPED) - self.instance.save() - elif self._is_migration_wwpn(fc_state): - # The migration process requires the 'second' wwpn from the - # fabric to be used. - port_maps = self._configure_wwpns_for_migration(fabric) - else: - # This specific fabric had been previously set. Just pull - # from the meta (as it is likely already mapped to the - # instance) - port_maps = self._get_fabric_meta(fabric) - - # Every loop through, we reverse the vios wrappers. This is - # done so that if Fabric A only has 1 port, it goes on the - # first VIOS. Then Fabric B would put its port on a different - # VIOS. 
This serves as a form of multipathing (so that your - # paths are not restricted to a single VIOS). - vios_wraps.reverse() - - # Port map is set by either conditional, but may be set to None. - # If not None, then add the WWPNs to the response. - if port_maps is not None: - for mapping in port_maps: - # Only add the first WWPN. That is the one that will be - # logged into the fabric. - resp_wwpns.append(mapping[1].split()[0]) - - # The return object needs to be a list for the volume connector. - return resp_wwpns - - def _ensure_phys_ports_for_system(self, npiv_port_maps, vios_wraps, - fabric): - """Ensures that the npiv_port_map is correct for the system. - - Rare scenarios can occur where the physical port on the NPIV port - map does not match the actual port. This is generally caused when the - last volume is removed from the VM, the VM is migrated to another host, - and then a new volume is attached. - - Stale metadata would be there (as it can't be cleaned out) on the - attach. This method clears that up. - - :param npiv_port_maps: The existing port maps. - :param vios_wraps: The Virtual I/O Server wraps. - :param fabric: The name of the fabric - :return: The npiv_port_maps. May be unchanged. - """ - # Check that all physical ports in the mappings belong to 'this' - # set of VIOSs. - if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0] - for pm in npiv_port_maps): - LOG.debug("All physical ports were found on the given Virtual I/O " - "Server(s).", instance=self.instance) - return npiv_port_maps - - # If ANY of the VIOS ports were not there, rebuild the port maps - LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.", - npiv_port_maps, instance=self.instance) - v_wwpns = [] - for port_map in npiv_port_maps: - v_wwpns.extend(port_map[1].split()) - self._set_fabric_state(fabric, FS_UNMAPPED) - - # Derive new maps and don't preserve existing maps - npiv_port_maps = pvm_vfcm.derive_npiv_map( - vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False) - LOG.debug("Rebuilt port maps: %s", npiv_port_maps, - instance=self.instance) - self._set_fabric_meta(fabric, npiv_port_maps) - LOG.warning("Had to update the system metadata for the WWPNs due to " - "incorrect physical WWPNs on fabric %(fabric)s", - {'fabric': fabric}, instance=self.instance) - - return npiv_port_maps - - def _add_maps_for_fabric(self, fabric, slot_mgr): - """Adds the vFC storage mappings to the VM for a given fabric. - - :param fabric: The fabric to add the mappings to. - :param slot_mgr: A NovaSlotManager. Used to store/retrieve the client - slots used when a volume is attached to the VM - """ - vios_wraps = self.stg_ftsk.feed - # Ensure the physical ports in the metadata are not for a different - # host (stale). If so, rebuild the maps with current info. - npiv_port_maps = self._ensure_phys_ports_for_system( - self._get_fabric_meta(fabric), vios_wraps, fabric) - volume_id = self.connection_info['serial'] - - # This loop adds the maps from the appropriate VIOS to the client VM - slot_ids = copy.deepcopy(slot_mgr.build_map.get_vfc_slots( - fabric, len(npiv_port_maps))) - for npiv_port_map in npiv_port_maps: - vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map) - if vios_w is None: - LOG.error("Mappings were not able to find a proper VIOS. 
" - "The port mappings were %s.", npiv_port_maps, - instance=self.instance) - raise exc.VolumeAttachFailed( - volume_id=volume_id, instance_name=self.instance.name, - reason=_("Unable to find a Virtual I/O Server that " - "hosts the NPIV port map for the server.")) - ls = [LOG.info, "Adding NPIV mapping for instance %(inst)s " - "for Virtual I/O Server %(vios)s.", - {'inst': self.instance.name, 'vios': vios_w.name}] - - # Add the subtask to add the specific map. - slot_num = slot_ids.pop() - self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask( - pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map, - lpar_slot_num=slot_num, logspec=ls) - - # Store the client slot number for the NPIV mapping (for rebuild - # scenarios) - def set_vol_meta(): - vios_wraps = self.stg_ftsk.feed - port_maps = self._get_fabric_meta(fabric) - for port_map in port_maps: - # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ] - # We only need one of the two client wwpns. - vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map) - c_wwpns = port_map[1].split() - vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(vios_w, - c_wwpns[0]) - - # If there is no mapping, then don't add it. It means that - # the client WWPN is hosted on a different VIOS. - if vfc_mapping is None: - continue - - # However, by this point we know that it is hosted on this - # VIOS. So the vfc_mapping will have the client adapter - slot_mgr.register_vfc_mapping(vfc_mapping, fabric) - - self.stg_ftsk.add_post_execute(task.FunctorTask( - set_vol_meta, name='fab_slot_%s_%s' % (fabric, volume_id))) - - # After all the mappings, make sure the fabric state is updated. - def set_state(): - self._set_fabric_state(fabric, FS_INST_MAPPED) - self.stg_ftsk.add_post_execute(task.FunctorTask( - set_state, name='fab_%s_%s' % (fabric, volume_id))) - - def _remove_maps_for_fabric(self, fabric): - """Removes the vFC storage mappings from the VM for a given fabric. - - :param fabric: The fabric to remove the mappings from. - """ - npiv_port_maps = self._get_fabric_meta(fabric) - if not npiv_port_maps: - # If no mappings exist, exit out of the method. - return - - vios_wraps = self.stg_ftsk.feed - - for npiv_port_map in npiv_port_maps: - ls = [LOG.info, "Removing a NPIV mapping for instance " - "%(inst)s for fabric %(fabric)s.", - {'inst': self.instance.name, 'fabric': fabric}] - vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map) - - if vios_w is not None: - # Add the subtask to remove the specific map - task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid] - task_wrapper.add_functor_subtask( - pvm_vfcm.remove_maps, self.vm_uuid, - port_map=npiv_port_map, logspec=ls) - else: - LOG.warning("No storage connections found between the Virtual " - "I/O Servers and FC Fabric %(fabric)s.", - {'fabric': fabric}, instance=self.instance) - - def _set_fabric_state(self, fabric, state): - """Sets the fabric state into the instance's system metadata. - - :param fabric: The name of the fabric - :param state: state of the fabric which needs to be set - - Possible Valid States: - FS_UNMAPPED: Initial state unmapped. - FS_INST_MAPPED: Fabric is mapped with the nova instance. - """ - meta_key = self._sys_fabric_state_key(fabric) - LOG.info("Setting Fabric state=%(st)s.", - {'st': state}, instance=self.instance) - self.instance.system_metadata[meta_key] = state - - def _get_fabric_state(self, fabric): - """Gets the fabric state from the instance's system metadata. 
- - :param fabric: The name of the fabric - :return: The state of the fabric which needs to be set - - Possible Valid States: - FS_UNMAPPED: Initial state unmapped. - FS_INST_MAPPED: Fabric is mapped with the nova instance. - """ - meta_key = self._sys_fabric_state_key(fabric) - if self.instance.system_metadata.get(meta_key) is None: - self.instance.system_metadata[meta_key] = FS_UNMAPPED - - return self.instance.system_metadata[meta_key] - - def _sys_fabric_state_key(self, fabric): - """Returns the nova system metadata key for a given fabric.""" - return FABRIC_STATE_METADATA_KEY + '_' + fabric - - def _set_fabric_meta(self, fabric, port_map): - """Sets the port map into the instance's system metadata. - - The system metadata will store per-fabric port maps that link the - physical ports to the virtual ports. This is needed for the async - nature between the wwpns call (get_volume_connector) and the - connect_volume (spawn). - - :param fabric: The name of the fabric. - :param port_map: The port map (as defined via the derive_npiv_map - pypowervm method). - """ - - # We will store the metadata in comma-separated strings with up to 4 - # three-token pairs. Each set of three comprises the Physical Port - # WWPN followed by the two Virtual Port WWPNs: - # Ex: - # npiv_wwpn_adpt_A: - # "p_wwpn1,v_wwpn1,v_wwpn2,p_wwpn2,v_wwpn3,v_wwpn4,..." - # npiv_wwpn_adpt_A_2: - # "p_wwpn5,v_wwpn9,vwwpn_10,p_wwpn6,..." - - meta_elems = [] - for p_wwpn, v_wwpn in port_map: - meta_elems.append(p_wwpn) - meta_elems.extend(v_wwpn.split()) - - LOG.info("Fabric %(fabric)s wwpn metadata will be set to %(meta)s.", - {'fabric': fabric, 'meta': ",".join(meta_elems)}, - instance=self.instance) - - # Clear out the original metadata. We may be reducing the number of - # keys (ex. reschedule) so we need to just delete what we had before - # we add something new. - meta_key_root = self._sys_meta_fabric_key(fabric) - for key in tuple(self.instance.system_metadata.keys()): - if key.startswith(meta_key_root): - del self.instance.system_metadata[key] - - # Build up the mapping for the new keys. - fabric_id_iter = 1 - meta_key = meta_key_root - key_len = len(meta_key) - - for key in range(self._get_num_keys(port_map)): - start_elem = 12 * (fabric_id_iter - 1) - meta_value = ",".join(meta_elems[start_elem:start_elem + 12]) - self.instance.system_metadata[meta_key] = meta_value - # If this is not the first time through, replace the end else cat - if fabric_id_iter > 1: - fabric_id_iter += 1 - meta_key = meta_key.replace(meta_key[key_len:], - "_%s" % fabric_id_iter) - else: - fabric_id_iter += 1 - meta_key = meta_key + "_%s" % fabric_id_iter - - def _get_fabric_meta(self, fabric): - """Gets the port map from the instance's system metadata. - - See _set_fabric_meta. - - :param fabric: The name of the fabric. - :return: The port map (as defined via the derive_npiv_map pypowervm - method. - """ - meta_key = self._sys_meta_fabric_key(fabric) - - if self.instance.system_metadata.get(meta_key) is None: - # If no mappings exist, log a warning. - LOG.warning("No NPIV mappings exist for instance on fabric " - "%(fabric)s. 
May not have connected to the fabric " - "yet or fabric configuration was recently modified.", - {'fabric': fabric}, instance=self.instance) - return [] - - wwpns = self.instance.system_metadata[meta_key] - key_len = len(meta_key) - iterator = 2 - meta_key = meta_key + "_" + str(iterator) - while self.instance.system_metadata.get(meta_key) is not None: - meta_value = self.instance.system_metadata[meta_key] - wwpns += "," + meta_value - iterator += 1 - meta_key = meta_key.replace(meta_key[key_len:], - "_" + str(iterator)) - - wwpns = wwpns.split(",") - - # Rebuild the WWPNs into the natural structure. - return [(p, ' '.join([v1, v2])) for p, v1, v2 - in zip(wwpns[::3], wwpns[1::3], wwpns[2::3])] - - def _sys_meta_fabric_key(self, fabric): - """Returns the nova system metadata key for a given fabric.""" - return WWPN_SYSTEM_METADATA_KEY + '_' + fabric - - def _fabric_names(self): - """Returns a list of the fabric names.""" - return pvm_cfg.NPIV_FABRIC_WWPNS.keys() - - def _fabric_ports(self, fabric_name): - """Returns a list of WWPNs for the fabric's physical ports.""" - return pvm_cfg.NPIV_FABRIC_WWPNS[fabric_name] - - def _ports_per_fabric(self): - """Returns the number of virtual ports to be used per fabric.""" - return CONF.powervm.ports_per_fabric - - def _get_num_keys(self, port_map): - """Returns the number of keys we need to generate""" - # Keys will have up to 4 mapping pairs so we determine based on that - if len(port_map) % 4 > 0: - return int(len(port_map) / 4 + 1) - else: - return int(len(port_map) / 4) diff --git a/nova_powervm/virt/powervm/volume/rbd.py b/nova_powervm/virt/powervm/volume/rbd.py deleted file mode 100644 index 8d8eec42..00000000 --- a/nova_powervm/virt/powervm/volume/rbd.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2017 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
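Before the RBD adapter that follows, here is a self-contained round trip of the fabric metadata packing described in _set_fabric_meta and _get_fabric_meta above: each port map flattens to three tokens, keys hold at most twelve tokens (four triples), and unpacking strides by three. The WWPN strings are toy values:

    # Pack: flatten (p_wwpn, 'v_wwpn1 v_wwpn2') maps into comma-joined
    # strings of at most 12 tokens per system-metadata key.
    port_maps = [('pA', 'v1 v2'), ('pB', 'v3 v4'), ('pC', 'v5 v6'),
                 ('pD', 'v7 v8'), ('pE', 'v9 v10')]
    elems = []
    for p_wwpn, v_wwpns in port_maps:
        elems.append(p_wwpn)
        elems.extend(v_wwpns.split())

    meta, root = {}, 'npiv_adpt_wwpns_A'   # root key for fabric 'A'
    for i in range(0, len(elems), 12):
        key = root if i == 0 else '%s_%d' % (root, i // 12 + 1)
        meta[key] = ','.join(elems[i:i + 12])

    # Unpack: chain the keys back together, then stride by three.
    # (Plain string sort is fine here for fewer than ten keys.)
    tokens = ','.join(meta[k] for k in sorted(meta)).split(',')
    rebuilt = [(p, ' '.join((v1, v2)))
               for p, v1, v2 in zip(tokens[::3], tokens[1::3], tokens[2::3])]
    assert rebuilt == port_maps
    print(meta)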
- -import abc -import six -from taskflow import task - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import vm -from nova_powervm.virt.powervm.volume import driver as v_driver -from oslo_log import log as logging -from pypowervm import const as pvm_const -from pypowervm.tasks import client_storage as pvm_c_stor -from pypowervm.tasks import hdisk -from pypowervm.tasks import partition -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.wrappers import storage as pvm_stg - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class RBDVolumeAdapter(v_driver.PowerVMVolumeAdapter): - """Base class for connecting ceph based Cinder Volumes to PowerVM VMs.""" - - def __init__(self, adapter, host_uuid, instance, connection_info, - stg_ftsk=None): - super(RBDVolumeAdapter, self).__init__( - adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk) - self._nl_vios_ids = None - - @classmethod - def min_xags(cls): - return [pvm_const.XAG.VIO_SMAP] - - @classmethod - def vol_type(cls): - """The type of volume supported by this type.""" - return 'rbd' - - @property - def vios_uuids(self): - """List the UUIDs of the Virtual I/O Servers hosting the storage.""" - # Get the hosting UUID - if self._nl_vios_ids is None: - nl_vios_wrap = partition.get_mgmt_partition(self.adapter) - self._nl_vios_ids = [nl_vios_wrap.uuid] - return self._nl_vios_ids - - def pre_live_migration_on_destination(self, mig_data): - """Perform pre live migration steps for the volume on the target host. - - This method performs any pre live migration that is needed. - - This method will be called after the pre_live_migration_on_source - method. The data from the pre_live call will be passed in via the - mig_data. This method should put its output into the dest_mig_data. - - :param mig_data: Dict of migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. - """ - for vios_uuid in self.vios_uuids: - exists, name = self.is_volume_on_vios(vios_uuid) - if exists and name is not None: - return - name = self.connection_info["data"]["name"] - LOG.warning("RBD %s not found", name, instance=self.instance) - raise p_exc.VolumePreMigrationFailed( - volume_id=self.volume_id, instance_name=self.instance.name) - - def _connect_volume(self, slot_mgr): - name = self.connection_info["data"]["name"] - volid = self.connection_info["data"]["volume_id"] - user = CONF.powervm.rbd_user - rbd = pvm_stg.RBD.bld_ref(self.adapter, name, tag=volid, user=user) - - def add_func(vios_w): - # If the vios doesn't match, just return - if vios_w.uuid not in self.vios_uuids: - return None - - LOG.info("Adding rbd disk connection to VIOS %(vios)s.", - {'vios': vios_w.name}, instance=self.instance) - slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, name) - if slot_mgr.is_rebuild and not slot: - LOG.debug('Detected a device with path %(path)s on VIOS ' - '%(vios)s on the rebuild that did not exist on the ' - 'source. Ignoring.', - {'path': name, 'vios': vios_w.uuid}, - instance=self.instance) - return None - - mapping = tsk_map.build_vscsi_mapping( - self.host_uuid, vios_w, self.vm_uuid, rbd, lpar_slot_num=slot, - lua=lua) - return tsk_map.add_map(vios_w, mapping) - - self.stg_ftsk.add_functor_subtask(add_func) - - # Run after all the deferred tasks the query to save the slots in the - # slot map. 
- def set_slot_info(): - vios_wraps = self.stg_ftsk.feed - partition_id = vm.get_vm_id(self.adapter, self.vm_uuid) - for vios_w in vios_wraps: - scsi_map = pvm_c_stor.udid_to_scsi_mapping( - vios_w, name, partition_id) - if not scsi_map: - continue - slot_mgr.register_vscsi_mapping(scsi_map) - - self.stg_ftsk.add_post_execute(task.FunctorTask( - set_slot_info, name='rbd_slot_%s' % name)) - - def _disconnect_volume(self, slot_mgr): - # Build the match function - name = self.connection_info["data"]["name"] - match_func = tsk_map.gen_match_func(pvm_stg.VDisk, - names=[name]) - - # Make sure the remove function will run within the transaction manager - def rm_func(vios_w): - # If the vios doesn't match, just return - if vios_w.uuid not in self.vios_uuids: - return None - - LOG.info("Disconnecting instance %(inst)s from storage " - "disks.", {'inst': self.instance.name}, - instance=self.instance) - removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid, - match_func=match_func) - for rm_map in removed_maps: - slot_mgr.drop_vscsi_mapping(rm_map) - return removed_maps - - self.stg_ftsk.add_functor_subtask(rm_func) - # Find the disk directly. - vios_w = self.stg_ftsk.wrapper_tasks[self.vios_uuids[0]].wrapper - mappings = tsk_map.find_maps(vios_w.scsi_mappings, - client_lpar_id=self.vm_uuid, - match_func=match_func) - - return [x.backing_storage for x in mappings] - - def is_volume_on_vios(self, vio): - """Returns whether or not the volume file is on a VIOS. - - This method is used during live-migration and rebuild to - check if the volume is available on the target host. - - :param vio: The Virtual I/O Server wrapper or UDID string of the VIOS. - :return: True if the file is on the VIOS. False - otherwise. - :return: The file path. - """ - try: - vio_uuid = vio.uuid - except AttributeError: - vio_uuid = vio - if vio_uuid not in self.vios_uuids: - return False, None - name = self.connection_info["data"]["name"] - exists = hdisk.rbd_exists(self.adapter, vio_uuid, name) - return exists, name if exists else None diff --git a/nova_powervm/virt/powervm/volume/volume.py b/nova_powervm/virt/powervm/volume/volume.py deleted file mode 100644 index 01df664f..00000000 --- a/nova_powervm/virt/powervm/volume/volume.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
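One small point worth calling out from the RBD adapter's is_volume_on_vios above: the vio argument may be either a wrapper object or a bare UUID string, normalized by catching AttributeError. In isolation (FakeVios is a stand-in, not a pypowervm class):

    def vio_uuid_of(vio):
        # Accept a VIOS wrapper (has .uuid) or a plain UUID string.
        try:
            return vio.uuid
        except AttributeError:
            return vio

    class FakeVios:
        uuid = '6c2e5c21-0001'

    print(vio_uuid_of(FakeVios()))        # -> '6c2e5c21-0001'
    print(vio_uuid_of('6c2e5c21-0001'))   # string passes through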
-from oslo_log import log as logging -from taskflow import task - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm.i18n import _ -from nova_powervm.virt.powervm import vm - -from pypowervm import const as pvm_const -from pypowervm.tasks import client_storage as pvm_c_stor -from pypowervm.tasks import hdisk -from pypowervm.tasks import scsi_mapper as tsk_map -from pypowervm.utils import transaction as tx -from pypowervm.wrappers import storage as pvm_stor -from pypowervm.wrappers import virtual_io_server as pvm_vios - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -UDID_KEY = 'target_UDID' -DEVNAME_KEY = 'target_devname' - - -class VscsiVolumeAdapter(object): - """VscsiVolumeAdapter that connects a Cinder volume to a VM. - - This volume adapter is a generic adapter for volume types that use PowerVM - vSCSI to host the volume to the VM. - """ - - def _connect_volume(self, slot_mgr): - """Connects the volume. - - :param connect_volume_to_vio: Function to connect a volume to the vio. - :param vios_w: Vios wrapper. - :return: True if mapping was created. - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM - """ - - # Its about to get weird. The transaction manager has a list of - # VIOSes. We could use those, but they only have SCSI mappings (by - # design). They do not have storage (super expensive). - # - # We need the storage xag when we are determining which mappings to - # add to the system. But we don't want to tie it to the stg_ftsk. If - # we do, every retry, every etag gather, etc... takes MUCH longer. - # - # So we get the VIOSes with the storage xag here, separately, to save - # the stg_ftsk from potentially having to run it multiple times. - connect_ftsk = tx.FeedTask( - 'connect_volume_to_vio', pvm_vios.VIOS.getter( - self.adapter, xag=[pvm_const.XAG.VIO_STOR, - pvm_const.XAG.VIO_SMAP])) - - # Find valid hdisks and map to VM. - connect_ftsk.add_functor_subtask( - self._connect_volume_to_vio, slot_mgr, provides='vio_modified', - flag_update=False) - - ret = connect_ftsk.execute() - - # Check the number of VIOSes - vioses_modified = 0 - for result in ret['wrapper_task_rets'].values(): - if result['vio_modified']: - vioses_modified += 1 - - partition_id = vm.get_vm_id(self.adapter, self.vm_uuid) - - # Update the slot information - def set_slot_info(): - vios_wraps = self.stg_ftsk.feed - for vios_w in vios_wraps: - scsi_map = pvm_c_stor.udid_to_scsi_mapping( - vios_w, self._get_udid(), partition_id) - if not scsi_map: - continue - slot_mgr.register_vscsi_mapping(scsi_map) - - self._validate_vios_on_connection(vioses_modified) - self.stg_ftsk.add_post_execute(task.FunctorTask( - set_slot_info, name='hdisk_slot_%s' % self._get_udid())) - - def _validate_vios_on_connection(self, num_vioses_found): - """Validates that the correct number of VIOSes were discovered. - - Certain environments may have redundancy requirements. For PowerVM - this is achieved by having multiple Virtual I/O Servers. This method - will check to ensure that the operator's requirements for redundancy - have been met. If not, a specific error message will be raised. - - :param num_vioses_found: The number of VIOSes the hdisk was found on. - """ - # Is valid as long as the vios count exceeds the conf value. 
if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required: - return - - # Should have a custom message based on zero or 'some but not enough' - # I/O Servers. - if num_vioses_found == 0: - msg = (_('Failed to discover valid hdisk on any Virtual I/O ' - 'Server for volume %(volume_id)s.') % - {'volume_id': self.volume_id}) - else: - msg = (_('Failed to discover the hdisk on the required number of ' - 'Virtual I/O Servers. Volume %(volume_id)s required ' - '%(vios_req)d Virtual I/O Servers, but the disk was only ' - 'found on %(vios_act)d Virtual I/O Servers.') % - {'volume_id': self.volume_id, 'vios_act': num_vioses_found, - 'vios_req': CONF.powervm.vscsi_vios_connections_required}) - ex_args = {'volume_id': self.volume_id, 'reason': msg, - 'instance_name': self.instance.name} - raise p_exc.VolumeAttachFailed(**ex_args) - - def _add_append_mapping(self, vios_uuid, device_name, lpar_slot_num=None, - lua=None, target_name=None, udid=None, - tag=None): - """Update the stg_ftsk to append the mapping to the VIOS. - - :param vios_uuid: The UUID of the vios for the pypowervm adapter. - :param device_name: The hdisk device name. - :param lpar_slot_num: (Optional, Default:None) If specified, the client - lpar slot number to use on the mapping. If left - as None, it will use the next available slot - number. - :param lua: (Optional. Default: None) Logical Unit Address to set on - the TargetDevice. If None, the LUA will be assigned by the - server. Should be specified for all of the VSCSIMappings - for a particular bus, or none of them. - :param target_name: (Optional. Default: None) Name to set on the - TargetDevice. If None, it will be assigned by the - server. - :param udid: (Optional. Default: None) Universal Disk IDentifier of - the physical volume to attach. Used to resolve name - conflicts. - :param tag: String tag to set on the physical volume. - """ - def add_func(vios_w): - LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on " - "vios %(vios)s.", - {'dev': device_name, 'vios': vios_w.name}, - instance=self.instance) - pv = pvm_stor.PV.bld(self.adapter, device_name, udid=udid, - tag=tag) - v_map = tsk_map.build_vscsi_mapping( - self.host_uuid, vios_w, self.vm_uuid, pv, - lpar_slot_num=lpar_slot_num, lua=lua, target_name=target_name) - return tsk_map.add_map(vios_w, v_map) - self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func) - - def _get_udid(self): - """This method will return the hdisk udid stored in connection_info. - - :return: The target_udid associated with the hdisk - """ - try: - return self.connection_info['data'][UDID_KEY] - except (KeyError, ValueError): - # It's common to lose our specific data in the BDM. The connection - # information can be 'refreshed' by operations like LPM and resize - LOG.info('Failed to retrieve target_UDID key from BDM for volume ' - 'id %s', self.volume_id, instance=self.instance) - return None - - def _set_udid(self, udid): - """This method will set the hdisk udid in the connection_info. - - :param udid: The hdisk target_udid to be stored in system_metadata - """ - self.connection_info['data'][UDID_KEY] = udid - - def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name, slot_mgr): - """Adds a subtask to remove the storage mapping. - - :param vm_uuid: The UUID of the VM instance - :param vios_uuid: The UUID of the vios for the pypowervm adapter. - :param device_name: The hdisk device name. - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM. 
- """ - def rm_func(vios_w): - LOG.info("Removing vSCSI mapping from physical volume %(dev)s " - "on vios %(vios)s", - {'dev': device_name, 'vios': vios_w.name}, - instance=self.instance) - removed_maps = tsk_map.remove_maps( - vios_w, vm_uuid, - tsk_map.gen_match_func(pvm_stor.PV, names=[device_name])) - for rm_map in removed_maps: - slot_mgr.drop_vscsi_mapping(rm_map) - return removed_maps - self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func) - - def _add_remove_hdisk(self, vio_wrap, device_name, - stg_ftsk=None): - """Adds a post-mapping task to remove the hdisk from the VIOS. - - This removal is only done after the mapping updates have completed. - This method is also used during migration to remove hdisks that remain - on the source host after the VM is migrated to the destination. - - :param vio_wrap: The Virtual I/O Server wrapper to remove the disk - from. - :param device_name: The hdisk name to remove. - :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk - """ - def rm_hdisk(): - LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server " - "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name}, - instance=self.instance) - try: - # Attempt to remove the hDisk - hdisk.remove_hdisk(self.adapter, CONF.host, device_name, - vio_wrap.uuid) - except Exception: - # If there is a failure, log it, but don't stop the process - LOG.exception("There was an error removing the hdisk " - "%(disk)s from Virtual I/O Server %(vios)s.", - {'disk': device_name, 'vios': vio_wrap.name}, - instance=self.instance) - - # Check if there are not multiple mapping for the device - if not self._check_host_mappings(vio_wrap, device_name): - name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name) - stg_ftsk = stg_ftsk or self.stg_ftsk - stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name)) - else: - LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server " - "%(vios)s because it has existing storage mappings", - {'disk': device_name, 'vios': vio_wrap.name}, - instance=self.instance) - - def _check_host_mappings(self, vios_wrap, device_name): - """Checks if the given hdisk has multiple mappings - - :param vio_wrap: The Virtual I/O Server wrapper to remove the disk - from. - :param device_name: The hdisk name to remove. 
- - :return: True if there are multiple instances using the given hdisk - """ - vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed - if v.uuid == vios_wrap.uuid) - mappings = tsk_map.find_maps( - vios_scsi_mappings, None, - tsk_map.gen_match_func(pvm_stor.PV, names=[device_name])) - - LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS " - "%(vios)s", {'num': len(mappings), 'dev': device_name, - 'vios': vios_wrap.name}, instance=self.instance) - # the mapping is still present as the task feed removes it later - return len(mappings) > 1 - - def _cleanup_volume(self, udid=None, devname=None): - """Cleanup the hdisk associated with this udid.""" - - if not udid and not devname: - LOG.warning('Could not remove hdisk for volume %s', self.volume_id, - instance=self.instance) - return - - LOG.info('Removing hdisk for udid: %s', udid, instance=self.instance) - - def find_hdisk_to_remove(vios_w): - if devname is None: - device_name = vios_w.hdisk_from_uuid(udid) - else: - device_name = devname - if device_name is None: - return - LOG.info('Adding deferred task to remove %(hdisk)s from VIOS ' - '%(vios)s.', {'hdisk': device_name, 'vios': vios_w.name}, - instance=self.instance) - self._add_remove_hdisk(vios_w, device_name, - stg_ftsk=rmv_hdisk_ftsk) - - # Create a feed task to get the vios, find the hdisk and remove it. - rmv_hdisk_ftsk = tx.FeedTask( - 'find_hdisk_to_remove', pvm_vios.VIOS.getter( - self.adapter, xag=[pvm_const.XAG.VIO_STOR])) - # Find vios hdisks for this udid to remove. - rmv_hdisk_ftsk.add_functor_subtask( - find_hdisk_to_remove, flag_update=False) - rmv_hdisk_ftsk.execute() diff --git a/nova_powervm/virt/powervm/volume/vscsi.py b/nova_powervm/virt/powervm/volume/vscsi.py deleted file mode 100644 index 5ad24ad9..00000000 --- a/nova_powervm/virt/powervm/volume/vscsi.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright IBM Corp. and contributors -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import lockutils -from oslo_log import log as logging - -from nova_powervm import conf as cfg -from nova_powervm.virt.powervm import exception as p_exc -from nova_powervm.virt.powervm import vm -from nova_powervm.virt.powervm.volume import driver as v_driver -from nova_powervm.virt.powervm.volume import volume - -from pypowervm import const as pvm_const -from pypowervm.tasks import hdisk -from pypowervm.tasks import partition as pvm_tpar -from pypowervm.utils import transaction as tx -from pypowervm.wrappers import virtual_io_server as pvm_vios - -import base64 -import six - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -UDID_KEY = 'target_UDID' - -# A global variable that will cache the physical WWPNs on the system. -_vscsi_pfc_wwpns = None - - -class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter, - v_driver.FibreChannelVolumeAdapter): - """The vSCSI implementation of the Volume Adapter. - - For physical volumes, hosted to the VIOS through Fibre Channel, that - connect to the VMs with vSCSI. 
- - vSCSI is the internal mechanism to link a given hdisk on the Virtual - I/O Server to a Virtual Machine. This volume driver will take the - information from the driver and link it to a given virtual machine. - """ - - def __init__(self, adapter, host_uuid, instance, connection_info, - stg_ftsk=None): - """Initializes the vSCSI Volume Adapter. - - :param adapter: The pypowervm adapter. - :param host_uuid: The pypowervm UUID of the host. - :param instance: The nova instance that the volume should connect to. - :param connection_info: Comes from the BDM. - :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the - I/O Operations. If provided, the Virtual I/O Server - mapping updates will be added to the FeedTask. This - defers the updates to some later point in time. If - the FeedTask is not provided, the updates will be run - immediately when the respective method is executed. - """ - super(PVVscsiFCVolumeAdapter, self).__init__( - adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk) - self._pfc_wwpns = None - - @classmethod - def min_xags(cls): - """List of pypowervm XAGs needed to support this adapter.""" - # SCSI mapping is for the connections between VIOS and client VM - return [pvm_const.XAG.VIO_SMAP] - - @classmethod - def vol_type(cls): - """The type of volume supported by this type.""" - return 'vscsi' - - def pre_live_migration_on_destination(self, mig_data): - """Perform pre live migration steps for the volume on the target host. - - This method performs any pre live migration that is needed. - - Certain volume connectors may need to pass data from the source host - to the target. This may be required to determine how volumes connect - through the Virtual I/O Servers. - - This method will be called after the pre_live_migration_on_source - method. The data from the pre_live call will be passed in via the - mig_data. This method should put its output into the dest_mig_data. - - :param mig_data: Dict of migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. - """ - volume_id = self.volume_id - found = False - - # See the connect_volume for why this is a direct call instead of - # using the tx_mgr.feed - vios_wraps = pvm_vios.VIOS.get(self.adapter, - xag=[pvm_const.XAG.VIO_STOR]) - - # Iterate through host vios list to find valid hdisks. - for vios_w in vios_wraps: - status, device_name, udid = self._discover_volume_on_vios( - vios_w, volume_id) - # If we found one, no need to check the others. - found = found or hdisk.good_discovery(status, device_name) - # if valid udid is returned save in mig_data - volume_key = 'vscsi-' + volume_id - if udid is not None: - mig_data[volume_key] = udid - - if not found or volume_key not in mig_data: - ex_args = dict(volume_id=volume_id, - instance_name=self.instance.name) - raise p_exc.VolumePreMigrationFailed(**ex_args) - - def post_live_migration_at_source(self, migrate_data): - """Performs post live migration for the volume on the source host. - - This method can be used to handle any steps that need to taken on - the source host after the VM is on the destination. - - :param migrate_data: volume migration data - """ - # Get the udid of the volume to remove the hdisk for. We can't - # use the connection information because LPM 'refreshes' it, which - # wipes out our data, so we use the data from the destination host - # to avoid having to discover the hdisk to get the udid. 
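The handshake described in the comment above comes down to a shared key convention: the destination stashes the discovered UDID under 'vscsi-<volume_id>' and the source reads it back for cleanup. A toy illustration with invented identifiers:

    volume_id = 'vol-0001'                 # hypothetical volume id
    mig_data = {}

    # Destination (pre_live_migration_on_destination) records the UDID.
    mig_data['vscsi-' + volume_id] = '01M0lCTTIxNDUxMjQ2MDA1'  # made up

    # Source (post_live_migration_at_source) reads the same key later,
    # avoiding a fresh hdisk discovery.
    udid = mig_data.get('vscsi-' + volume_id)
    print(udid)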
- udid = migrate_data.get('vscsi-' + self.volume_id) - self._cleanup_volume(udid) - - def cleanup_volume_at_destination(self, migrate_data): - """Performs volume cleanup after LPM failure on the dest host. - - This method can be used to handle any steps that need to be taken on - the destination host after the migration has failed. - - :param migrate_data: migration data - """ - udid = migrate_data.get('vscsi-' + self.volume_id) - self._cleanup_volume(udid) - - def is_volume_on_vios(self, vios_w): - """Returns whether or not the volume is on a VIOS. - - :param vios_w: The Virtual I/O Server wrapper. - :return: True if the volume driver's volume is on the VIOS. False - otherwise. - :return: The udid of the device. - """ - status, device_name, udid = self._discover_volume_on_vios( - vios_w, self.volume_id) - return hdisk.good_discovery(status, device_name), udid - - def extend_volume(self): - # The compute node does not need to take any additional steps for the - # client to see the extended volume. - pass - - def _discover_volume_on_vios(self, vios_w, volume_id): - """Discovers an hdisk on a single vios for the volume. - - :param vios_w: VIOS wrapper to process - :param volume_id: Volume to discover - :returns: Status of the volume or None - :returns: Device name or None - :returns: UDID or None - """ - # Get the initiator WWPNs, targets and Lun for the given VIOS. - vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w) - - # Build the ITL map and discover the hdisks on the Virtual I/O - # Server (if any). - itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun) - if len(itls) == 0: - LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.', - {'vios': vios_w.name, 'volume_id': volume_id}, - instance=self.instance) - return None, None, None - - device_id = self.connection_info.get('data', {}).get('pg83NAA') - if device_id: - device_id = base64.b64encode(device_id.encode()) - - status, device_name, udid = hdisk.discover_hdisk(self.adapter, - vios_w.uuid, itls, - device_id=device_id) - - if hdisk.good_discovery(status, device_name): - LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume ' - '%(volume_id)s. Status code: %(status)s.', - {'hdisk': device_name, 'vios': vios_w.name, - 'volume_id': volume_id, 'status': status}, - instance=self.instance) - elif status == hdisk.LUAStatus.DEVICE_IN_USE: - LOG.warning('Discovered device %(dev)s for volume %(volume)s ' - 'on %(vios)s is in use. Error code: %(status)s.', - {'dev': device_name, 'volume': volume_id, - 'vios': vios_w.name, 'status': status}, - instance=self.instance) - - return status, device_name, udid - - def _connect_volume_to_vio(self, vios_w, slot_mgr): - """Attempts to connect a volume to a given VIO. - - :param vios_w: The Virtual I/O Server wrapper to connect to. - :param slot_mgr: A NovaSlotManager. Used to delete the client slots - used when a volume is detached from the VM - - :return: True if the volume was connected. False if the volume was - not (could be the Virtual I/O Server does not have - connectivity to the hdisk). - """ - status, device_name, udid = self._discover_volume_on_vios( - vios_w, self.volume_id) - - # Get the slot and LUA to assign. - slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid) - - if slot_mgr.is_rebuild and not slot: - LOG.debug('Detected a device with UDID %(udid)s on VIOS ' - '%(vios)s on the rebuild that did not exist on the ' - 'source. 
-                      'source.  Ignoring.', {'udid': udid, 'vios': vios_w.uuid},
-                      instance=self.instance)
-            return False
-
-        if hdisk.good_discovery(status, device_name):
-            volume_id = self.connection_info["data"]["volume_id"]
-            # Found an hdisk on this Virtual I/O Server.  Add the action to
-            # map it to the VM when the stg_ftsk is executed.
-            with lockutils.lock(hash(self)):
-                self._add_append_mapping(vios_w.uuid, device_name,
-                                         lpar_slot_num=slot, lua=lua,
-                                         tag=volume_id)
-
-            # Save the UDID for the disk in the connection info.  It is
-            # used for the detach.
-            self._set_udid(udid)
-            LOG.debug('Added deferred task to attach device %(device_name)s '
-                      'to vios %(vios_name)s.',
-                      {'device_name': device_name, 'vios_name': vios_w.name},
-                      instance=self.instance)
-
-            # Valid attachment
-            return True
-
-        return False
-
-    def _disconnect_volume(self, slot_mgr):
-        """Disconnect the volume.
-
-        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
-                         used when a volume is detached from the VM
-        """
-        def discon_vol_for_vio(vios_w):
-            """Removes the volume from a specific Virtual I/O Server.
-
-            :param vios_w: The VIOS wrapper.
-            :return: True if a remove action was done against this VIOS.  False
-                     otherwise.
-            """
-            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
-                      dict(vol=self.volume_id, uuid=vios_w.uuid),
-                      instance=self.instance)
-            device_name = None
-            udid = self._get_udid()
-            try:
-                if udid:
-                    # This will only work if vios_w has the Storage XAG.
-                    device_name = vios_w.hdisk_from_uuid(udid)
-
-                if not udid or not device_name:
-                    # We lost our bdm data.  We'll need to discover it.
-                    status, device_name, udid = self._discover_volume_on_vios(
-                        vios_w, self.volume_id)
-
-                    # Check if the hdisk is in a bad state in the I/O Server.
-                    # Subsequent scrub code on future deploys will clean it up.
-                    if not hdisk.good_discovery(status, device_name):
-                        LOG.warning(
-                            "Disconnect Volume: The backing hdisk for volume "
-                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
-                            "not in a valid state.  This may be the result of "
-                            "an evacuate.",
-                            {'volume_id': self.volume_id, 'vios': vios_w.name},
-                            instance=self.instance)
-                        return False
-
-            except Exception:
-                LOG.exception(
-                    "Disconnect Volume: Failed to find disk on Virtual I/O "
-                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
-                    "UDID: %(volume_uid)s.",
-                    {'vios_name': vios_w.name, 'volume_id': self.volume_id,
-                     'volume_uid': udid}, instance=self.instance)
-                return False
-
-            # We have found the device name
-            LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
-                     "on Virtual I/O Server %(vios_name)s for volume "
-                     "%(volume_id)s. Volume UDID: %(volume_uid)s.",
-                     {'volume_uid': udid, 'volume_id': self.volume_id,
-                      'vios_name': vios_w.name, 'hdisk': device_name},
-                     instance=self.instance)
-
-            # Add the action to remove the mapping when the stg_ftsk is run.
-            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
-
-            with lockutils.lock(hash(self)):
-                self._add_remove_mapping(partition_id, vios_w.uuid,
-                                         device_name, slot_mgr)
-
-                # Add a step to also remove the hdisk
-                self._add_remove_hdisk(vios_w, device_name)
-
-            # Found a valid element to remove
-            return True
-
-        try:
-            # See logic in _connect_volume for why this new FeedTask is here.
-            discon_ftsk = tx.FeedTask(
-                'discon_volume_from_vio', pvm_vios.VIOS.getter(
-                    self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
-            # Find hdisks to disconnect
-            discon_ftsk.add_functor_subtask(
-                discon_vol_for_vio, provides='vio_modified', flag_update=False)
-            ret = discon_ftsk.execute()
-
-            # Warn if no hdisks disconnected.
-            if not any([result['vio_modified']
-                        for result in ret['wrapper_task_rets'].values()]):
-                LOG.warning("Disconnect Volume: Failed to disconnect the "
-                            "volume %(volume_id)s on ANY of the Virtual "
-                            "I/O Servers.", {'volume_id': self.volume_id},
-                            instance=self.instance)
-
-        except Exception as e:
-            LOG.exception('PowerVM error detaching volume from virtual '
-                          'machine.', instance=self.instance)
-            ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e),
-                       'instance_name': self.instance.name}
-            raise p_exc.VolumeDetachFailed(**ex_args)
-
-    @lockutils.synchronized('vscsi_wwpns')
-    def wwpns(self):
-        """Builds the WWPNs of the adapters that will connect the ports.
-
-        :return: The list of WWPNs that need to be included in the zone set.
-        """
-        # Use a global variable so this is pulled once when the process starts.
-        global _vscsi_pfc_wwpns
-        if _vscsi_pfc_wwpns is None:
-            _vscsi_pfc_wwpns = pvm_tpar.get_physical_wwpns(self.adapter)
-        return _vscsi_pfc_wwpns
-
-    def _get_hdisk_itls(self, vios_w):
-        """Returns the mapped ITLs for the hdisk for the given VIOS.
-
-        A PowerVM system may have multiple Virtual I/O Servers to virtualize
-        the I/O to the virtual machines.  Each Virtual I/O Server may have its
-        own set of initiator WWPNs, target WWPNs and LUN on which the hdisk is
-        mapped.  This method determines and returns the ITLs for the given
-        VIOS.
-
-        :param vios_w: A Virtual I/O Server wrapper.
-        :return: List of the i_wwpns that are part of the vios_w,
-        :return: List of the t_wwpns that are part of the vios_w,
-        :return: Target lun id of the hdisk for the vios_w.
-        """
-        it_map = self.connection_info['data']['initiator_target_map']
-        i_wwpns = it_map.keys()
-
-        active_wwpns = vios_w.get_active_pfc_wwpns()
-        vio_wwpns = [x for x in i_wwpns if x in active_wwpns]
-
-        t_wwpns = []
-        for it_key in vio_wwpns:
-            t_wwpns.extend(it_map[it_key])
-        lun = self.connection_info['data']['target_lun']
-
-        return vio_wwpns, t_wwpns, lun
diff --git a/openstack-common.conf b/openstack-common.conf
deleted file mode 100644
index 6d9e052b..00000000
--- a/openstack-common.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-
-# The base module to hold the copy of openstack.common
-base=nova-powervm
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 1f490be5..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
-Babel!=2.4.0,>=2.3.4 # BSD
-six>=1.10.0 # MIT
-oslo.config>=6.1.0 # Apache-2.0
-oslo.log>=3.36.0 # Apache-2.0
-oslo.serialization!=2.19.1,>=2.21.1 # Apache-2.0
-oslo.utils>=3.37.0 # Apache-2.0
-os-resource-classes>=0.1.0 # Apache-2.0
-pypowervm>=1.1.23 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,<2.0.0,>=1.6.2;python_version=='2.7' # BSD
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
-python-swiftclient>=3.2.0 # Apache-2.0
-taskflow>=2.16.0 # Apache-2.0
-setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 0e5244bd..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,46 +0,0 @@
-[metadata]
-name = nova_powervm
-summary = PowerVM driver for OpenStack Nova.
-description-file = README.rst
-author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://nova-powervm.readthedocs.io
-classifier =
-    Environment :: OpenStack
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-    Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-
-[files]
-packages =
-    nova_powervm
-    nova/virt/powervm_ext
-
-[compile_catalog]
-directory = nova_powervm/locale
-domain = nova-powervm
-
-[init_catalog]
-domain = nova-powervm
-output_dir = nova_powervm/locale
-input_file = nova_powervm/locale/nova-powervm.pot
-
-[update_catalog]
-domain = nova-powervm
-output_dir = nova_powervm/locale
-input_file = nova_powervm/locale/nova-powervm.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = nova_powervm/locale/nova-powervm.pot
-
-[entry_points]
-oslo.config.opts =
-    nova_powervm = nova_powervm.conf.powervm:list_opts
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 566d8443..00000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing  # noqa
-except ImportError:
-    pass
-
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
diff --git a/sonar-project.properties b/sonar-project.properties
deleted file mode 100644
index c5aa1dc8..00000000
--- a/sonar-project.properties
+++ /dev/null
@@ -1,13 +0,0 @@
-# Required metadata
-sonar.projectKey=org.codehaus.sonar:nova-powervm
-sonar.projectName=nova-powervm
-sonar.projectVersion=1.0
-
-# Comma-separated paths to directories with sources (required)
-sonar.sources=.
-
-# Language
-sonar.language=py
-
-# Encoding of the source files
-sonar.sourceEncoding=UTF-8
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 963ddbdb..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-hacking>=1.1.0,<1.2.0 # Apache-2.0
-bashate>=0.5.1 # Apache-2.0
-coverage!=4.4,>=4.0 # Apache-2.0
-fixtures>=3.0.0 # Apache-2.0/BSD
-oslotest>=3.2.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,<2.0.0,>=1.6.2;python_version=='2.7' # BSD
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
-stestr>=1.0.0 # Apache-2.0
-testscenarios>=0.4 # Apache-2.0/BSD
-testtools>=2.2.0 # MIT
-mock>=2.0.0 # BSD
-pycodestyle>=2.0.0 # MIT License
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 7441bded..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,99 +0,0 @@
-[tox]
-minversion = 3.1.1
-envlist = py36,py27,pep8
-skipsdist = True
-# Automatic envs (pyXX) will use the python version appropriate to that
-# env and ignore basepython inherited from [testenv].  That's what we
-# want, and we don't need to be warned about it.
-ignore_basepython_conflict = True
-
-[testenv]
-basepython = python3
-usedevelop = True
-install_command = pip install {opts} {packages}
-setenv =
-    VIRTUAL_ENV={envdir}
-    LANGUAGE=en_US
-    LC_ALL=en_US.utf-8
-    OS_STDOUT_CAPTURE=1
-    OS_STDERR_CAPTURE=1
-    OS_TEST_TIMEOUT=60
-    PYTHONDONTWRITEBYTECODE=1
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-    -r{toxinidir}/requirements.txt
-    -r{toxinidir}/test-requirements.txt
-    -egit+https://git.openstack.org/openstack/nova#egg=nova
-    -rhttps://git.openstack.org/cgit/openstack/nova/plain/test-requirements.txt
-whitelist_externals =
-    bash
-commands =
-
-[testenv:py27]
-commands =
-    {[testenv]commands}
-    stestr run {posargs}
-    stestr slowest
-
-[testenv:py36]
-commands =
-    {[testenv]commands}
-    stestr run {posargs}
-    stestr slowest
-
-[testenv:py37]
-commands =
-    {[testenv]commands}
-    stestr run {posargs}
-    stestr slowest
-
-[testenv:pep8]
-commands = flake8
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:cover]
-# TODO(stephenfin): Remove the PYTHON hack below in favour of a [coverage]
-# section once we rely on coverage 4.3+
-#
-# https://bitbucket.org/ned/coveragepy/issues/519/
setenv =
-    {[testenv]setenv}
-    PYTHON=coverage run --source nova,nova_powervm --parallel-mode
-commands =
-    {[testenv]commands}
-    coverage erase
-    stestr run {posargs}
-    coverage combine
-    coverage html -d cover
-    coverage xml -o cover/coverage.xml
-    coverage report
-
-[testenv:docs]
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-    -r{toxinidir}/doc/requirements.txt
-commands = sphinx-build -W -b html doc/source doc/build/html
-
-[testenv:bashate]
-commands =
-    bash -c "ls devstack/*.sh | xargs bashate -v {posargs}"
-whitelist_externals = bash
-
-[flake8]
-# N342 - Config Opts need to be outside nova/conf until powervm is part of nova proper
-# E402 module level import not at top of file
-ignore = E125,N342,W504,W503,E402
-exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
-
-[hacking]
-local-check-factory = nova_powervm.hacking.checks.factory
-
-[testenv:lower-constraints]
-deps =
-    -c{toxinidir}/lower-constraints.txt
-    -r{toxinidir}/test-requirements.txt
-    -r{toxinidir}/requirements.txt
-    -egit+https://git.openstack.org/openstack/nova#egg=nova
-    -rhttps://git.openstack.org/cgit/openstack/nova/plain/test-requirements.txt
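
For reference, the deferred-update pattern behind the removed adapter's stg_ftsk argument can be sketched in plain Python: mapping changes are queued per Virtual I/O Server and applied in one pass, rather than issuing a REST update per change. This is a minimal illustration under that assumption only; DeferredVioUpdates and add_scsi_mapping below are hypothetical stand-ins, not pypowervm's FeedTask API.

# Plain-Python sketch of the deferred-update pattern the stg_ftsk argument
# enabled above: mapping changes are queued per Virtual I/O Server and
# flushed in one pass, so each VIOS is updated only once per operation.
# Illustrative only; the real driver used pypowervm's FeedTask.

class DeferredVioUpdates(object):
    def __init__(self):
        self._subtasks = {}  # vios_uuid -> list of (callable, args)

    def add_subtask(self, vios_uuid, func, *args):
        """Queue a change against one VIOS instead of running it now."""
        self._subtasks.setdefault(vios_uuid, []).append((func, args))

    def execute(self):
        """Apply all queued changes, one batch per VIOS."""
        for vios_uuid, tasks in self._subtasks.items():
            for func, args in tasks:
                func(vios_uuid, *args)
        self._subtasks.clear()


def add_scsi_mapping(vios_uuid, device_name):
    print('mapping %s on VIOS %s' % (device_name, vios_uuid))


ftsk = DeferredVioUpdates()
ftsk.add_subtask('vios-uuid-1', add_scsi_mapping, 'hdisk4')  # deferred
ftsk.add_subtask('vios-uuid-1', add_scsi_mapping, 'hdisk5')  # deferred
ftsk.execute()  # both mappings applied in a single pass

The removed code achieved the same effect by adding functor subtasks to a FeedTask (as in _disconnect_volume above) and executing it once.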
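
Similarly, the ITL (initiator-target-LUN) selection that _get_hdisk_itls performed for each Virtual I/O Server can be shown standalone. The connection_info shape mirrors what the removed code read from the BDM; the WWPN values are made up for illustration, and a plain list stands in for vios_w.get_active_pfc_wwpns().

# Minimal sketch of the ITL selection performed by _get_hdisk_itls above.
# The sample WWPNs are hypothetical.

def get_hdisk_itls(connection_info, active_pfc_wwpns):
    """Return (initiator_wwpns, target_wwpns, lun) for one VIOS.

    Only the initiators that are active physical FC ports on this VIOS
    are kept; their mapped targets come from the initiator_target_map.
    """
    it_map = connection_info['data']['initiator_target_map']

    # Keep only the initiators this VIOS actually owns.
    vio_wwpns = [i for i in it_map if i in active_pfc_wwpns]

    # Collect the target WWPNs reachable from those initiators.
    t_wwpns = []
    for initiator in vio_wwpns:
        t_wwpns.extend(it_map[initiator])

    return vio_wwpns, t_wwpns, connection_info['data']['target_lun']


if __name__ == '__main__':
    conn_info = {'data': {
        'initiator_target_map': {
            'c05076079cff0e56': ['500507680110f1d5'],  # hypothetical WWPNs
            'c05076079cff0e58': ['500507680110f1d6'],
        },
        'target_lun': 2,
    }}
    # Pretend this VIOS only has the first initiator port active.
    print(get_hdisk_itls(conn_info, ['c05076079cff0e56']))
    # -> (['c05076079cff0e56'], ['500507680110f1d5'], 2)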